How do I fix this NaN? ㅠ
Body
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
x_data = [[25,10,3],[29,6,4],[0,1,1],[28,2,0],[12,14,1],[5,13,3],[28,1,4],[20,0,3],[5,2,0],[3,0,1],[2,6,3],[20,2,2],[7,15,4],[27,14,2],[18,8,0],[1,12,3],[21,5,4],[19,12,2],[2,5,3],[17,0,4],[5,5,0],[15,3,3],[25,7,4],[26,3,3],[14,12,1],[0,11,0],[9,13,2],[6,6,3],[17,15,2],[19,13,0]]
y_data = [[0,1,0],[0,0,1],[0,0,1],[0,0,1],[0,1,0],[0,1,0],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,1,0],[1,0,0],[0,0,1],[0,0,1],[0,0,1],[1,0,0],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,1,0],[0,0,1],[0,1,0],[0,0,1],[1,0,0],[0,1,0]]
X = tf.placeholder(tf.float32, [None, 3])
Y = tf.placeholder(tf.float32, [None, 3])
nb_classes = 3
W = tf.Variable(tf.random_normal([3, nb_classes]), name='weight')
b = tf.Variable(tf.random_normal([nb_classes]), name='bias')
hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis)))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train = optimizer.minimize(cost)
#-----------------------------------------------------------------------#
xdata_new = [[1, 11, 7], [1, 3, 4], [1, 1, 0], [1, 1, 0]]
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
    _, cost_val = sess.run([train, cost], feed_dict={X: x_data, Y: y_data})
    if step % 100 == 0:
        print(step, cost_val)
sess.run(hypothesis, feed_dict={X: x_data})
a = sess.run(hypothesis, feed_dict={X: xdata_new})
print(a, sess.run(tf.arg_max(a, 1)))
I built the model as above, but once I fed in more data it started printing NaN ㅠ. I've only tried adjusting the learning rate, and that didn't fix it, so I'd appreciate some help. Is there a way to get the cost value down to something like 0.xxxx?
Answer 1
0 nan
100 nan
200 nan
300 nan
400 nan
500 nan
600 nan
700 nan
800 nan
900 nan
1000 nan
1100 nan
1200 nan
1300 nan
1400 nan
1500 nan
1600 nan
1700 nan
1800 nan
1900 nan
2000 nan
[[nan nan nan]
 [nan nan nan]
 [nan nan nan]
 [nan nan nan]] [0 0 0 0]
This is what the output looks like!
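The NaN most likely comes from the cost line, not the learning rate: the code applies the binary cross-entropy form Y * log(h) + (1 - Y) * log(1 - h) to a softmax output. As soon as any softmax probability saturates to exactly 0 or 1, tf.log returns -inf, and from then on every cost and gradient is NaN. Below is a minimal sketch of one common fix, keeping the model from the question unchanged; the use of softmax_cross_entropy_with_logits_v2 is my suggestion, not something from the original post.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

X = tf.placeholder(tf.float32, [None, 3])
Y = tf.placeholder(tf.float32, [None, 3])
nb_classes = 3
W = tf.Variable(tf.random_normal([3, nb_classes]), name='weight')
b = tf.Variable(tf.random_normal([nb_classes]), name='bias')

# Keep the raw logits separate: the built-in loss applies softmax internally
# in a numerically stable way, so log(0) can never occur.
logits = tf.matmul(X, W) + b
hypothesis = tf.nn.softmax(logits)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))

# Equivalent hand-written version: drop the (1 - Y) * log(1 - h) term (it
# belongs with sigmoid outputs, not softmax) and clip the log input away
# from zero so it can never be exactly 0.
# cost = tf.reduce_mean(
#     -tf.reduce_sum(Y * tf.log(tf.clip_by_value(hypothesis, 1e-10, 1.0)),
#                    axis=1))

train = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)

With either version the cost stays finite; whether it reaches the 0.xxxx range depends on the data and the learning rate, so you may still need to tune that. Also, tf.arg_max is deprecated in favor of tf.argmax, though that is unrelated to the NaN.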