In [1]:
import tensorflow as tf
import numpy as np
In [2]:
# Load the whitespace-delimited training file; unpack=True gives one row per
# column of the file, so xy has shape (num_columns, num_samples).
xy = np.loadtxt('../data/softmax_data.txt', unpack=True, dtype='float32')
# First three file columns are the inputs, the rest are the one-hot labels;
# transpose back to (num_samples, num_features) layout for feeding.
x_data = xy[0:3].T
y_data = xy[3:].T
In [3]:
np.transpose(xy)
Out[3]:
In [4]:
# Softmax (multinomial logistic) regression, TF1 graph mode.
# X: batch of 3 input features per sample (first column acts as bias term —
#    inputs are fed as [1, f1, f2] below); Y: one-hot labels over 3 classes.
X = tf.placeholder("float", [None, 3])
Y = tf.placeholder("float", [None, 3])
W = tf.Variable(tf.zeros([3, 3]))

# Hypothesis: per-class probabilities.
h = tf.nn.softmax(tf.matmul(X, W))

learning_rate = 0.001
# Cross-entropy loss. Clip probabilities away from 0 so tf.log never produces
# -inf/NaN; `axis` replaces the deprecated `reduction_indices` argument.
cost = tf.reduce_mean(
    -tf.reduce_sum(Y * tf.log(tf.clip_by_value(h, 1e-10, 1.0)), axis=1))
opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(2000):
    sess.run(opt, feed_dict={X: x_data, Y: y_data})
    if step % 200 == 0:
        # Report loss and current weights every 200 steps.
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
In [5]:
# Predict class probabilities for new inputs (leading 1 is the bias feature).
# The argmax is taken with np.argmax on the fetched numpy array instead of
# sess.run(tf.arg_max(...)): tf.arg_max is deprecated, and building a new
# graph op per call grows the graph on every prediction.
a = sess.run(h, feed_dict={X: [[1, 11, 7]]})
print(a, np.argmax(a, axis=1))
b = sess.run(h, feed_dict={X: [[1, 3, 4]]})
print(b, np.argmax(b, axis=1))
c = sess.run(h, feed_dict={X: [[1, 1, 0]]})
print(c, np.argmax(c, axis=1))
# Batch prediction for several samples at once.
# Renamed from `all`, which shadowed the Python builtin.
batch = sess.run(h, feed_dict={X: [[1, 1, 0], [1, 3, 4], [1, 1, 0]]})
print(batch, np.argmax(batch, axis=1))