In [1]:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download (if needed) and load MNIST with one-hot encoded labels (TF 1.x tutorial API).
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
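# The tensorflow.examples.tutorials module was removed in later TensorFlow
# releases; a minimal alternative sketch, assuming a build that bundles
# tf.keras, loads the same digits and flattens/one-hot encodes them by hand:
import numpy as np
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
train_x = train_x.reshape(-1, 784).astype('float32') / 255.0   # flatten to 784, scale to [0, 1]
train_y = np.eye(10, dtype='float32')[train_y]                 # one-hot encode the labels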
In [2]:
x  = tf.placeholder(tf.float32, shape=[None, 784])   # flattened 28*28 input images
# Hidden-layer weights must not start at zero: with identical weights every
# hidden unit receives the same gradient and the layer never differentiates.
# Small random values break that symmetry. (The dtype comes from the
# initializer; the earlier second positional argument was silently being
# interpreted as `trainable`, not as a dtype.)
W0 = tf.Variable(tf.truncated_normal([784, 200], stddev=0.1))
b0 = tf.Variable(tf.zeros([200]))
W1 = tf.Variable(tf.truncated_normal([200, 10], stddev=0.1))
b1 = tf.Variable(tf.zeros([10]))
y_ = tf.placeholder(tf.float32, shape=[None, 10])    # one-hot target labels
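# Quick sanity check (sketch): the network should map 784 -> 200 -> 10.
for v in (W0, b0, W1, b1):
    print(v.name, v.shape)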
In [3]:
y0 = tf.nn.sigmoid(tf.matmul(x, W0) + b0)    # hidden layer: 200 sigmoid units
y1 = tf.nn.softmax(tf.matmul(y0, W1) + b1)   # output layer: softmax over the 10 digits
# cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y1), reduction_indices=[1]))
# train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# The loss actually used below is a summed squared error per example, averaged
# over the batch (not a cross-entropy), so it is named accordingly.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(y_ - y1), reduction_indices=[1]))
train_step = tf.train.AdadeltaOptimizer(0.9).minimize(loss)
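# Sketch of the cross-entropy variant commented out above, written against the
# pre-softmax logits with tf.nn.softmax_cross_entropy_with_logits_v2 (present
# in recent TF 1.x releases), which is numerically safer than y_*tf.log(y1):
logits = tf.matmul(y0, W1) + b1
xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits))
xent_step = tf.train.GradientDescentOptimizer(0.5).minimize(xent)   # could replace train_step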
In [4]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10000):
        batch = mnist.train.next_batch(100)
        # train_step.run(feed_dict={x: batch[0], y_: batch[1]})
        _, cost = sess.run([train_step, loss],
                           feed_dict={x: batch[0], y_: batch[1]})
        if i % 1000 == 0:
            print("step", i, "loss", cost)
    # Evaluate on the test set while the session (and the trained variables) is still open.
    correct_prediction = tf.equal(tf.argmax(y1, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    acc = accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}) * 100
    print("Accuracy of the ANN is {}".format(acc))
In [ ]: