In [1]:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('MNIST_data',one_hot=True)


Extracting MNIST_data\train-images-idx3-ubyte.gz
Extracting MNIST_data\train-labels-idx1-ubyte.gz
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz

In [2]:
x  =tf.placeholder(tf.float32,shape=[None,784]) #28*28
W0  =tf.Variable(tf.zeros([784,200]),tf.float32)
b0  =tf.Variable(tf.zeros([200]),tf.float32)
W1  =tf.Variable(tf.zeros([200,10]),tf.float32)
b1  =tf.Variable(tf.zeros([10]),tf.float32)
y_ =tf.placeholder(tf.float32,shape=[None,10])

In [3]:
y0=tf.nn.sigmoid( tf.matmul(x,W0)+b0 )
y1=tf.nn.softmax( tf.matmul(y0,W1)+b1 )
# cross_entropy=tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y),reduction_indices=[1]))
# train_step=tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
cross_entropy=tf.reduce_mean(tf.reduce_sum(tf.square(y_ - y1),reduction_indices=[1]))
train_step=tf.train.AdadeltaOptimizer(0.9).minimize(cross_entropy)

In [4]:
with tf.Session() as sess:
    sess.run( tf.global_variables_initializer() )
    for i in range(10000):
        batch=mnist.train.next_batch(100)
        #train_step.run(,feed_dict={x:batch[0], y_:batch[1]})
        train, cost=sess.run([train_step,cross_entropy], 
                             feed_dict={x:batch[0], y_:batch[1]})
        if i%1000==0:
            print(train,cost)
    
    correct_prediction=tf.equal(tf.argmax(y1,1),tf.arg_max(y_,1))
    accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    acc=accuracy.eval(feed_dict={x:mnist.test.images, y_:mnist.test.labels})*100
    print ("Accuracy of the ANN is {}".format(acc) )


None 0.9
None 0.899349
None 0.815511
None 0.806085
None 0.77586
None 0.790273
None 0.822304
None 0.77466
None 0.691044
None 0.769931
Accuracy of the ANN is 31.88999891281128

In [ ]: