In [1]:
    
import tensorflow as tf
    
In [2]:
    
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
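
The loader splits MNIST into training and test sets with one-hot labels. A quick sanity check of the shapes (the values in the comments are what the standard split should give: 55,000 training and 10,000 test images, each flattened to 784 pixels):

print(mnist.train.images.shape)   # (55000, 784): flattened 28x28 images
print(mnist.train.labels.shape)   # (55000, 10): one-hot digit labels
print(mnist.test.images.shape)    # (10000, 784)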
    
    
Neural network: one softmax layer, Y = softmax(X·W + b)
In [3]:
    
X = tf.placeholder(tf.float32, [None, 784])   # input images, flattened to 28x28 = 784 pixels
W = tf.Variable(tf.zeros([784, 10]))          # weights: 784 pixels -> 10 digit classes
b = tf.Variable(tf.zeros([10]))               # biases, one per class
    
In [4]:
    
# model: softmax regression, probabilities over the 10 digit classes
Y = tf.nn.softmax(tf.matmul(X, W) + b)
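
tf.nn.softmax turns each row of X·W + b into a probability distribution over the 10 digits. For intuition, a small NumPy sketch of the same computation on made-up logits:

import numpy as np
logits = np.array([2.0, 1.0, 0.1])
probs = np.exp(logits) / np.sum(np.exp(logits))   # exponentiate, then normalize to sum to 1
print(probs)   # approx [0.659, 0.242, 0.099]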
    
In [5]:
    
# placeholder for correct answers ("one-hot" encoded)
Y_ = tf.placeholder(tf.float32, [None, 10])
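
One-hot encoding means each label is a length-10 vector with a 1 at the digit's index; for example, the digit 3 is represented as [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].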
    
In [6]:
    
# loss function: cross-entropy, averaged over the batch (reduction_indices is the pre-1.0 name for axis)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(Y_ * tf.log(Y), reduction_indices=[1]))
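
With one-hot labels, the inner sum simply picks out -log of the probability the model assigned to the correct class, and reduce_mean averages that over the batch. A NumPy sketch for a single example with made-up values:

import numpy as np
y_true = np.array([0.0, 0.0, 1.0])        # one-hot: correct class is index 2
y_pred = np.array([0.1, 0.2, 0.7])        # predicted probabilities
print(-np.sum(y_true * np.log(y_pred)))   # -log(0.7), approx 0.357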
    
Mathematical operators documentation: https://www.tensorflow.org/versions/r0.11/api_docs/python/math_ops.html
In [7]:
    
# % of correct answers found in the data fed to the graph (training batch or test set)
is_correct = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
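
tf.argmax over axis 1 returns the predicted (and true) digit for each image, so the mean of the element-wise comparison is the fraction of correct predictions. A NumPy sketch with made-up predictions:

import numpy as np
predicted = np.array([3, 1, 4, 1])      # argmax of the model output per image
expected = np.array([3, 1, 5, 1])       # argmax of the one-hot labels per image
print(np.mean(predicted == expected))   # 0.75, i.e. 75% accuracy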
    
In [8]:
    
# training
optimizer = tf.train.GradientDescentOptimizer(0.003)  # learning rate of 0.003
train_step = optimizer.minimize(cross_entropy)        # minimize the loss defined above
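
Each run of train_step nudges every parameter a small step against the gradient of the loss, e.g. W ← W − 0.003 · ∂cross_entropy/∂W, and likewise for b.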
    
In [9]:
    
init = tf.initialize_all_variables()  # deprecated since TF 0.12; use tf.global_variables_initializer() in newer versions
sess = tf.Session()
sess.run(init)
    
In [10]:
    
for i in range(1000):
    # load a batch of 100 training images and their correct answers
    batch_X, batch_Y = mnist.train.next_batch(100)
    train_data = {X: batch_X, Y_: batch_Y}

    # train
    sess.run(train_step, feed_dict=train_data)

    # success on test data?
    test_data = {X: mnist.test.images, Y_: mnist.test.labels}
    a, c = sess.run([accuracy, cross_entropy], feed_dict=test_data)
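
The accuracy and loss computed above are never displayed; a minimal follow-up, reusing a and c from the last loop iteration, to report the final test metrics (a single softmax layer like this typically lands around 92% test accuracy):

print("final test accuracy:", a, "test cross-entropy:", c)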
    
In [ ]: