In [1]:
# Download and read the MNIST data set.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets

# Downloads the four gzipped IDX files into MNIST_data/ (if they are not
# already present) and returns train/validation/test DataSets with one-hot labels.
mnist = read_data_sets("MNIST_data/", one_hot=True)


Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
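
A quick sanity check on what read_data_sets returned; this is a sketch that assumes the cell above ran successfully with one_hot=True (the expected shapes in the comments are the standard 55000/5000/10000 MNIST split).

In [ ]:
print(mnist.train.images.shape)       # expected (55000, 784): flattened 28x28 images
print(mnist.train.labels.shape)       # expected (55000, 10): one-hot digit labels
print(mnist.validation.num_examples)  # expected 5000
print(mnist.test.num_examples)        # expected 10000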

In [2]:
# Input images: each is a flattened 28x28 = 784-pixel vector.
x = tf.placeholder(tf.float32, [None, 784])

# Model parameters, initialized to zero.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Predicted class probabilities and one-hot ground-truth labels.
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])

# Loss function: cross-entropy summed over the batch.
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))

train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

# Evaluation ops, built once outside the training loop so the graph
# does not grow every time accuracy is computed.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)
    for i in range(1, 1001):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        if i % 100 == 0:
            result = sess.run(accuracy,
                              feed_dict={x: mnist.test.images, y_: mnist.test.labels})
            print('step:', i, 'accuracy : ', result)


step: 100 accuracy :  0.8818
step: 200 accuracy :  0.8947
step: 300 accuracy :  0.9056
step: 400 accuracy :  0.9119
step: 500 accuracy :  0.9121
step: 600 accuracy :  0.9128
step: 700 accuracy :  0.9134
step: 800 accuracy :  0.9193
step: 900 accuracy :  0.9138
step: 1000 accuracy :  0.9158
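
The loss above applies softmax and then log explicitly, which can hit log(0) once a predicted probability underflows to zero. A more numerically stable alternative is tf.nn.softmax_cross_entropy_with_logits, which works directly on the raw logits; the sketch below reuses x, W, b, and y_ from the cell above, and the 0.5 learning rate is only an illustrative choice (the mean loss is roughly 100x smaller than the summed one, so the step size would need retuning).

In [ ]:
# Sketch of a numerically safer loss than -tf.reduce_sum(y_ * tf.log(y)):
# softmax and cross-entropy are combined in a single op applied to the logits.
logits = tf.matmul(x, W) + b
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
# Illustrative learning rate; actually training with this op would need a
# fresh session and re-initialization, since the session above is closed.
train_step_stable = tf.train.GradientDescentOptimizer(0.5).minimize(stable_cross_entropy)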