TensorFlow MNIST


In [20]:
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('./datasets/ud730/mnist', one_hot=True, reshape=False)


Extracting ./datasets/ud730/mnist/train-images-idx3-ubyte.gz
Extracting ./datasets/ud730/mnist/train-labels-idx1-ubyte.gz
Extracting ./datasets/ud730/mnist/t10k-images-idx3-ubyte.gz
Extracting ./datasets/ud730/mnist/t10k-labels-idx1-ubyte.gz
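
With reshape=False the images keep their 2-D layout plus a channel dimension, and one_hot=True turns each label into a length-10 indicator vector. A quick sanity check on the loaded arrays (not part of the original notebook output):

print(mnist.train.images.shape)   # (55000, 28, 28, 1) because reshape=False
print(mnist.train.labels.shape)   # (55000, 10) because one_hot=True
print(mnist.test.images.shape)    # (10000, 28, 28, 1)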

Learning Parameters


In [21]:
import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 128  # Decrease batch size if you don't have enough memory
display_step = 1

n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)
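
One epoch is a full pass over the training set, so the number of gradient updates per epoch is roughly num_examples / batch_size. A rough sketch of the arithmetic, assuming the standard 55,000-image MNIST training split used here:

batches_per_epoch = 55000 // batch_size                # 429 updates per epoch
total_updates = batches_per_epoch * training_epochs    # 8580 updates over 20 epochs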

Hidden Layer Parameters


In [22]:
n_hidden_layer = 256  # number of features (units) in the hidden layer

Weights and Biases


In [23]:
weights = {
    'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes]))
}
biases = {
    'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
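
The hidden layer therefore holds a 784x256 weight matrix and the output layer a 256x10 matrix, each with a matching bias vector. A small sketch (an illustration, not part of the original notebook) that counts the trainable parameters from those shapes:

hidden_params = n_input * n_hidden_layer + n_hidden_layer   # 784*256 + 256 = 200,960
output_params = n_hidden_layer * n_classes + n_classes      # 256*10 + 10 = 2,570
print(hidden_params + output_params)                        # 203,530 parameters in total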

Input


In [24]:
x = tf.placeholder("float", [None, 28, 28, 1])
y = tf.placeholder("float", [None, n_classes])

x_flat = tf.reshape(x, [-1, n_input])
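
The -1 in tf.reshape tells TensorFlow to infer the batch dimension, so each 28x28x1 image is flattened into a 784-element vector regardless of batch size. The same operation in plain NumPy, as a minimal illustration (not part of the original notebook):

import numpy as np

images = np.zeros((128, 28, 28, 1))   # a dummy batch with the same layout as the placeholder x
flat = images.reshape(-1, 784)        # -1 infers the batch dimension
print(flat.shape)                     # (128, 784)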

Multilayer Perceptron


In [25]:
# Hidden layer with ReLU activation
layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']), biases['hidden_layer'])
layer_1 = tf.nn.relu(layer_1)
# Output layer with linear activation
logits = tf.add(tf.matmul(layer_1, weights['out']), biases['out'])
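
For a batch of N images the forward pass is layer_1 = ReLU(x_flat · W_hidden + b_hidden) with shape (N, 256), and logits = layer_1 · W_out + b_out with shape (N, 10). No softmax is applied here; the loss function below expects raw logits. A quick static-shape check (not part of the original notebook):

print(x_flat.get_shape())   # (?, 784)
print(layer_1.get_shape())  # (?, 256)
print(logits.get_shape())   # (?, 10)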

Optimizer


In [26]:
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
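
softmax_cross_entropy_with_logits applies the softmax internally, which is why the output layer above stays linear. If plain gradient descent converges too slowly, one common variant (an assumption for illustration, not part of the original notebook) is to swap in Adam:

# Hypothetical alternative: Adam usually needs less learning-rate tuning than plain SGD.
# optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)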

Session


In [27]:
# Initializing the variables
init = tf.global_variables_initializer()


with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) on this batch
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        # Display logs per epoch step
        if epoch % display_step == 0:
            c = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Epoch:", '%04d' % (epoch+1), "cost=", \
                "{:.9f}".format(c))
    print("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    # Decrease test_size if you don't have enough memory
    test_size = 256
    print("Accuracy:", accuracy.eval({x: mnist.test.images[:test_size], y: mnist.test.labels[:test_size]}))


Epoch: 0001 cost= 37.288757324
Epoch: 0002 cost= 28.099151611
Epoch: 0003 cost= 21.492095947
Epoch: 0004 cost= 15.777803421
Epoch: 0005 cost= 13.104373932
Epoch: 0006 cost= 16.443382263
Epoch: 0007 cost= 12.179491997
Epoch: 0008 cost= 11.330167770
Epoch: 0009 cost= 8.374776840
Epoch: 0010 cost= 10.616512299
Epoch: 0011 cost= 8.541707993
Epoch: 0012 cost= 5.481113434
Epoch: 0013 cost= 5.806009293
Epoch: 0014 cost= 8.566146851
Epoch: 0015 cost= 10.841135025
Epoch: 0016 cost= 8.520166397
Epoch: 0017 cost= 6.733136177
Epoch: 0018 cost= 4.299148560
Epoch: 0019 cost= 5.420190334
Epoch: 0020 cost= 8.197362900
Optimization Finished!
Accuracy: 0.839844
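
Note that the accuracy above is measured on only the first 256 test images (test_size = 256), so it is a rough estimate rather than the full test-set accuracy. To keep the trained weights instead of losing them when the session closes, a tf.train.Saver can be added; a minimal sketch with a hypothetical checkpoint path (the directory must already exist), not part of the original notebook:

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    # ... training cycle as above ...
    save_path = saver.save(sess, './checkpoints/mnist_mlp.ckpt')  # hypothetical path
    print("Model saved to", save_path)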
