In [11]:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./mnist/", one_hot=True)

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 30
batch_size = 100
display_step = 1

# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
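# The leading None dimension leaves the batch size flexible; actual values
# are supplied per training step through feed_dict below.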


Extracting ./mnist/train-images-idx3-ubyte.gz
Extracting ./mnist/train-labels-idx1-ubyte.gz
Extracting ./mnist/t10k-images-idx3-ubyte.gz
Extracting ./mnist/t10k-labels-idx1-ubyte.gz
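
Note: the tensorflow.examples.tutorials.mnist loader was removed in later
TensorFlow releases. If the import above fails, an equivalent dataset can be
loaded through tf.keras.datasets instead; a minimal sketch (not part of the
original run; batches would then be sliced manually rather than drawn with
mnist.train.next_batch):

In [ ]:
# Alternative MNIST loader for newer TF versions (sketch).
import numpy as np

(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
train_x = train_x.reshape(-1, 784).astype("float32") / 255.0  # flatten + scale
train_y = np.eye(10)[train_y].astype("float32")               # one-hot labels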

In [12]:
# Pre-define the convolution and 2x2 max-pooling helper ops.
def conv2d(x, W):
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
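
With 'SAME' padding and stride 1, the convolution preserves spatial size,
while each 2x2 max-pool halves it. A quick static shape check (illustrative
sketch; the probe tensors are hypothetical):

In [ ]:
# Static shape check of the helpers above (not part of training).
probe = tf.zeros([1, 28, 28, 1])            # one dummy 28x28 grayscale image
kernel = tf.zeros([5, 5, 1, 32])            # same shape as weights['conv1'] below
print(conv2d(probe, kernel).get_shape())    # (1, 28, 28, 32): SAME keeps 28x28
print(max_pool_2x2(probe).get_shape())      # (1, 14, 14, 1): pooling halves H and W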

In [13]:
def multilayer_perceptron(x, weights, biases):
    # Despite the name (kept from the earlier MLP version), this builds a CNN.

    # First reshape the flat 784-vector back to a 4-D image batch.
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # First conv + pool stage: 28x28x1 -> 28x28x32 -> 14x14x32
    h_conv1 = tf.nn.relu(conv2d(x_image, weights['conv1']) + biases['conv_b1'])
    h_pool1 = max_pool_2x2(h_conv1)

    # Second conv + pool stage: 14x14x32 -> 14x14x64 -> 7x7x64
    h_conv2 = tf.nn.relu(conv2d(h_pool1, weights['conv2']) + biases['conv_b2'])
    h_pool2 = max_pool_2x2(h_conv2)

    # Flatten and apply a fully connected layer.
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, weights['fc1']) + biases['fc1_b'])

    # Output layer with linear activation (raw logits; softmax is in the loss).
    out_layer = tf.matmul(h_fc1, weights['out']) + biases['out_b']
    return out_layer
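
Two rounds of 2x2 pooling shrink the 28x28 input to 7x7 (28 -> 14 -> 7), with
64 channels after the second convolution; that is where the 7*7*64 in the
flattening step comes from. With the layer sizes defined in the next cell, the
trainable parameter count works out as below (plain arithmetic, for reference):

In [ ]:
# Parameter count implied by the layer shapes (for reference only).
conv1 = 5*5*1*32 + 32             # 832
conv2 = 5*5*32*64 + 64            # 51,264
fc1 = 7*7*64*256 + 256            # 803,072 -- most of the capacity
out = 256*10 + 10                 # 2,570
print(conv1 + conv2 + fc1 + out)  # 857,738 parameters in total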

In [14]:
# Store layer weights & biases
weights = {
    'conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    'conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    'fc1': tf.Variable(tf.random_normal([7*7*64, 256])),
    'out': tf.Variable(tf.random_normal([256, n_classes]))
}
biases = {
    'conv_b1': tf.Variable(tf.random_normal([32])),
    'conv_b2': tf.Variable(tf.random_normal([64])),
    'fc1_b': tf.Variable(tf.random_normal([256])),
    'out_b': tf.Variable(tf.random_normal([n_classes]))
}
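# Note: unit-variance random_normal initializers are why the first epochs
# below start at a very large cost; a smaller scale such as
# tf.truncated_normal(shape, stddev=0.1) typically converges faster
# (a tuning suggestion, not part of the recorded run).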

# Construct model
pred = multilayer_perceptron(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()
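
The loss op applies the softmax itself, which is why the model returns raw
logits with no activation on the output layer. For intuition, the fused op
computes the same quantity as this hand-written cross-entropy (a sketch;
the fused version is more numerically stable and is what was actually used):

In [ ]:
# Illustrative only: manual softmax cross-entropy, mathematically equivalent
# to the fused op above but prone to log(0) issues in practice.
probs = tf.nn.softmax(pred)
manual_cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(probs), axis=1))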

In [15]:
# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                          y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", \
                "{:.9f}".format(avg_cost))
    print("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))


Epoch: 0001 cost= 2005.953651756
Epoch: 0002 cost= 361.200756125
Epoch: 0003 cost= 222.655593089
Epoch: 0004 cost= 154.397716973
Epoch: 0005 cost= 108.289408546
Epoch: 0006 cost= 83.728486200
Epoch: 0007 cost= 63.813128544
Epoch: 0008 cost= 52.091127872
Epoch: 0009 cost= 38.352929364
Epoch: 0010 cost= 30.455494692
Epoch: 0011 cost= 25.972187011
Epoch: 0012 cost= 20.754565103
Epoch: 0013 cost= 18.515140012
Epoch: 0014 cost= 14.170893429
Epoch: 0015 cost= 13.025495452
Epoch: 0016 cost= 11.380087092
Epoch: 0017 cost= 12.045677507
Epoch: 0018 cost= 9.095552578
Epoch: 0019 cost= 8.405252479
Epoch: 0020 cost= 7.802369204
Epoch: 0021 cost= 8.664561321
Epoch: 0022 cost= 6.413273589
Epoch: 0023 cost= 7.001173552
Epoch: 0024 cost= 3.928643572
Epoch: 0025 cost= 6.000280571
Epoch: 0026 cost= 3.947065584
Epoch: 0027 cost= 5.913655243
Epoch: 0028 cost= 4.686071558
Epoch: 0029 cost= 3.783876064
Epoch: 0030 cost= 3.133972832
Optimization Finished!
Accuracy: 0.98420006
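
Feeding the entire 10k-image test set in one eval is fine at this model size,
but it can exhaust memory for larger models. A batched variant would look
roughly like this (a sketch; it must run inside the session above):

In [ ]:
# Sketch: batched test-set evaluation (assumes the session is still open).
correct_count = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))
n_correct = 0.0
for start in range(0, mnist.test.num_examples, batch_size):
    xs = mnist.test.images[start:start + batch_size]
    ys = mnist.test.labels[start:start + batch_size]
    n_correct += sess.run(correct_count, feed_dict={x: xs, y: ys})
print("Accuracy:", n_correct / mnist.test.num_examples)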

In [ ]: