In [1]:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# downloads MNIST to /tmp/data/ on the first run and loads it with one-hot labels
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)


Extracting /tmp/data/train-images-idx3-ubyte.gz
Extracting /tmp/data/train-labels-idx1-ubyte.gz
Extracting /tmp/data/t10k-images-idx3-ubyte.gz
Extracting /tmp/data/t10k-labels-idx1-ubyte.gz

In [2]:
learning_rate = 0.001
training_iters = 200000  # total number of training samples to process (not epochs)
batch_size = 128
display_step = 10

n_input = 784   # each MNIST image is 28x28 = 784 pixels, flattened
n_classes = 10  # digits 0-9

dropout = 0.75  # probability of keeping a unit during training

X = tf.placeholder(tf.float32, [None, n_input])
Y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)
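
As a quick sanity check (illustrative, not part of the original run), the loaded data already matches these placeholder shapes:

In [ ]:
print(mnist.train.images.shape)  # (55000, 784): flattened 28x28 grayscale images
print(mnist.train.labels.shape)  # (55000, 10): one-hot digit labels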

In [3]:
def conv2d(X, W, b, strides=1):
    X = tf.nn.conv2d(X, W, strides=[1, strides, strides, 1], padding='SAME')
    X = tf.nn.bias_add(X, b)
    return tf.nn.relu(X)

def maxpool2d(X, k=2):
    # ksize is the pooling window size; strides is the step between windows
    return tf.nn.max_pool(X, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')

def conv_net(X, weights, biases, dropout):
    X = tf.reshape(X, shape=[-1, 28, 28, 1])
    
    conv1 = conv2d(X, weights['wc1'], biases['bc1'])
    conv1 = maxpool2d(conv1,k=2)
    
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = maxpool2d(conv2, k=2)
    
    # flatten conv2's output to match the fully connected layer's input
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)

    # dropout; keep_prob is fed at run time (train < 1.0, eval = 1.0)
    fc1 = tf.nn.dropout(fc1, dropout)

    # class scores (logits); softmax is applied inside the loss
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
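
As a sanity check on the pooling arithmetic (a minimal sketch reusing the maxpool2d defined above; the tensor names are illustrative): two 'SAME'-padded 2x2 pools shrink 28x28 to 7x7, which is where the 7*7*64 input size of 'wd1' below comes from.

In [ ]:
x_check = tf.placeholder(tf.float32, [None, 28, 28, 1])
p1 = maxpool2d(x_check, k=2)
p2 = maxpool2d(p1, k=2)
print(p1.get_shape())  # (?, 14, 14, 1)
print(p2.get_shape())  # (?, 7, 7, 1)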

In [4]:
weights = {
    # 5x5 conv, 1 input channel (grayscale), 32 output channels
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 input channels, 64 output channels
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # fully connected: 7*7*64 inputs (after two 2x2 pools), 1024 outputs
    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
    # output layer: 1024 inputs, 10 class scores
    'out': tf.Variable(tf.random_normal([1024, n_classes]))
}

biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
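
Note that tf.random_normal defaults to stddev=1.0, which makes the initial logits, and hence the first losses in the training log below, very large. A common variant (illustrative only, not what this run used) is to draw from a narrower distribution:

In [ ]:
# Illustrative alternative (not used in the run below): a small stddev keeps
# the initial softmax cross-entropy near ln(10) ≈ 2.3 instead of in the thousands.
wc1_alt = tf.Variable(tf.random_normal([5, 5, 1, 32], stddev=0.1))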

In [ ]:
pred = conv_net(X, weights, biases, keep_prob)

# softmax_cross_entropy_with_logits requires keyword arguments in TF 1.x
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# tf.initialize_all_variables() is deprecated
init = tf.global_variables_initializer()
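
softmax_cross_entropy_with_logits fuses the softmax and the cross-entropy for numerical stability; an unfused equivalent (for illustration only, and prone to log(0) issues) would be:

In [ ]:
# Unfused equivalent of the loss above -- illustrative only, numerically unstable:
cost_naive = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(tf.nn.softmax(pred)), axis=1))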

In [ ]:
with tf.Session() as sess:
    sess.run(init)
    step = 1

    while step * batch_size < training_iters:
        batch_X, batch_Y = mnist.train.next_batch(batch_size)

        # train with dropout enabled
        sess.run(optimizer, feed_dict={X: batch_X, Y: batch_Y, keep_prob: dropout})

        if step % display_step == 0:
            # evaluate on the current batch with dropout disabled
            loss, acc = sess.run([cost, accuracy], feed_dict={X: batch_X, Y: batch_Y,
                                                              keep_prob: 1.})

            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))

        step += 1

    print("Optimization Finished!")
    # evaluate on the first 256 test images only, to keep the feed small
    print("Testing Accuracy: " + str(sess.run(accuracy, feed_dict={X: mnist.test.images[:256],
                                                                   Y: mnist.test.labels[:256],
                                                                   keep_prob: 1.})))


Iter 1280, Minibatch Loss= 24655.193359, Training Accuracy= 0.20312
Iter 2560, Minibatch Loss= 13318.351562, Training Accuracy= 0.50000
Iter 3840, Minibatch Loss= 6683.816406, Training Accuracy= 0.64062
Iter 5120, Minibatch Loss= 4439.487793, Training Accuracy= 0.75000
Iter 6400, Minibatch Loss= 3797.314941, Training Accuracy= 0.80469
Iter 7680, Minibatch Loss= 8002.142090, Training Accuracy= 0.73438
Iter 8960, Minibatch Loss= 3424.151367, Training Accuracy= 0.80469
Iter 10240, Minibatch Loss= 2609.185547, Training Accuracy= 0.82031
Iter 11520, Minibatch Loss= 1934.555420, Training Accuracy= 0.84375
Iter 12800, Minibatch Loss= 3136.123779, Training Accuracy= 0.82812
Iter 14080, Minibatch Loss= 1406.236938, Training Accuracy= 0.92969
Iter 15360, Minibatch Loss= 1129.215820, Training Accuracy= 0.93750
Iter 16640, Minibatch Loss= 2185.318359, Training Accuracy= 0.90625
Iter 17920, Minibatch Loss= 1143.690674, Training Accuracy= 0.92969
Iter 19200, Minibatch Loss= 1554.617065, Training Accuracy= 0.91406
Iter 20480, Minibatch Loss= 183.450684, Training Accuracy= 0.97656
Iter 21760, Minibatch Loss= 4240.766602, Training Accuracy= 0.84375
Iter 23040, Minibatch Loss= 607.102661, Training Accuracy= 0.97656
Iter 24320, Minibatch Loss= 1255.804443, Training Accuracy= 0.89844
Iter 25600, Minibatch Loss= 1451.037109, Training Accuracy= 0.89844
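
The cell above scores only the first 256 test images to keep the feed_dict small. A sketch of scoring the full 10,000-image test set in chunks (this would go inside the same tf.Session block, after training; not part of the original run):

In [ ]:
# Sketch only: average the accuracy over the whole test set in batches of 256,
# so no single feed_dict has to hold all 10,000 images at once.
n_test_batches = mnist.test.num_examples // 256
total_acc = 0.
for i in range(n_test_batches):
    xs = mnist.test.images[i*256:(i+1)*256]
    ys = mnist.test.labels[i*256:(i+1)*256]
    total_acc += sess.run(accuracy, feed_dict={X: xs, Y: ys, keep_prob: 1.})
print("Full test accuracy: " + str(total_acc / n_test_batches))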
