In [1]:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

In [2]:
# Download (if not already cached) and load MNIST into train/validation/test
# splits; one_hot=True encodes each digit label as a 10-dim one-hot vector.
mnist = input_data.read_data_sets("../Datasets/MNIST/", one_hot=True)


Extracting ../Datasets/MNIST/train-images-idx3-ubyte.gz
Extracting ../Datasets/MNIST/train-labels-idx1-ubyte.gz
Extracting ../Datasets/MNIST/t10k-images-idx3-ubyte.gz
Extracting ../Datasets/MNIST/t10k-labels-idx1-ubyte.gz

In [3]:
# Sanity-check the dataset shapes: 55k training / 10k test examples,
# 784 = 28x28 flattened pixels, 10 one-hot label classes.
# Fix: "traininig" -> "training" typo in the printed messages.
print('MNIST training set images:', mnist.train.images.shape)
print('MNIST training set labels:', mnist.train.labels.shape)
print('MNIST test set images:', mnist.test.images.shape)
print('MNIST test set labels:', mnist.test.labels.shape)


MNIST training set images: (55000, 784)
MNIST training set labels: (55000, 10)
MNIST test set images: (10000, 784)
MNIST test set labels: (10000, 10)

In [4]:
#define the neural net
n_1 = 100         # units in the single hidden layer
n_2 = 10          # output units, one per digit class (0-9)
batch_size = 100  # examples per SGD mini-batch

In [5]:
# Graph inputs: X holds flattened 28x28 images, y the one-hot labels.
X = tf.placeholder('float', [None, 784])
# Fix: give y an explicit [None, n_2] shape so TensorFlow rejects
# mis-shaped label feeds at graph time instead of failing silently.
y = tf.placeholder('float', [None, n_2])

In [6]:
def neural_net_model(X, n_input=784, n_hidden=n_1, n_classes=n_2):
    """Build a 2-layer fully connected network and return its logits.

    Architecture: input -> dense(n_hidden) -> ReLU -> dense(n_classes).
    Weights and biases are initialized from a standard normal via
    tf.random_normal.

    Args:
        X: float tensor of shape [batch, n_input], the flattened images.
        n_input: input feature dimension (default 784 = 28*28 pixels).
        n_hidden: hidden-layer width (default: module constant n_1).
        n_classes: number of output classes (default: module constant n_2).

    Returns:
        Logits tensor of shape [batch, n_classes] (no softmax applied;
        pair with softmax_cross_entropy_with_logits for training).
    """
    # Layer parameters, created fresh on every call.
    W1 = tf.Variable(tf.random_normal([n_input, n_hidden]))
    b1 = tf.Variable(tf.random_normal([n_hidden]))

    W2 = tf.Variable(tf.random_normal([n_hidden, n_classes]))
    b2 = tf.Variable(tf.random_normal([n_classes]))

    # Hidden layer with ReLU non-linearity.
    Z1 = tf.matmul(X, W1) + b1
    A1 = tf.nn.relu(Z1)

    # Output layer: raw logits, no activation.
    Z2 = tf.matmul(A1, W2) + b2

    return Z2

In [11]:
def train_neural_network(X):
    """Train the MLP on MNIST with Adam and print per-epoch loss and
    final test-set accuracy.

    Args:
        X: the input placeholder; the notebook-global `y` placeholder
           supplies the one-hot labels, and `mnist`/`batch_size` come
           from earlier cells.
    """
    y_ = neural_net_model(X)
    # _v2 replaces the deprecated softmax_cross_entropy_with_logits
    # (TF >= 1.5). Behavior is identical here: the only difference is
    # that _v2 allows gradients into `labels`, and `y` is a placeholder
    # so no gradient flows into it anyway.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=y_, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(
        cost)

    num_epochs = 50

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())

        for epoch in range(num_epochs):
            epoch_loss = 0

            # One pass over the training set, one optimizer step per batch.
            for _ in range(mnist.train.num_examples // batch_size):
                ex, ey = mnist.train.next_batch(batch_size)
                _, c = session.run([optimizer, cost],
                                   feed_dict={X: ex, y: ey})
                epoch_loss += c

            print('Epoch', epoch, '/', num_epochs, 'loss', epoch_loss)

        # Evaluate on the held-out test set: fraction of argmax matches
        # between predicted logits and one-hot labels.
        correct = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({X: mnist.test.images,
                                          y: mnist.test.labels}))

In [12]:
# Kick off training; prints 50 epoch losses followed by test accuracy.
train_neural_network(X)


Epoch 0 / 50 loss 11327.8911083
Epoch 1 / 50 loss 2468.31120396
Epoch 2 / 50 loss 1620.01331177
Epoch 3 / 50 loss 1212.80015841
Epoch 4 / 50 loss 966.119965628
Epoch 5 / 50 loss 794.298366524
Epoch 6 / 50 loss 668.586895261
Epoch 7 / 50 loss 569.185600482
Epoch 8 / 50 loss 489.69756758
Epoch 9 / 50 loss 422.509947735
Epoch 10 / 50 loss 367.728823163
Epoch 11 / 50 loss 320.247648891
Epoch 12 / 50 loss 281.808742669
Epoch 13 / 50 loss 248.236684531
Epoch 14 / 50 loss 218.806274018
Epoch 15 / 50 loss 193.697556652
Epoch 16 / 50 loss 171.586835374
Epoch 17 / 50 loss 152.455200034
Epoch 18 / 50 loss 134.983071653
Epoch 19 / 50 loss 120.163487326
Epoch 20 / 50 loss 106.763981999
Epoch 21 / 50 loss 94.3795946984
Epoch 22 / 50 loss 84.2924697781
Epoch 23 / 50 loss 74.0250246499
Epoch 24 / 50 loss 67.4630546325
Epoch 25 / 50 loss 59.8408673078
Epoch 26 / 50 loss 53.3330634968
Epoch 27 / 50 loss 47.699119626
Epoch 28 / 50 loss 42.1673633164
Epoch 29 / 50 loss 37.7393258693
Epoch 30 / 50 loss 33.9136206851
Epoch 31 / 50 loss 30.3999313523
Epoch 32 / 50 loss 25.9128826809
Epoch 33 / 50 loss 23.7119988474
Epoch 34 / 50 loss 21.0407776485
Epoch 35 / 50 loss 19.1104371544
Epoch 36 / 50 loss 17.1273262296
Epoch 37 / 50 loss 14.9561727709
Epoch 38 / 50 loss 12.7273364051
Epoch 39 / 50 loss 11.5810692544
Epoch 40 / 50 loss 10.1987457536
Epoch 41 / 50 loss 9.52566082914
Epoch 42 / 50 loss 7.90401929247
Epoch 43 / 50 loss 7.38633174911
Epoch 44 / 50 loss 5.75961560563
Epoch 45 / 50 loss 5.19252674716
Epoch 46 / 50 loss 5.44796222748
Epoch 47 / 50 loss 4.3318664981
Epoch 48 / 50 loss 4.10958547908
Epoch 49 / 50 loss 3.66447401663
Accuracy: 0.9501

In [ ]:


In [ ]: