''' A Recurrent Neural Network (LSTM) implementation example using the TensorFlow library. This example uses the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/) Long Short-Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf

Author: Aymeric Damien Project: https://github.com/aymericdamien/TensorFlow-Examples/ '''


In [1]:
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/MNIST/", one_hot=True)


Extracting data/MNIST/train-images-idx3-ubyte.gz
Extracting data/MNIST/train-labels-idx1-ubyte.gz
Extracting data/MNIST/t10k-images-idx3-ubyte.gz
Extracting data/MNIST/t10k-labels-idx1-ubyte.gz

''' To classify images using a recurrent neural network, we consider every image row as a sequence of pixels. Because the MNIST image shape is 28*28 px, we will then handle 28 sequences of 28 steps for every sample. '''


In [3]:
# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 100
display_step = 10

# Network Parameters
n_input = 28 # MNIST data input (img shape: 28*28)
n_steps = 28 # timesteps
n_hidden = 150 # hidden layer num of features
n_classes = 10 # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}

In [4]:
def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
    
    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, n_steps, 1)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

pred = RNN(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

In [8]:
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Calculate accuracy for 128 mnist test images
    test_len = 200
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label}))


Iter 1000, Minibatch Loss= 1.874949, Training Accuracy= 0.36000
Iter 2000, Minibatch Loss= 1.353046, Training Accuracy= 0.53000
Iter 3000, Minibatch Loss= 1.155910, Training Accuracy= 0.56000
Iter 4000, Minibatch Loss= 0.951262, Training Accuracy= 0.76000
Iter 5000, Minibatch Loss= 0.765478, Training Accuracy= 0.79000
Iter 6000, Minibatch Loss= 1.017228, Training Accuracy= 0.72000
Iter 7000, Minibatch Loss= 0.627647, Training Accuracy= 0.79000
Iter 8000, Minibatch Loss= 0.799816, Training Accuracy= 0.69000
Iter 9000, Minibatch Loss= 0.952224, Training Accuracy= 0.71000
Iter 10000, Minibatch Loss= 0.464142, Training Accuracy= 0.82000
Iter 11000, Minibatch Loss= 0.635689, Training Accuracy= 0.80000
Iter 12000, Minibatch Loss= 0.439846, Training Accuracy= 0.86000
Iter 13000, Minibatch Loss= 0.347363, Training Accuracy= 0.88000
Iter 14000, Minibatch Loss= 0.327679, Training Accuracy= 0.89000
Iter 15000, Minibatch Loss= 0.332494, Training Accuracy= 0.89000
Iter 16000, Minibatch Loss= 0.693066, Training Accuracy= 0.77000
Iter 17000, Minibatch Loss= 0.297323, Training Accuracy= 0.91000
Iter 18000, Minibatch Loss= 0.180413, Training Accuracy= 0.94000
Iter 19000, Minibatch Loss= 0.401889, Training Accuracy= 0.88000
Iter 20000, Minibatch Loss= 0.400645, Training Accuracy= 0.82000
Iter 21000, Minibatch Loss= 0.299183, Training Accuracy= 0.87000
Iter 22000, Minibatch Loss= 0.180329, Training Accuracy= 0.91000
Iter 23000, Minibatch Loss= 0.210516, Training Accuracy= 0.94000
Iter 24000, Minibatch Loss= 0.291695, Training Accuracy= 0.90000
Iter 25000, Minibatch Loss= 0.443077, Training Accuracy= 0.82000
Iter 26000, Minibatch Loss= 0.382372, Training Accuracy= 0.90000
Iter 27000, Minibatch Loss= 0.187876, Training Accuracy= 0.95000
Iter 28000, Minibatch Loss= 0.269733, Training Accuracy= 0.90000
Iter 29000, Minibatch Loss= 0.143875, Training Accuracy= 0.94000
Iter 30000, Minibatch Loss= 0.182848, Training Accuracy= 0.94000
Iter 31000, Minibatch Loss= 0.195116, Training Accuracy= 0.93000
Iter 32000, Minibatch Loss= 0.143959, Training Accuracy= 0.97000
Iter 33000, Minibatch Loss= 0.328421, Training Accuracy= 0.91000
Iter 34000, Minibatch Loss= 0.112657, Training Accuracy= 0.98000
Iter 35000, Minibatch Loss= 0.335462, Training Accuracy= 0.90000
Iter 36000, Minibatch Loss= 0.209571, Training Accuracy= 0.94000
Iter 37000, Minibatch Loss= 0.224569, Training Accuracy= 0.95000
Iter 38000, Minibatch Loss= 0.369561, Training Accuracy= 0.90000
Iter 39000, Minibatch Loss= 0.260849, Training Accuracy= 0.91000
Iter 40000, Minibatch Loss= 0.260469, Training Accuracy= 0.92000
Iter 41000, Minibatch Loss= 0.275292, Training Accuracy= 0.92000
Iter 42000, Minibatch Loss= 0.076988, Training Accuracy= 0.98000
Iter 43000, Minibatch Loss= 0.202321, Training Accuracy= 0.92000
Iter 44000, Minibatch Loss= 0.404642, Training Accuracy= 0.92000
Iter 45000, Minibatch Loss= 0.197941, Training Accuracy= 0.92000
Iter 46000, Minibatch Loss= 0.123726, Training Accuracy= 0.97000
Iter 47000, Minibatch Loss= 0.268655, Training Accuracy= 0.91000
Iter 48000, Minibatch Loss= 0.346132, Training Accuracy= 0.90000
Iter 49000, Minibatch Loss= 0.243077, Training Accuracy= 0.91000
Iter 50000, Minibatch Loss= 0.260850, Training Accuracy= 0.91000
Iter 51000, Minibatch Loss= 0.065117, Training Accuracy= 0.98000
Iter 52000, Minibatch Loss= 0.067339, Training Accuracy= 0.98000
Iter 53000, Minibatch Loss= 0.046059, Training Accuracy= 0.99000
Iter 54000, Minibatch Loss= 0.030664, Training Accuracy= 0.99000
Iter 55000, Minibatch Loss= 0.221561, Training Accuracy= 0.96000
Iter 56000, Minibatch Loss= 0.075141, Training Accuracy= 0.99000
Iter 57000, Minibatch Loss= 0.076046, Training Accuracy= 0.98000
Iter 58000, Minibatch Loss= 0.159267, Training Accuracy= 0.95000
Iter 59000, Minibatch Loss= 0.268979, Training Accuracy= 0.95000
Iter 60000, Minibatch Loss= 0.068022, Training Accuracy= 0.98000
Iter 61000, Minibatch Loss= 0.105439, Training Accuracy= 0.95000
Iter 62000, Minibatch Loss= 0.097037, Training Accuracy= 0.95000
Iter 63000, Minibatch Loss= 0.188382, Training Accuracy= 0.96000
Iter 64000, Minibatch Loss= 0.098991, Training Accuracy= 0.98000
Iter 65000, Minibatch Loss= 0.117438, Training Accuracy= 0.96000
Iter 66000, Minibatch Loss= 0.086143, Training Accuracy= 0.97000
Iter 67000, Minibatch Loss= 0.074440, Training Accuracy= 0.98000
Iter 68000, Minibatch Loss= 0.099668, Training Accuracy= 0.97000
Iter 69000, Minibatch Loss= 0.169603, Training Accuracy= 0.92000
Iter 70000, Minibatch Loss= 0.159168, Training Accuracy= 0.95000
Iter 71000, Minibatch Loss= 0.162972, Training Accuracy= 0.97000
Iter 72000, Minibatch Loss= 0.126981, Training Accuracy= 0.94000
Iter 73000, Minibatch Loss= 0.074411, Training Accuracy= 0.97000
Iter 74000, Minibatch Loss= 0.108263, Training Accuracy= 0.95000
Iter 75000, Minibatch Loss= 0.092773, Training Accuracy= 0.95000
Iter 76000, Minibatch Loss= 0.110137, Training Accuracy= 0.98000
Iter 77000, Minibatch Loss= 0.117668, Training Accuracy= 0.95000
Iter 78000, Minibatch Loss= 0.067477, Training Accuracy= 0.98000
Iter 79000, Minibatch Loss= 0.098639, Training Accuracy= 0.96000
Iter 80000, Minibatch Loss= 0.069231, Training Accuracy= 0.98000
Iter 81000, Minibatch Loss= 0.144012, Training Accuracy= 0.97000
Iter 82000, Minibatch Loss= 0.117523, Training Accuracy= 0.97000
Iter 83000, Minibatch Loss= 0.102269, Training Accuracy= 0.96000
Iter 84000, Minibatch Loss= 0.088203, Training Accuracy= 0.98000
Iter 85000, Minibatch Loss= 0.135140, Training Accuracy= 0.94000
Iter 86000, Minibatch Loss= 0.164327, Training Accuracy= 0.93000
Iter 87000, Minibatch Loss= 0.132425, Training Accuracy= 0.94000
Iter 88000, Minibatch Loss= 0.042271, Training Accuracy= 0.98000
Iter 89000, Minibatch Loss= 0.143239, Training Accuracy= 0.95000
Iter 90000, Minibatch Loss= 0.075762, Training Accuracy= 0.98000
Iter 91000, Minibatch Loss= 0.042989, Training Accuracy= 0.99000
Iter 92000, Minibatch Loss= 0.095229, Training Accuracy= 0.95000
Iter 93000, Minibatch Loss= 0.029802, Training Accuracy= 0.99000
Iter 94000, Minibatch Loss= 0.054088, Training Accuracy= 0.99000
Iter 95000, Minibatch Loss= 0.078777, Training Accuracy= 0.97000
Iter 96000, Minibatch Loss= 0.038222, Training Accuracy= 0.99000
Iter 97000, Minibatch Loss= 0.018318, Training Accuracy= 1.00000
Iter 98000, Minibatch Loss= 0.103747, Training Accuracy= 0.96000
Iter 99000, Minibatch Loss= 0.050270, Training Accuracy= 0.98000
Optimization Finished!
Testing Accuracy: 0.975

In [ ]: