''' A Recurrent Neural Network (LSTM) implementation example using the TensorFlow library. This example uses the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/). Long Short-Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf

Author: Aymeric Damien Project: https://github.com/aymericdamien/TensorFlow-Examples/ '''


In [ ]:
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

''' To classify images using a recurrent neural network, we consider every image row as a sequence of pixels. Because the MNIST image shape is 28*28 px, we handle 28 sequences of 28 steps for every sample. '''

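''' To make the reshaping concrete: each flattened 784-pixel image is read as
28 rows of 28 pixels, so a batch goes from shape (batch_size, 784) to
(batch_size, 28, 28). A minimal NumPy sketch with a zero-filled stand-in batch
(not part of the original example): '''

In [ ]:
import numpy as np

flat_batch = np.zeros((128, 784), dtype=np.float32)  # stand-in for a batch from mnist.train.next_batch
seq_batch = flat_batch.reshape((128, 28, 28))        # 28 time steps of 28 features each
print(seq_batch.shape)                               # (128, 28, 28)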

In [2]:
# Parameters
learning_rate = 0.001
training_iters = 100000
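# NOTE: training_iters counts examples, not batches; 100,000 examples is
# ~781 batches of 128, i.e. under two passes over the 55,000-image train set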
batch_size = 128
display_step = 10

# Network Parameters
n_input = 28 # MNIST data input (img shape: 28*28)
n_steps = 28 # timesteps
n_hidden = 128 # hidden layer num of features
n_classes = 10 # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
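# (None leaves the batch dimension unspecified, so the same placeholders
#  serve the 128-image training batches and the test feed alike)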

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}

In [3]:
def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
    
    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, n_steps, 1)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
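    # `outputs` is a list of n_steps tensors, each of shape (batch_size, n_hidden);
    # `states` is the final LSTMStateTuple (c, h) after the last time step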

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

pred = RNN(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

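''' Aside (not in the original example): the same model can be written without
the unstack step by using tf.nn.dynamic_rnn, which consumes the
(batch_size, n_steps, n_input) tensor directly. A minimal sketch under the same
TF 1.x APIs; it is an alternative to, not an addition to, the RNN function
above, since both would create variables under the same default scope: '''

In [ ]:
def RNN_dynamic(x, weights, biases):
    # dynamic_rnn takes the 3-D (batch_size, n_steps, n_input) tensor as-is,
    # so no tf.unstack is needed
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # outputs has shape (batch_size, n_steps, n_hidden)
    outputs, states = tf.nn.dynamic_rnn(lstm_cell, x, dtype=tf.float32)
    # classify from the output of the last time step
    return tf.matmul(outputs[:, -1, :], weights['out']) + biases['out']
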
In [4]:
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: test_data, y: test_label}))


Iter 1280, Minibatch Loss= 1.576423, Training Accuracy= 0.51562
Iter 2560, Minibatch Loss= 1.450179, Training Accuracy= 0.53906
Iter 3840, Minibatch Loss= 1.160066, Training Accuracy= 0.64844
Iter 5120, Minibatch Loss= 0.898589, Training Accuracy= 0.73438
Iter 6400, Minibatch Loss= 0.685712, Training Accuracy= 0.75781
Iter 7680, Minibatch Loss= 1.085666, Training Accuracy= 0.64844
Iter 8960, Minibatch Loss= 0.681488, Training Accuracy= 0.73438
Iter 10240, Minibatch Loss= 0.557049, Training Accuracy= 0.82812
Iter 11520, Minibatch Loss= 0.340857, Training Accuracy= 0.92188
Iter 12800, Minibatch Loss= 0.596482, Training Accuracy= 0.78906
Iter 14080, Minibatch Loss= 0.486564, Training Accuracy= 0.84375
Iter 15360, Minibatch Loss= 0.302493, Training Accuracy= 0.90625
Iter 16640, Minibatch Loss= 0.334277, Training Accuracy= 0.92188
Iter 17920, Minibatch Loss= 0.222026, Training Accuracy= 0.90625
Iter 19200, Minibatch Loss= 0.228581, Training Accuracy= 0.92188
Iter 20480, Minibatch Loss= 0.150356, Training Accuracy= 0.96094
Iter 21760, Minibatch Loss= 0.415417, Training Accuracy= 0.86719
Iter 23040, Minibatch Loss= 0.159742, Training Accuracy= 0.94531
Iter 24320, Minibatch Loss= 0.333764, Training Accuracy= 0.89844
Iter 25600, Minibatch Loss= 0.379070, Training Accuracy= 0.88281
Iter 26880, Minibatch Loss= 0.241612, Training Accuracy= 0.91406
Iter 28160, Minibatch Loss= 0.200397, Training Accuracy= 0.93750
Iter 29440, Minibatch Loss= 0.197994, Training Accuracy= 0.93750
Iter 30720, Minibatch Loss= 0.330214, Training Accuracy= 0.89062
Iter 32000, Minibatch Loss= 0.174626, Training Accuracy= 0.92969
Iter 33280, Minibatch Loss= 0.202369, Training Accuracy= 0.93750
Iter 34560, Minibatch Loss= 0.240835, Training Accuracy= 0.94531
Iter 35840, Minibatch Loss= 0.207867, Training Accuracy= 0.93750
Iter 37120, Minibatch Loss= 0.313306, Training Accuracy= 0.90625
Iter 38400, Minibatch Loss= 0.089850, Training Accuracy= 0.96875
Iter 39680, Minibatch Loss= 0.184803, Training Accuracy= 0.92188
Iter 40960, Minibatch Loss= 0.236523, Training Accuracy= 0.92969
Iter 42240, Minibatch Loss= 0.174834, Training Accuracy= 0.94531
Iter 43520, Minibatch Loss= 0.127905, Training Accuracy= 0.93750
Iter 44800, Minibatch Loss= 0.120045, Training Accuracy= 0.96875
Iter 46080, Minibatch Loss= 0.068337, Training Accuracy= 0.98438
Iter 47360, Minibatch Loss= 0.141118, Training Accuracy= 0.95312
Iter 48640, Minibatch Loss= 0.182404, Training Accuracy= 0.92188
Iter 49920, Minibatch Loss= 0.176778, Training Accuracy= 0.93750
Iter 51200, Minibatch Loss= 0.098927, Training Accuracy= 0.97656
Iter 52480, Minibatch Loss= 0.158776, Training Accuracy= 0.96094
Iter 53760, Minibatch Loss= 0.031863, Training Accuracy= 0.99219
Iter 55040, Minibatch Loss= 0.101799, Training Accuracy= 0.96094
Iter 56320, Minibatch Loss= 0.176387, Training Accuracy= 0.96094
Iter 57600, Minibatch Loss= 0.096277, Training Accuracy= 0.96875
Iter 58880, Minibatch Loss= 0.137416, Training Accuracy= 0.94531
Iter 60160, Minibatch Loss= 0.062801, Training Accuracy= 0.97656
Iter 61440, Minibatch Loss= 0.036346, Training Accuracy= 0.98438
Iter 62720, Minibatch Loss= 0.153030, Training Accuracy= 0.92969
Iter 64000, Minibatch Loss= 0.117716, Training Accuracy= 0.95312
Iter 65280, Minibatch Loss= 0.048387, Training Accuracy= 0.99219
Iter 66560, Minibatch Loss= 0.070802, Training Accuracy= 0.97656
Iter 67840, Minibatch Loss= 0.221085, Training Accuracy= 0.96875
Iter 69120, Minibatch Loss= 0.184049, Training Accuracy= 0.93750
Iter 70400, Minibatch Loss= 0.094883, Training Accuracy= 0.95312
Iter 71680, Minibatch Loss= 0.087278, Training Accuracy= 0.96875
Iter 72960, Minibatch Loss= 0.153267, Training Accuracy= 0.95312
Iter 74240, Minibatch Loss= 0.161794, Training Accuracy= 0.94531
Iter 75520, Minibatch Loss= 0.103779, Training Accuracy= 0.96875
Iter 76800, Minibatch Loss= 0.165586, Training Accuracy= 0.96094
Iter 78080, Minibatch Loss= 0.137721, Training Accuracy= 0.95312
Iter 79360, Minibatch Loss= 0.124014, Training Accuracy= 0.96094
Iter 80640, Minibatch Loss= 0.051460, Training Accuracy= 0.99219
Iter 81920, Minibatch Loss= 0.185836, Training Accuracy= 0.96094
Iter 83200, Minibatch Loss= 0.147694, Training Accuracy= 0.94531
Iter 84480, Minibatch Loss= 0.061550, Training Accuracy= 0.98438
Iter 85760, Minibatch Loss= 0.093457, Training Accuracy= 0.96875
Iter 87040, Minibatch Loss= 0.094497, Training Accuracy= 0.98438
Iter 88320, Minibatch Loss= 0.093934, Training Accuracy= 0.96094
Iter 89600, Minibatch Loss= 0.061550, Training Accuracy= 0.96875
Iter 90880, Minibatch Loss= 0.082452, Training Accuracy= 0.97656
Iter 92160, Minibatch Loss= 0.087423, Training Accuracy= 0.97656
Iter 93440, Minibatch Loss= 0.032694, Training Accuracy= 0.99219
Iter 94720, Minibatch Loss= 0.069597, Training Accuracy= 0.97656
Iter 96000, Minibatch Loss= 0.193636, Training Accuracy= 0.96094
Iter 97280, Minibatch Loss= 0.134405, Training Accuracy= 0.96094
Iter 98560, Minibatch Loss= 0.072992, Training Accuracy= 0.96875
Iter 99840, Minibatch Loss= 0.041049, Training Accuracy= 0.99219
Optimization Finished!
Testing Accuracy: 0.960938
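
''' Note (not in the original example): the cell above scores only the first
128 test images. A minimal sketch for the full 10,000-image test set; it must
run inside the `with tf.Session()` block above, while the session is open: '''

In [ ]:
# Evaluate the whole MNIST test set in a single feed; 10,000 * 28 * 28
# float32 values fit comfortably in memory.
full_test_data = mnist.test.images.reshape((-1, n_steps, n_input))
full_test_label = mnist.test.labels
print("Testing Accuracy (full test set):",
      sess.run(accuracy, feed_dict={x: full_test_data, y: full_test_label}))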
