''' A Bidirectional Recurrent Neural Network (LSTM) implementation example using the TensorFlow library. This example uses the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/) Long Short-Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf

Author: Aymeric Damien Project: https://github.com/aymericdamien/TensorFlow-Examples/ '''


In [ ]:
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

''' To classify images using a bidirectional recurrent neural network, we consider every image row as a sequence of pixels. Because the MNIST image shape is 28*28px, we will then handle sequences of 28 timesteps, with 28 pixels per step, for every sample. '''
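
''' To make that framing concrete, here is a small standalone sketch (numpy only; the array contents are placeholders) showing how one flat 784-pixel image becomes a 28-step sequence of 28-pixel rows: '''


In [ ]:
import numpy as np

# A flat MNIST image as produced by mnist.train.next_batch():
# 784 pixel values in a single vector (contents are placeholders here).
flat_image = np.zeros(784, dtype=np.float32)

# Reshape it into a sequence of 28 timesteps, each one a 28-pixel row.
sequence = flat_image.reshape(28, 28)
print(sequence.shape)  # (28, 28) -> (n_steps, n_input)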


In [2]:
# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10

# Network Parameters
n_input = 28 # MNIST data input (img shape: 28*28)
n_steps = 28 # timesteps
n_hidden = 128 # hidden layer num of features
n_classes = 10 # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Define weights
weights = {
    # Hidden layer weights => 2*n_hidden because of forward + backward cells
    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}
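
''' The 2*n_hidden size of the output weight matrix comes from the bidirectional wrapper concatenating the forward and backward cell outputs at every step. A minimal numpy sketch of that concatenation (shapes only; the arrays are placeholders): '''


In [ ]:
# Hypothetical per-step outputs of the two cells for a batch of 4 samples.
out_fw = np.zeros((4, n_hidden))  # forward cell output:  (batch_size, n_hidden)
out_bw = np.zeros((4, n_hidden))  # backward cell output: (batch_size, n_hidden)

# The bidirectional RNN concatenates the two along the feature axis,
# so the classification layer must accept 2*n_hidden features.
merged = np.concatenate([out_fw, out_bw], axis=1)
print(merged.shape)  # (4, 256) -> (batch_size, 2*n_hidden)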

In [3]:
def BiRNN(x, weights, biases):

    # Prepare data shape to match `static_bidirectional_rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
    
    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, n_steps, 1)

    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                              dtype=tf.float32)
    except Exception: # Old TensorFlow version only returns outputs not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                        dtype=tf.float32)

    # Linear activation, using rnn inner loop last output;
    # outputs[-1] has shape (batch_size, 2*n_hidden)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

pred = BiRNN(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()
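
''' Side note: the tf.unstack step exists only because rnn.static_bidirectional_rnn expects a Python list of per-timestep tensors. The same model can be sketched with tf.nn.bidirectional_dynamic_rnn, which consumes the (batch_size, n_steps, n_input) tensor directly. A minimal sketch, assuming a fresh graph (e.g. after tf.reset_default_graph()) so the LSTM variable scopes do not collide with the cells created above: '''


In [ ]:
def BiRNN_dynamic(x, weights, biases):
    # Same cells as above; built in a fresh graph to avoid scope clashes.
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # x keeps its (batch_size, n_steps, n_input) shape; no tf.unstack needed.
    (out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32)

    # Concatenate both directions at the last timestep, mirroring the
    # outputs[-1] used by the static version.
    last = tf.concat([out_fw[:, -1, :], out_bw[:, -1, :]], axis=1)
    return tf.matmul(last, weights['out']) + biases['out']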

In [4]:
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until the target number of examples has been seen
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch loss and accuracy in a single run
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))
        step += 1
    print "Optimization Finished!"

    # Calculate accuracy for 128 MNIST test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print "Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label})


Iter 1280, Minibatch Loss= 1.557283, Training Accuracy= 0.49219
Iter 2560, Minibatch Loss= 1.358445, Training Accuracy= 0.56250
Iter 3840, Minibatch Loss= 1.043732, Training Accuracy= 0.64062
Iter 5120, Minibatch Loss= 0.796770, Training Accuracy= 0.72656
Iter 6400, Minibatch Loss= 0.626206, Training Accuracy= 0.72656
Iter 7680, Minibatch Loss= 1.025919, Training Accuracy= 0.65625
Iter 8960, Minibatch Loss= 0.744850, Training Accuracy= 0.76562
Iter 10240, Minibatch Loss= 0.530111, Training Accuracy= 0.84375
Iter 11520, Minibatch Loss= 0.383806, Training Accuracy= 0.86719
Iter 12800, Minibatch Loss= 0.607816, Training Accuracy= 0.82812
Iter 14080, Minibatch Loss= 0.410879, Training Accuracy= 0.89062
Iter 15360, Minibatch Loss= 0.335351, Training Accuracy= 0.89844
Iter 16640, Minibatch Loss= 0.428004, Training Accuracy= 0.91406
Iter 17920, Minibatch Loss= 0.307468, Training Accuracy= 0.91406
Iter 19200, Minibatch Loss= 0.249527, Training Accuracy= 0.92188
Iter 20480, Minibatch Loss= 0.148163, Training Accuracy= 0.96094
Iter 21760, Minibatch Loss= 0.445275, Training Accuracy= 0.83594
Iter 23040, Minibatch Loss= 0.173083, Training Accuracy= 0.93750
Iter 24320, Minibatch Loss= 0.373696, Training Accuracy= 0.87500
Iter 25600, Minibatch Loss= 0.509869, Training Accuracy= 0.85938
Iter 26880, Minibatch Loss= 0.198096, Training Accuracy= 0.92969
Iter 28160, Minibatch Loss= 0.228221, Training Accuracy= 0.92188
Iter 29440, Minibatch Loss= 0.280088, Training Accuracy= 0.89844
Iter 30720, Minibatch Loss= 0.300495, Training Accuracy= 0.91406
Iter 32000, Minibatch Loss= 0.171746, Training Accuracy= 0.95312
Iter 33280, Minibatch Loss= 0.263745, Training Accuracy= 0.89844
Iter 34560, Minibatch Loss= 0.177300, Training Accuracy= 0.93750
Iter 35840, Minibatch Loss= 0.160621, Training Accuracy= 0.95312
Iter 37120, Minibatch Loss= 0.321745, Training Accuracy= 0.91406
Iter 38400, Minibatch Loss= 0.188322, Training Accuracy= 0.93750
Iter 39680, Minibatch Loss= 0.104025, Training Accuracy= 0.96875
Iter 40960, Minibatch Loss= 0.291053, Training Accuracy= 0.89062
Iter 42240, Minibatch Loss= 0.131189, Training Accuracy= 0.95312
Iter 43520, Minibatch Loss= 0.154949, Training Accuracy= 0.92969
Iter 44800, Minibatch Loss= 0.150411, Training Accuracy= 0.93750
Iter 46080, Minibatch Loss= 0.117008, Training Accuracy= 0.96094
Iter 47360, Minibatch Loss= 0.181344, Training Accuracy= 0.96094
Iter 48640, Minibatch Loss= 0.209197, Training Accuracy= 0.94531
Iter 49920, Minibatch Loss= 0.159350, Training Accuracy= 0.96094
Iter 51200, Minibatch Loss= 0.124001, Training Accuracy= 0.95312
Iter 52480, Minibatch Loss= 0.165183, Training Accuracy= 0.94531
Iter 53760, Minibatch Loss= 0.046438, Training Accuracy= 0.97656
Iter 55040, Minibatch Loss= 0.199995, Training Accuracy= 0.91406
Iter 56320, Minibatch Loss= 0.057071, Training Accuracy= 0.97656
Iter 57600, Minibatch Loss= 0.177065, Training Accuracy= 0.92188
Iter 58880, Minibatch Loss= 0.091666, Training Accuracy= 0.96094
Iter 60160, Minibatch Loss= 0.069232, Training Accuracy= 0.96875
Iter 61440, Minibatch Loss= 0.127353, Training Accuracy= 0.94531
Iter 62720, Minibatch Loss= 0.095795, Training Accuracy= 0.96094
Iter 64000, Minibatch Loss= 0.202651, Training Accuracy= 0.96875
Iter 65280, Minibatch Loss= 0.118779, Training Accuracy= 0.95312
Iter 66560, Minibatch Loss= 0.043173, Training Accuracy= 0.98438
Iter 67840, Minibatch Loss= 0.152280, Training Accuracy= 0.95312
Iter 69120, Minibatch Loss= 0.085301, Training Accuracy= 0.96875
Iter 70400, Minibatch Loss= 0.093421, Training Accuracy= 0.96094
Iter 71680, Minibatch Loss= 0.096358, Training Accuracy= 0.96875
Iter 72960, Minibatch Loss= 0.053386, Training Accuracy= 0.98438
Iter 74240, Minibatch Loss= 0.065237, Training Accuracy= 0.97656
Iter 75520, Minibatch Loss= 0.228090, Training Accuracy= 0.92188
Iter 76800, Minibatch Loss= 0.106751, Training Accuracy= 0.95312
Iter 78080, Minibatch Loss= 0.187795, Training Accuracy= 0.94531
Iter 79360, Minibatch Loss= 0.092611, Training Accuracy= 0.96094
Iter 80640, Minibatch Loss= 0.137386, Training Accuracy= 0.96875
Iter 81920, Minibatch Loss= 0.106634, Training Accuracy= 0.98438
Iter 83200, Minibatch Loss= 0.111749, Training Accuracy= 0.94531
Iter 84480, Minibatch Loss= 0.191184, Training Accuracy= 0.94531
Iter 85760, Minibatch Loss= 0.063982, Training Accuracy= 0.96094
Iter 87040, Minibatch Loss= 0.092380, Training Accuracy= 0.96875
Iter 88320, Minibatch Loss= 0.089899, Training Accuracy= 0.97656
Iter 89600, Minibatch Loss= 0.141107, Training Accuracy= 0.94531
Iter 90880, Minibatch Loss= 0.075549, Training Accuracy= 0.96094
Iter 92160, Minibatch Loss= 0.186539, Training Accuracy= 0.94531
Iter 93440, Minibatch Loss= 0.079639, Training Accuracy= 0.97656
Iter 94720, Minibatch Loss= 0.156895, Training Accuracy= 0.95312
Iter 96000, Minibatch Loss= 0.088042, Training Accuracy= 0.97656
Iter 97280, Minibatch Loss= 0.076670, Training Accuracy= 0.96875
Iter 98560, Minibatch Loss= 0.051336, Training Accuracy= 0.97656
Iter 99840, Minibatch Loss= 0.086923, Training Accuracy= 0.98438
Optimization Finished!
Testing Accuracy: 0.960938
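
''' The figure above is measured on only the first 128 test images. A sketch for scoring the full test set in batches (this would need to run inside the `with tf.Session()` block above, while `sess` is still open): '''


In [ ]:
# Count correct predictions over the whole test set, batch by batch.
correct_sum = tf.reduce_sum(tf.cast(correct_pred, tf.float32))
total_correct = 0.0
for i in range(0, mnist.test.num_examples, batch_size):
    batch_x = mnist.test.images[i:i + batch_size].reshape((-1, n_steps, n_input))
    batch_y = mnist.test.labels[i:i + batch_size]
    total_correct += sess.run(correct_sum, feed_dict={x: batch_x, y: batch_y})
print("Full Testing Accuracy:", total_correct / mnist.test.num_examples)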

In [ ]: