In [1]:
'''
A Recurrent Neural Network (LSTM) implementation example using the TensorFlow library.
This example uses the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Long Short-Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''


Out[1]:
'\nA Recurrent Neural Network (LSTM) implementation example using the TensorFlow library.\nThis example uses the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\nLong Short-Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf\n\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'

In [2]:
import tensorflow as tf
import numpy as np

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/", one_hot=True)


Extracting data/train-images-idx3-ubyte.gz
Extracting data/train-labels-idx1-ubyte.gz
Extracting data/t10k-images-idx3-ubyte.gz
Extracting data/t10k-labels-idx1-ubyte.gz
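
Because the dataset is loaded with one_hot=True, each label comes back as a
length-10 indicator vector rather than an integer class index. A minimal numpy
sketch of that encoding (illustrative only, not part of the original notebook):

import numpy as np
label = 7                     # hypothetical class index
one_hot = np.eye(10)[label]   # [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
print(one_hot)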

In [3]:
'''
To classify images using a recurrent neural network, we consider every image
row as a sequence of pixels. Because the MNIST image shape is 28*28px, we
handle 28 sequences of 28 steps for every sample.
'''


Out[3]:
'\nTo classify images using a recurrent neural network, we consider every image\nrow as a sequence of pixels. Because the MNIST image shape is 28*28px, we\nhandle 28 sequences of 28 steps for every sample.\n'
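
A minimal numpy sketch (not part of the original notebook) of the reshaping this
implies: a flattened 784-pixel MNIST image becomes a sequence of 28 rows of 28
pixel values each.

import numpy as np
flat = np.zeros((128, 784))         # hypothetical batch of 128 flattened images
seq = flat.reshape((128, 28, 28))   # 28 time steps, each a row of 28 pixels
print(seq.shape)                    # (128, 28, 28)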

In [4]:
# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10

# Network Parameters
n_input = 28 # MNIST data input (img shape: 28*28)
n_steps = 28 # timesteps
n_hidden = 128 # hidden layer num of features
n_classes = 10 # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}

In [5]:
def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
    
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(x, n_steps, 0)

    # Define a lstm cell with tensorflow
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

pred = RNN(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

# Create a summary to monitor cost tensor
tf.summary.scalar("loss", cost)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
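
The transpose/reshape/split pipeline inside RNN() can be hard to follow from the
comments alone. A minimal numpy sketch of the same shape manipulation
(illustrative only, not part of the original notebook):

import numpy as np
batch = np.zeros((128, 28, 28))       # (batch_size, n_steps, n_input)
batch = batch.transpose((1, 0, 2))    # (n_steps, batch_size, n_input)
batch = batch.reshape((-1, 28))       # (n_steps * batch_size, n_input)
steps = np.split(batch, 28, axis=0)   # list of 28 arrays of shape (batch_size, n_input)
print(len(steps), steps[0].shape)     # 28 (128, 28)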


In [6]:
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Write logs at every iteration
    summary_writer = tf.summary.FileWriter('./logs', graph=tf.get_default_graph())
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy, loss, summary
            loss, acc, summary = sess.run([cost, accuracy, merged_summary_op], feed_dict={x: batch_x, y: batch_y})
            print("Iter {}, Minibatch Loss={:.6f}, Training Accuracy={:.5f}".format(step*batch_size,
                                                                                    loss, acc))
            # Write logs at every iteration
            summary_writer.add_summary(summary, step)
        step += 1
    print("Optimization Finished!")

    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:{}", sess.run(accuracy, feed_dict={x: test_data, y: test_label}))


Iter 1280, Minibatch Loss=1.850991, Training Accuracy=0.37500
Iter 2560, Minibatch Loss=1.469731, Training Accuracy=0.51562
Iter 3840, Minibatch Loss=1.431035, Training Accuracy=0.51562
Iter 5120, Minibatch Loss=0.982424, Training Accuracy=0.69531
Iter 6400, Minibatch Loss=0.994084, Training Accuracy=0.69531
Iter 7680, Minibatch Loss=0.718169, Training Accuracy=0.78125
Iter 8960, Minibatch Loss=0.644436, Training Accuracy=0.78906
Iter 10240, Minibatch Loss=0.618100, Training Accuracy=0.78906
Iter 11520, Minibatch Loss=0.421887, Training Accuracy=0.88281
Iter 12800, Minibatch Loss=0.569169, Training Accuracy=0.82031
Iter 14080, Minibatch Loss=0.414553, Training Accuracy=0.85938
Iter 15360, Minibatch Loss=0.488589, Training Accuracy=0.85938
Iter 16640, Minibatch Loss=0.454605, Training Accuracy=0.85938
Iter 17920, Minibatch Loss=0.406162, Training Accuracy=0.89844
Iter 19200, Minibatch Loss=0.333620, Training Accuracy=0.85938
Iter 20480, Minibatch Loss=0.301693, Training Accuracy=0.91406
Iter 21760, Minibatch Loss=0.432863, Training Accuracy=0.87500
Iter 23040, Minibatch Loss=0.262039, Training Accuracy=0.90625
Iter 24320, Minibatch Loss=0.518962, Training Accuracy=0.87500
Iter 25600, Minibatch Loss=0.263870, Training Accuracy=0.93750
Iter 26880, Minibatch Loss=0.236379, Training Accuracy=0.94531
Iter 28160, Minibatch Loss=0.142551, Training Accuracy=0.94531
Iter 29440, Minibatch Loss=0.254552, Training Accuracy=0.90625
Iter 30720, Minibatch Loss=0.183150, Training Accuracy=0.94531
Iter 32000, Minibatch Loss=0.348286, Training Accuracy=0.91406
Iter 33280, Minibatch Loss=0.190060, Training Accuracy=0.93750
Iter 34560, Minibatch Loss=0.361382, Training Accuracy=0.92188
Iter 35840, Minibatch Loss=0.319349, Training Accuracy=0.89844
Iter 37120, Minibatch Loss=0.195606, Training Accuracy=0.93750
Iter 38400, Minibatch Loss=0.210143, Training Accuracy=0.92969
Iter 39680, Minibatch Loss=0.177194, Training Accuracy=0.95312
Iter 40960, Minibatch Loss=0.146507, Training Accuracy=0.95312
Iter 42240, Minibatch Loss=0.189851, Training Accuracy=0.94531
Iter 43520, Minibatch Loss=0.114747, Training Accuracy=0.95312
Iter 44800, Minibatch Loss=0.250252, Training Accuracy=0.92188
Iter 46080, Minibatch Loss=0.115071, Training Accuracy=0.95312
Iter 47360, Minibatch Loss=0.173876, Training Accuracy=0.96094
Iter 48640, Minibatch Loss=0.192100, Training Accuracy=0.96094
Iter 49920, Minibatch Loss=0.133873, Training Accuracy=0.95312
Iter 51200, Minibatch Loss=0.230836, Training Accuracy=0.92969
Iter 52480, Minibatch Loss=0.122366, Training Accuracy=0.96875
Iter 53760, Minibatch Loss=0.130333, Training Accuracy=0.94531
Iter 55040, Minibatch Loss=0.115707, Training Accuracy=0.93750
Iter 56320, Minibatch Loss=0.102408, Training Accuracy=0.96875
Iter 57600, Minibatch Loss=0.213717, Training Accuracy=0.93750
Iter 58880, Minibatch Loss=0.109637, Training Accuracy=0.96094
Iter 60160, Minibatch Loss=0.210731, Training Accuracy=0.94531
Iter 61440, Minibatch Loss=0.098948, Training Accuracy=0.96875
Iter 62720, Minibatch Loss=0.076676, Training Accuracy=0.96875
Iter 64000, Minibatch Loss=0.157073, Training Accuracy=0.96875
Iter 65280, Minibatch Loss=0.198886, Training Accuracy=0.95312
Iter 66560, Minibatch Loss=0.074782, Training Accuracy=0.98438
Iter 67840, Minibatch Loss=0.073292, Training Accuracy=0.96094
Iter 69120, Minibatch Loss=0.083488, Training Accuracy=0.96875
Iter 70400, Minibatch Loss=0.149059, Training Accuracy=0.96875
Iter 71680, Minibatch Loss=0.148837, Training Accuracy=0.96094
Iter 72960, Minibatch Loss=0.081009, Training Accuracy=0.97656
Iter 74240, Minibatch Loss=0.131656, Training Accuracy=0.95312
Iter 75520, Minibatch Loss=0.115235, Training Accuracy=0.94531
Iter 76800, Minibatch Loss=0.064070, Training Accuracy=0.99219
Iter 78080, Minibatch Loss=0.118000, Training Accuracy=0.96875
Iter 79360, Minibatch Loss=0.159600, Training Accuracy=0.96094
Iter 80640, Minibatch Loss=0.191849, Training Accuracy=0.96094
Iter 81920, Minibatch Loss=0.139820, Training Accuracy=0.96094
Iter 83200, Minibatch Loss=0.093283, Training Accuracy=0.98438
Iter 84480, Minibatch Loss=0.153460, Training Accuracy=0.95312
Iter 85760, Minibatch Loss=0.098471, Training Accuracy=0.96094
Iter 87040, Minibatch Loss=0.116068, Training Accuracy=0.96094
Iter 88320, Minibatch Loss=0.164814, Training Accuracy=0.96875
Iter 89600, Minibatch Loss=0.077761, Training Accuracy=0.97656
Iter 90880, Minibatch Loss=0.072286, Training Accuracy=0.96875
Iter 92160, Minibatch Loss=0.091345, Training Accuracy=0.97656
Iter 93440, Minibatch Loss=0.081065, Training Accuracy=0.96875
Iter 94720, Minibatch Loss=0.115442, Training Accuracy=0.96094
Iter 96000, Minibatch Loss=0.109781, Training Accuracy=0.95312
Iter 97280, Minibatch Loss=0.076925, Training Accuracy=0.96094
Iter 98560, Minibatch Loss=0.058535, Training Accuracy=0.98438
Iter 99840, Minibatch Loss=0.130346, Training Accuracy=0.96875
Optimization Finished!
Testing Accuracy: 0.984375
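
The figure above is computed on only the first 128 test images. A minimal sketch
(not part of the original notebook) of how the full 10,000-image test set could
be scored with the same graph, assuming the session is still open:

test_data = mnist.test.images.reshape((-1, n_steps, n_input))
test_label = mnist.test.labels
print("Full test accuracy: {:.5f}".format(
    sess.run(accuracy, feed_dict={x: test_data, y: test_label})))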

In [7]:
test_data = mnist.test.images[:1].reshape((-1, n_steps, n_input))
print(test_data)
test_label = mnist.test.labels[:1]
print(test_label)


[[[ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.
    0.32941177  0.72549021  0.62352943  0.59215689  0.23529413  0.14117648
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.
    0.8705883   0.99607849  0.99607849  0.99607849  0.99607849  0.9450981
    0.77647066  0.77647066  0.77647066  0.77647066  0.77647066  0.77647066
    0.77647066  0.77647066  0.66666669  0.20392159  0.          0.          0.
    0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.
    0.26274511  0.44705886  0.28235295  0.44705886  0.63921571  0.89019614
    0.99607849  0.88235301  0.99607849  0.99607849  0.99607849  0.98039222
    0.89803928  0.99607849  0.99607849  0.54901963  0.          0.          0.
    0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.06666667  0.25882354
    0.05490196  0.26274511  0.26274511  0.26274511  0.23137257  0.08235294
    0.92549026  0.99607849  0.41568631  0.          0.          0.          0.
    0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.32549021  0.99215692
    0.81960791  0.07058824  0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.08627451  0.91372555  1.
    0.32549021  0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.50588238  0.99607849  0.9333334
    0.17254902  0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.23137257  0.97647065  0.99607849  0.24313727
    0.          0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.52156866  0.99607849  0.73333335  0.01960784
    0.          0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.03529412  0.80392164  0.97254908  0.227451    0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.49411768  0.99607849  0.71372551  0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.29411766  0.98431379  0.94117653  0.22352943  0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.
    0.07450981  0.86666673  0.99607849  0.65098041  0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.01176471
    0.7960785   0.99607849  0.8588236   0.13725491  0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.14901961
    0.99607849  0.99607849  0.3019608   0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.12156864  0.87843144
    0.99607849  0.45098042  0.00392157  0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.52156866  0.99607849
    0.99607849  0.20392159  0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.2392157   0.94901967  0.99607849
    0.99607849  0.20392159  0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.47450984  0.99607849  0.99607849
    0.8588236   0.15686275  0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.47450984  0.99607849  0.81176478
    0.07058824  0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.        ]
  [ 0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.
    0.          0.          0.          0.          0.          0.          0.        ]]]
[[ 0.  0.  0.  0.  0.  0.  0.  1.  0.  0.]]

In [8]:
print(test_data.shape)
%matplotlib inline
import matplotlib.pyplot as plt
first_array=test_data[0]
# Scaling to uint8 (below) is not required just to visualize the array
#first_array=255*first_array
#first_array=first_array.astype("uint8")
plt.imshow(first_array)
print(np.argmax(test_label))


(1, 28, 28)
---------------------------------------------------------------------------
ImportError                               Traceback (most recent call last)
<ipython-input-8-04028daaa2d8> in <module>()
      1 print(test_data.shape)
----> 2 get_ipython().magic(u'matplotlib inline')
      3 import matplotlib.pyplot as plt
      4 first_array=test_data[0]
      5 # Scaling to uint8 (below) is not required just to visualize the array

/home/yj/.virtualenvs/caffe/local/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc in magic(self, arg_s)
   2161         magic_name, _, magic_arg_s = arg_s.partition(' ')
   2162         magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
-> 2163         return self.run_line_magic(magic_name, magic_arg_s)
   2164 
   2165     #-------------------------------------------------------------------------

/home/yj/.virtualenvs/caffe/local/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc in run_line_magic(self, magic_name, line)
   2082                 kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
   2083             with self.builtin_trap:
-> 2084                 result = fn(*args,**kwargs)
   2085             return result
   2086 

<decorator-gen-106> in matplotlib(self, line)

/home/yj/.virtualenvs/caffe/local/lib/python2.7/site-packages/IPython/core/magic.pyc in <lambda>(f, *a, **k)
    191     # but it's overkill for just that one bit of state.
    192     def magic_deco(arg):
--> 193         call = lambda f, *a, **k: f(*a, **k)
    194 
    195         if callable(arg):

/home/yj/.virtualenvs/caffe/local/lib/python2.7/site-packages/IPython/core/magics/pylab.pyc in matplotlib(self, line)
     98             print("Available matplotlib backends: %s" % backends_list)
     99         else:
--> 100             gui, backend = self.shell.enable_matplotlib(args.gui)
    101             self._show_matplotlib_backend(args.gui, backend)
    102 

/home/yj/.virtualenvs/caffe/local/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc in enable_matplotlib(self, gui)
   2937         """
   2938         from IPython.core import pylabtools as pt
-> 2939         gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
   2940 
   2941         if gui != 'inline':

/home/yj/.virtualenvs/caffe/local/lib/python2.7/site-packages/IPython/core/pylabtools.pyc in find_gui_and_backend(gui, gui_select)
    258     """
    259 
--> 260     import matplotlib
    261 
    262     if gui and gui != 'auto':

ImportError: No module named matplotlib
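
The traceback above is an environment problem rather than a code bug: matplotlib
is simply not installed in this virtualenv. A minimal sketch of the intended
visualization, assuming matplotlib has been installed (e.g. with
`pip install matplotlib`):

%matplotlib inline
import matplotlib.pyplot as plt

first_array = test_data[0]            # shape (28, 28), pixel values already in [0, 1]
plt.imshow(first_array, cmap='gray')  # no conversion to uint8 is needed for imshow
print(np.argmax(test_label))          # prints 7 for this sample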

In [ ]: