MNIST Hierarchical RNN

An example of using a hierarchical RNN (HRNN) to classify MNIST digits: a first LSTM encodes each 28-pixel row of an image, and a second LSTM encodes the resulting sequence of 28 row vectors into a single representation for classification.

In [ ]:
# from: https://github.com/fchollet/keras/blob/master/examples/mnist_hierarchical_rnn.py

In [1]:
from __future__ import print_function

from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Input, Dense, TimeDistributed
from keras.layers import LSTM
from keras.utils import np_utils


Using TensorFlow backend.

In [2]:
# Training parameters.
batch_size = 32
nb_classes = 10
nb_epochs = 5

In [3]:
# Hidden state sizes for the row and column LSTM encoders.
row_hidden = 128
col_hidden = 128

In [4]:
# The data, shuffled and split between train and test sets.
(X_train, y_train), (X_test, y_test) = mnist.load_data()

In [5]:
# IPython introspection: displays the array's type, shape, and dtype in a pager.
X_train?

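Since the pager output of X_train? is not captured in this transcript, the same facts can be checked with plain prints (a minimal sketch):

In [ ]:
# Raw MNIST: uint8 images of shape (60000, 28, 28) and integer labels of shape (60000,).
print(X_train.shape, X_train.dtype)
print(y_train.shape, y_train.dtype)
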
In [6]:
# Reshapes data to 4D (samples, rows, cols, channels) for the Hierarchical RNN,
# then scales pixel values to [0, 1].
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')


X_train shape: (60000, 28, 28, 1)
60000 train samples
10000 test samples

In [7]:
# Converts class vectors to binary class matrices.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
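
As a quick illustration (a minimal sketch): to_categorical returns one one-hot row per label, with a 1 at the label's index.

In [ ]:
# Label 3 with nb_classes=10 maps to a row with a 1 at index 3.
print(np_utils.to_categorical([3], nb_classes))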

In [8]:
row, col, pixel = X_train.shape[1:]

In [9]:
# 4D input, counting the batch dimension: (batch, row, col, pixel).
x = Input(shape=(row, col, pixel))

In [10]:
# Encodes each row of pixels with an LSTM; the TimeDistributed wrapper applies
# the same LSTM independently to all 28 rows of an image.
encoded_rows = TimeDistributed(LSTM(output_dim=row_hidden))(x)

In [11]:
# Encodes the sequence of 28 row encodings with a second LSTM.
encoded_columns = LSTM(col_hidden)(encoded_rows)
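
The hierarchy is easiest to follow through the intermediate tensor shapes (a minimal sketch, assuming the TensorFlow backend reported above):

In [ ]:
# Per-sample shape flow:
#   x                (28, 28, 1)  an image as 28 rows of 28 single-channel pixels
#   encoded_rows     (28, 128)    the row LSTM maps each row to a 128-d vector
#   encoded_columns  (128,)       the column LSTM summarizes the 28 row vectors
print(encoded_rows)
print(encoded_columns)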

In [12]:
# Final predictions and model.
prediction = Dense(nb_classes, activation='softmax')(encoded_columns)
model = Model(input=x, output=prediction)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
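
Before training, the layer output shapes and parameter counts can be confirmed:

In [ ]:
# Print a layer-by-layer summary of output shapes and parameter counts.
model.summary()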

In [13]:
# Training.
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
          verbose=1, validation_data=(X_test, Y_test))


Train on 60000 samples, validate on 10000 samples
Epoch 1/5
60000/60000 [==============================] - 861s - loss: 0.2807 - acc: 0.9097 - val_loss: 0.1012 - val_acc: 0.9713
Epoch 2/5
60000/60000 [==============================] - 876s - loss: 0.0976 - acc: 0.9712 - val_loss: 0.0769 - val_acc: 0.9771
Epoch 3/5
60000/60000 [==============================] - 769s - loss: 0.0707 - acc: 0.9786 - val_loss: 0.0578 - val_acc: 0.9831
Epoch 4/5
60000/60000 [==============================] - 755s - loss: 0.0582 - acc: 0.9831 - val_loss: 0.0797 - val_acc: 0.9751
Epoch 5/5
60000/60000 [==============================] - 782s - loss: 0.0492 - acc: 0.9854 - val_loss: 0.0478 - val_acc: 0.9863
Out[13]:
<keras.callbacks.History at 0x7fd04c1e3ad0>

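The upstream script ends by scoring the trained model on the test set; a minimal sketch of that step:

In [ ]:
# Evaluate test loss and accuracy, as in the upstream example script.
scores = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])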