In [1]:
# Imports and reproducibility setup for a small Keras CNN classifier.
# NOTE(review): Python 2 print statements and the Keras 1.x API
# (Convolution2D, np_utils, image_dim_ordering) are used throughout.
import numpy as np
np.random.seed(1337)  # for reproducibility

from keras.datasets import mnist  # NOTE(review): unused in the visible cells
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from keras.callbacks import ModelCheckpoint

import pickle

# 'th' (Theano) ordering means channels-first tensors: (channels, rows, cols)
print "using ordering:", K.image_dim_ordering()


Using Theano backend.
using ordering: th

In [2]:
# Reload the preprocessed dataset (features X, labels y) serialized to
# disk by an earlier preprocessing step.

import pickle  # NOTE(review): duplicate of the top-cell import; harmless

pickle_file = '-data.pickle'

# NOTE(review): pickle.load can execute arbitrary code from the file --
# only load pickles produced by this project.
with open(pickle_file, 'rb') as f:
    save = pickle.load(f)
    X = save['X']  # image data; 3-D (samples, rows, cols) per the reshape below
    y = save['y']  # integer class labels, one per sample
    del save  # hint to help gc free up memory

In [3]:
# number of target classes
num_classes = 4

# image dimensions, taken from the loaded data (X assumed 3-D here)
img_rows, img_cols = X.shape[1], X.shape[2]

# Add an explicit single-channel (grayscale) axis in the position the
# backend expects: channels-first for Theano ('th'), channels-last otherwise.
if K.image_dim_ordering() == 'th':
    X = X.reshape(X.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X = X.reshape(X.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# One-hot encode integer labels into shape (samples, num_classes)
y = np_utils.to_categorical(y, num_classes)

print X.shape
print y.shape


(73486, 1, 32, 32)
(73486, 4)

In [4]:
# preview one sample from the reloaded X dataset to make sure nothing happened along the way

%matplotlib inline
from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt

# index of the sample to display
img_num = 1000

# Drop the channel axis (its position depends on backend ordering) to
# obtain a 2-D image for display.
if K.image_dim_ordering() == 'th':
    img = X[img_num][0,:,:]
else:
    img = X[img_num][:,:,0]

print img.shape
# vmin/vmax pin the display range to [0, 1]; presumably pixel values were
# normalized to that range during preprocessing -- verify upstream
imshow(img, cmap = plt.get_cmap('gray'), vmin = 0, vmax = 1,  interpolation='nearest')


(32, 32)
Out[4]:
<matplotlib.image.AxesImage at 0x7f1d95f0b9d0>

In [5]:
# Training hyperparameters.
batch_size = 32  # samples per gradient update
nb_epoch = 10    # full passes over the training set

# Convolutional architecture: three conv stages, each with its own
# square filter (patch) size and depth (number of filters).
patch_size_1 = patch_size_2 = patch_size_3 = 3
depth_1, depth_2, depth_3 = 32, 64, 128

# Side length of the square max-pooling window after each conv stage.
pool_size = 2

# Fully connected head: two hidden layers of equal width.
num_hidden_1 = num_hidden_2 = 512

# Dropout rate applied after each dense hidden layer.
dropout = 0.25

In [6]:
model = Sequential()

# Three conv -> ReLU -> max-pool stages with increasing filter depth.
conv_stages = [
    (depth_1, patch_size_1),
    (depth_2, patch_size_2),
    (depth_3, patch_size_3),
]
for stage_index, (depth, patch) in enumerate(conv_stages):
    if stage_index == 0:
        # Only the first layer declares the input shape.
        model.add(Convolution2D(depth, patch, patch,
                                border_mode='valid',
                                input_shape=input_shape))
    else:
        model.add(Convolution2D(depth, patch, patch,
                                border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

# Flatten the final feature maps into a vector for the dense head.
model.add(Flatten())

# Two dense hidden layers, each followed by ReLU and dropout.
for num_hidden in (num_hidden_1, num_hidden_2):
    model.add(Dense(num_hidden))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

# Output layer: one unit per class, softmax probabilities.
model.add(Dense(num_classes))
model.add(Activation('softmax'))

In [7]:
# Configure the training objective: categorical cross-entropy with the
# Adadelta optimizer, reporting accuracy alongside the loss.
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])

# Save the best model seen so far during training to disk
# (save_best_only monitors val_loss by default in Keras -- confirm).
checkpoint_name = "-model.hdf5"
checkpointer = ModelCheckpoint(checkpoint_name, verbose=0, save_best_only=True)

In [8]:
# Train for nb_epoch epochs; checkpointer persists the best model so far.
# NOTE(review): validation_split holds out the tail fraction of (X, y)
# without shuffling first -- fine only if samples are not ordered by class.
history = model.fit(X, y, validation_split=0.25, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=1, callbacks=[checkpointer])


Train on 55114 samples, validate on 18372 samples
Epoch 1/10
55114/55114 [==============================] - 185s - loss: 0.4447 - acc: 0.8178 - val_loss: 0.4975 - val_acc: 0.7934
Epoch 2/10
55114/55114 [==============================] - 177s - loss: 0.2826 - acc: 0.8846 - val_loss: 0.2481 - val_acc: 0.8965
Epoch 3/10
55114/55114 [==============================] - 146s - loss: 0.2326 - acc: 0.9073 - val_loss: 0.3796 - val_acc: 0.8525
Epoch 4/10
55114/55114 [==============================] - 146s - loss: 0.1892 - acc: 0.9264 - val_loss: 0.1924 - val_acc: 0.9217
Epoch 5/10
55114/55114 [==============================] - 145s - loss: 0.1515 - acc: 0.9412 - val_loss: 0.1553 - val_acc: 0.9397
Epoch 6/10
55114/55114 [==============================] - 149s - loss: 0.1225 - acc: 0.9540 - val_loss: 0.1914 - val_acc: 0.9236
Epoch 7/10
55114/55114 [==============================] - 148s - loss: 0.1022 - acc: 0.9606 - val_loss: 0.1074 - val_acc: 0.9575
Epoch 8/10
55114/55114 [==============================] - 148s - loss: 0.0866 - acc: 0.9670 - val_loss: 0.1137 - val_acc: 0.9571
Epoch 9/10
55114/55114 [==============================] - 145s - loss: 0.0741 - acc: 0.9718 - val_loss: 0.1309 - val_acc: 0.9536
Epoch 10/10
55114/55114 [==============================] - 144s - loss: 0.0638 - acc: 0.9762 - val_loss: 0.1818 - val_acc: 0.9442