In [1]:
import numpy as np

from keras.datasets import mnist  # NOTE(review): unused — data comes from the local `mnist` module below
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils  # provides to_categorical for one-hot labels
# presumably a local helper module (Theano-tutorial style) that unpickles
# mnist.pkl.gz into (train, valid, test) pairs — verify it is not shadowed
from mnist import load_data


Using Theano backend.
Using gpu device 0: GeForce GTX 960 (CNMeM is disabled)

In [2]:
batch_size = 128  # SGD mini-batch size
nb_classes = 10   # MNIST digit classes 0-9
nb_epoch = 12     # full passes over the training set

In [3]:
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3

In [4]:
# Unpickle the MNIST splits (prints "... loading data"); load_data returns
# three (x, y) pairs — train (50k samples, per the fit log), validation, test.
datasets = load_data('mnist.pkl.gz')
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]  # NOTE(review): validation split is never used below
test_set_x, test_set_y = datasets[2]


... loading data

In [5]:
# These duplicate the hyperparameters already defined in cell 3; alias them
# instead of re-hardcoding the literals so the two cells cannot drift apart.
imrows, imcols = img_rows, img_cols  # used by the reshape cell below
layer_filters = nb_filters           # NOTE(review): unused — conv cells use nb_filters directly
pooling_size = nb_pool               # NOTE(review): unused — pooling cell uses nb_pool directly
recep_field = nb_conv                # used by the first Convolution2D layer

In [6]:
# reshape images
# Add a singleton channel axis, (n, 784) -> (n, 1, 28, 28) — the
# channels-first layout the Theano-backend conv layers expect.
# The leading -1 lets NumPy infer the sample count from the total size.
train_set_x = train_set_x.reshape(-1, 1, imrows, imcols)
test_set_x = test_set_x.reshape(-1, 1, imrows, imcols)

In [7]:
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(train_set_y, nb_classes)
Y_test = np_utils.to_categorical(test_set_y, nb_classes)

In [8]:
model = Sequential()

# First conv layer: nb_filters 3x3 kernels, 'valid' padding (no border),
# over a single-channel 28x28 input in Theano channels-first layout.
# NOTE(review): mixes constants from cell 3 (nb_filters, img_rows/img_cols)
# and cell 5 (recep_field) — they hold equal values but come from two cells.
model.add(Convolution2D(nb_filters,recep_field,recep_field,border_mode='valid',input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))

In [9]:
# Second 3x3 conv + ReLU, then 2x2 max pooling and 25% dropout
# to regularize the convolutional features.
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))

In [10]:
# Classifier head: flatten the conv feature maps, one 128-unit hidden
# ReLU layer with 50% dropout, then a softmax over the nb_classes digits.
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

In [11]:
# Adadelta + categorical cross-entropy; accuracy is requested at fit time
# via show_accuracy (old Keras API), not via compile(metrics=...).
model.compile(loss='categorical_crossentropy', optimizer='adadelta')

In [12]:
# Train for nb_epoch epochs. NOTE(review): the *test* set is passed as
# validation_data (the unused valid split would be the proper choice), so
# the val_acc printed below is not independent of the final evaluation.
model.fit(train_set_x, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          show_accuracy=True, verbose=1, validation_data=(test_set_x, Y_test))


Train on 50000 samples, validate on 10000 samples
Epoch 1/12
50000/50000 [==============================] - 8s - loss: 0.2783 - acc: 0.9135 - val_loss: 0.0620 - val_acc: 0.9798
Epoch 2/12
50000/50000 [==============================] - 8s - loss: 0.1018 - acc: 0.9687 - val_loss: 0.0642 - val_acc: 0.9791
Epoch 3/12
50000/50000 [==============================] - 8s - loss: 0.0759 - acc: 0.9770 - val_loss: 0.0387 - val_acc: 0.9858
Epoch 4/12
50000/50000 [==============================] - 8s - loss: 0.0646 - acc: 0.9808 - val_loss: 0.0343 - val_acc: 0.9885
Epoch 5/12
50000/50000 [==============================] - 7s - loss: 0.0558 - acc: 0.9828 - val_loss: 0.0315 - val_acc: 0.9890
Epoch 6/12
50000/50000 [==============================] - 7s - loss: 0.0481 - acc: 0.9854 - val_loss: 0.0372 - val_acc: 0.9890
Epoch 7/12
50000/50000 [==============================] - 8s - loss: 0.0436 - acc: 0.9862 - val_loss: 0.0258 - val_acc: 0.9905
Epoch 8/12
50000/50000 [==============================] - 8s - loss: 0.0401 - acc: 0.9875 - val_loss: 0.0274 - val_acc: 0.9903
Epoch 9/12
50000/50000 [==============================] - 8s - loss: 0.0370 - acc: 0.9885 - val_loss: 0.0265 - val_acc: 0.9915
Epoch 10/12
50000/50000 [==============================] - 8s - loss: 0.0331 - acc: 0.9895 - val_loss: 0.0289 - val_acc: 0.9915
Epoch 11/12
50000/50000 [==============================] - 8s - loss: 0.0311 - acc: 0.9900 - val_loss: 0.0314 - val_acc: 0.9909
Epoch 12/12
50000/50000 [==============================] - 8s - loss: 0.0286 - acc: 0.9911 - val_loss: 0.0273 - val_acc: 0.9908
Out[12]:
<keras.callbacks.History at 0x7f3ae73b86d0>

In [15]:
# Final evaluation on the test set; with show_accuracy=True the old Keras
# API returns a [loss, accuracy] pair (printed in the next cell).
score = model.evaluate(test_set_x, Y_test, show_accuracy=True, verbose=0)

In [16]:
# Function-call form works identically under Python 2 (single argument,
# parentheses are just grouping) and remains valid under Python 3.
# Prints the [loss, accuracy] pair produced by evaluate above.
print(score)


[0.027322418949444545, 0.99080000000000001]