In [1]:
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
# fix the NumPy random seed so weight initialization is reproducible
np.random.seed(1338)


Using Theano backend.
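
With the Theano backend, Keras 1.x defaults to channels-first ('th') image ordering, which the reshape in the next cell relies on. A quick sanity check (a sketch; skip it if you trust the backend default):

from keras import backend as K
print(K.image_dim_ordering())   # expect 'th' for the Theano backend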

In [2]:
#Loading the MNIST training and test data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
img_rows, img_cols = 28, 28
# reshape to (samples, channels, rows, cols) -- Theano's channels-first ordering
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
# scale pixel values from [0, 255] to [0, 1]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
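
A quick sanity check of the resulting tensors (a sketch; the expected values follow directly from the reshape and scaling above):

print(X_train.shape)                 # (60000, 1, 28, 28) -- channels-first
print(X_test.shape)                  # (10000, 1, 28, 28)
print(X_train.min(), X_train.max())  # 0.0 1.0 after scaling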

In [4]:
#Converting the class labels to their one-hot (binary categorical) form
nb_classes = 10
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
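
To illustrate what to_categorical produces: a single label such as 3 becomes a one-hot row of length nb_classes (a minimal example):

print(np_utils.to_categorical([3], nb_classes))
# roughly: [[ 0.  0.  0.  1.  0.  0.  0.  0.  0.  0.]]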

In [5]:
#Initializing the hyperparameters for the convolutional neural network
# number of training epochs
nb_epoch = 12
# number of samples per gradient update
batch_size = 128
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3

In [6]:
#Function for constructing, training, and evaluating the convolutional neural network
def build_model():

    np.random.seed(1338)

    model = Sequential()
    # two 'valid' 3x3 convolutions, each followed by a ReLU nonlinearity
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    model.add(Activation('relu'))
    # downsample with 2x2 max pooling, then regularize with dropout
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    # fully connected classifier head
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(X_test, Y_test))

    # evaluate the trained model on the test data
    score = model.evaluate(X_test, Y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
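
To see why the Flatten layer hands 4608 values to the Dense(128) layer, trace the feature-map sizes through the network (a quick check using the variables defined above):

# feature-map side length: 28 -> 26 -> 24 (two 'valid' 3x3 convs) -> 12 (2x2 pool)
side = img_rows
for _ in range(2):               # two 'valid' convolutions
    side = side - nb_conv + 1    # each shrinks the map by nb_conv - 1
side //= nb_pool                 # max pooling halves each dimension
print(nb_filters * side * side)  # 32 * 12 * 12 = 4608 inputs to Dense(128)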

In [7]:
#Timing how long it takes to train the model and evaluate it on the test data
%timeit -n1 -r1 build_model()


Train on 60000 samples, validate on 10000 samples
Epoch 1/12
60000/60000 [==============================] - 124s - loss: 0.2407 - acc: 0.9261 - val_loss: 0.0632 - val_acc: 0.9799
Epoch 2/12
60000/60000 [==============================] - 129s - loss: 0.0904 - acc: 0.9728 - val_loss: 0.0531 - val_acc: 0.9826
Epoch 3/12
60000/60000 [==============================] - 124s - loss: 0.0682 - acc: 0.9792 - val_loss: 0.0380 - val_acc: 0.9878
Epoch 4/12
60000/60000 [==============================] - 123s - loss: 0.0569 - acc: 0.9828 - val_loss: 0.0324 - val_acc: 0.9888
Epoch 5/12
60000/60000 [==============================] - 132s - loss: 0.0501 - acc: 0.9848 - val_loss: 0.0348 - val_acc: 0.9880
Epoch 6/12
60000/60000 [==============================] - 125s - loss: 0.0443 - acc: 0.9863 - val_loss: 0.0276 - val_acc: 0.9907
Epoch 7/12
60000/60000 [==============================] - 136s - loss: 0.0392 - acc: 0.9879 - val_loss: 0.0312 - val_acc: 0.9903
Epoch 8/12
60000/60000 [==============================] - 144s - loss: 0.0364 - acc: 0.9881 - val_loss: 0.0281 - val_acc: 0.9906
Epoch 9/12
60000/60000 [==============================] - 140s - loss: 0.0335 - acc: 0.9895 - val_loss: 0.0317 - val_acc: 0.9901
Epoch 10/12
60000/60000 [==============================] - 128s - loss: 0.0312 - acc: 0.9905 - val_loss: 0.0266 - val_acc: 0.9914
Epoch 11/12
60000/60000 [==============================] - 128s - loss: 0.0282 - acc: 0.9913 - val_loss: 0.0292 - val_acc: 0.9909
Epoch 12/12
60000/60000 [==============================] - 137s - loss: 0.0266 - acc: 0.9914 - val_loss: 0.0317 - val_acc: 0.9903
Test score: 0.0316715610399
Test accuracy: 0.9903
1 loop, best of 1: 26min 24s per loop
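
Note that this notebook uses the Keras 1.x API (positional kernel arguments to Convolution2D, border_mode, nb_epoch), matching the Theano-backend run above. On Keras 2.x the same architecture would be written roughly as below; this sketch assumes the data has been reshaped to channels-last, i.e. (n, img_rows, img_cols, 1), and that labels are encoded with keras.utils.to_categorical instead of np_utils:

# Keras 2.x sketch of the same model (assumes channels-last input data)
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D

model = Sequential()
model.add(Conv2D(nb_filters, kernel_size=(nb_conv, nb_conv),
                 padding='valid', input_shape=(img_rows, img_cols, 1)))
model.add(Activation('relu'))
model.add(Conv2D(nb_filters, kernel_size=(nb_conv, nb_conv)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
              metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))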
