In [1]:
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K


Using TensorFlow backend.
Couldn't import dot_parser, loading of dot files will not be possible.

In [2]:
# Training hyperparameters.
batch_size = 128   # samples per gradient update
num_classes = 10   # MNIST digit classes 0-9
epochs = 12        # full passes over the training set

# MNIST images are 28x28 grayscale.
img_rows, img_cols = 28, 28

In [3]:
(X_Train, y_Train), (X_Test, y_Test) = mnist.load_data()

In [4]:
K.image_data_format()


Out[4]:
'channels_last'

In [5]:
# Append an explicit single-channel axis for the 'channels_last' layout
# confirmed above: (N, 28, 28) -> (N, 28, 28, 1). Using -1 lets numpy
# infer the batch dimension, equivalent to passing shape[0] explicitly.
X_Train = X_Train.reshape(-1, img_rows, img_cols, 1)
X_Test = X_Test.reshape(-1, img_rows, img_cols, 1)

In [6]:
# Convert the uint8 pixel values to float32 and scale into [0, 1].
# astype() already returns a copy, so dividing out-of-place here is
# equivalent to the in-place /= form.
X_Train = X_Train.astype('float32') / 255
X_Test = X_Test.astype('float32') / 255

In [7]:
# One-hot encode the integer class labels (0-9) into length-10 vectors
# to match the softmax output and categorical cross-entropy loss below.
y_Train, y_Test = (keras.utils.to_categorical(y, num_classes)
                   for y in (y_Train, y_Test))

In [8]:
# CNN: two convolution layers, one max-pool, dropout regularization,
# then a dense classification head. Passing the layer list to
# Sequential() is equivalent to repeated model.add() calls.
model = Sequential([
    # 32 filters of 3x3 on the (28, 28, 1) channels_last input.
    Conv2D(32, (3, 3), activation='elu', input_shape=(img_rows, img_cols, 1)),
    # NOTE(review): 65 filters is an unusual width — possibly a typo for
    # 64; preserved as-is, confirm with the author.
    Conv2D(65, (3, 3), activation='elu'),
    MaxPooling2D((2, 2)),
    Dropout(0.25),   # drop 25% of conv activations during training
    Flatten(),
    Dense(128, activation='elu'),
    Dropout(0.5),    # heavier dropout before the classifier
    Dense(num_classes, activation='softmax'),
])
model.compile(
    loss=keras.losses.categorical_crossentropy,
    optimizer=keras.optimizers.Adam(),
    metrics=['accuracy'],
)

In [9]:
# Train for `epochs` passes over the 60000 training samples;
# verbose=2 prints one summary line per epoch (seen in the output below).
# NOTE(review): the test set is reused as validation_data — it updates no
# weights, but per-epoch metrics are reported on the same data evaluated
# as the final score later, so the "test" accuracy is not fully held out
# from model-selection decisions.
model.fit(X_Train, y_Train,
         batch_size=batch_size,
         epochs=epochs,
         verbose=2,
         validation_data=(X_Test, y_Test))


Train on 60000 samples, validate on 10000 samples
Epoch 1/12
6s - loss: 0.2208 - acc: 0.9327 - val_loss: 0.0632 - val_acc: 0.9794
Epoch 2/12
5s - loss: 0.0939 - acc: 0.9711 - val_loss: 0.0487 - val_acc: 0.9846
Epoch 3/12
5s - loss: 0.0767 - acc: 0.9757 - val_loss: 0.0449 - val_acc: 0.9852
Epoch 4/12
5s - loss: 0.0657 - acc: 0.9798 - val_loss: 0.0464 - val_acc: 0.9846
Epoch 5/12
5s - loss: 0.0560 - acc: 0.9817 - val_loss: 0.0459 - val_acc: 0.9856
Epoch 6/12
5s - loss: 0.0506 - acc: 0.9840 - val_loss: 0.0452 - val_acc: 0.9861
Epoch 7/12
5s - loss: 0.0474 - acc: 0.9852 - val_loss: 0.0415 - val_acc: 0.9879
Epoch 8/12
5s - loss: 0.0429 - acc: 0.9856 - val_loss: 0.0508 - val_acc: 0.9853
Epoch 9/12
5s - loss: 0.0403 - acc: 0.9869 - val_loss: 0.0427 - val_acc: 0.9884
Epoch 10/12
6s - loss: 0.0374 - acc: 0.9874 - val_loss: 0.0426 - val_acc: 0.9884
Epoch 11/12
5s - loss: 0.0352 - acc: 0.9881 - val_loss: 0.0426 - val_acc: 0.9888
Epoch 12/12
5s - loss: 0.0371 - acc: 0.9879 - val_loss: 0.0422 - val_acc: 0.9888
Out[9]:
<keras.callbacks.History at 0x7f1685483fd0>

In [10]:
# Final evaluation on the test set; with metrics=['accuracy'] configured
# at compile time, evaluate() returns [loss, accuracy].
score = model.evaluate(X_Test, y_Test, verbose=0)
test_loss, test_accuracy = score
print("Test Loss:", test_loss)
print("Test Accuracy:", test_accuracy)


Test Loss: 0.0422385678928
Test Accuracy: 0.9888

In [11]:
# Persist the architecture (JSON) and the learned weights (HDF5) as two
# separate files; reloading requires model_from_json + load_weights.
with open("model.json", "w") as arch_file:
    arch_file.write(model.to_json())
model.save_weights("model.h5")
print("Saved model")


Saved model

In [ ]: