Imports


In [1]:
import matplotlib.pyplot as plt
import random as rn
import numpy as np
from sklearn.model_selection import train_test_split
import pickle
from keras.models import Sequential
from keras.layers import Conv2D, Dense, MaxPool2D, Input, Activation, Flatten, Dropout
from keras.utils import np_utils
%matplotlib inline


Using TensorFlow backend.

Loading Data


In [2]:
train_data = 'data/train.p'
test_data = 'data/test.p'

with open(train_data, 'rb') as f:
    train = pickle.load(f)

with open(test_data, 'rb') as f:
    test = pickle.load(f)
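
Each pickle holds a dict with the image array under 'features' and the integer labels under 'labels', as used below; a quick check (not part of the original run):


In [ ]:
# Sanity check of the pickle structure: a dict of image and label arrays.
print(train.keys())
print(train['features'].shape, train['labels'].shape)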

Data Info

Splitting the train data into training and validation sets


In [3]:
X_train, X_val, Y_train, Y_val = train_test_split(train['features'], train['labels'], test_size=0.3, random_state=0)
X_test, Y_test = test['features'], test['labels']

n_train = X_train.shape[0]
n_val = X_val.shape[0]
n_test = X_test.shape[0]
image_shape = X_train.shape[1], X_train.shape[2]
n_channels = X_train.shape[3]
n_classes = np.unique(train['labels']).size

print('Train data size:\t\t\t', n_train)
print('Validation data size:\t\t\t', n_val)
print('Test data size:\t\t\t\t', n_test)
print('Image shape:\t\t\t\t', image_shape)
print('Number of color channels in image:\t', n_channels)
print('Number of classes:\t\t\t', n_classes)


Train data size:			 27446
Validation data size:			 11763
Test data size:				 12630
Image shape:				 (32, 32)
Number of color channels in image:	 3
Number of classes:			 43

Data Normalization

Scale each dataset so its values have approximately zero mean and unit standard deviation.


In [4]:
def normalize(arr):
    # Zero-center the data and scale it to unit standard deviation.
    arr = arr.astype('float32')
    return (arr - np.mean(arr)) / np.std(arr)

X_train_norm = normalize(X_train)
X_val_norm = normalize(X_val)
X_test_norm = normalize(X_test)

def print_info(st, arr_1, arr_2):
    print('{} Data: Before normalization: type: {}, mean: {}, std: {}. '
          'After normalization: type: {}, mean: {}, std: {}'.format(
              st, arr_1.dtype, round(np.mean(arr_1), 2), round(np.std(arr_1), 2),
              arr_2.dtype, round(np.mean(arr_2), 2), round(np.std(arr_2), 2)))

print_info('Train', X_train, X_train_norm)
print_info('Validation', X_val, X_val_norm)
print_info('Test', X_test, X_test_norm)


Train Data: Before normalization: type: uint8, mean: 82.54, std: 68.96. After normalization: type: float32, mean: 0.0, std: 1.0
Validation Data: Before normalization: type: uint8, mean: 82.95, std: 69.22. After normalization: type: float32, mean: -0.0, std: 1.0
Test Data: Before normalization: type: uint8, mean: 82.15, std: 68.74. After normalization: type: float32, mean: -0.0, std: 1.0
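
Each split is normalized with its own statistics here. A common refinement is to compute the mean and standard deviation on the training set only and reuse them everywhere, so that no validation- or test-set statistics leak into preprocessing; a minimal sketch:


In [ ]:
# Variant (a sketch, not what this notebook ran): normalize every split
# with the training-set statistics only.
train_mean = np.mean(X_train.astype('float32'))
train_std = np.std(X_train.astype('float32'))

def normalize_with_train_stats(arr):
    return (arr.astype('float32') - train_mean) / train_std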

One-hot encode the class labels.


In [5]:
def make_categorical(arr):
    return np_utils.to_categorical(arr, n_classes)

Y_train_cat = make_categorical(Y_train)
Y_val_cat = make_categorical(Y_val)
Y_test_cat = make_categorical(Y_test)
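
For example, with n_classes = 43 the label 2 becomes a length-43 row that is all zeros except for a 1 at index 2:


In [ ]:
# Illustration: one integer label -> one indicator row of length n_classes.
example = np_utils.to_categorical([2], n_classes)
print(example.shape)   # (1, 43)
print(example[0][:5])  # [ 0.  0.  1.  0.  0.]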

Some Random Images

Before normalization


In [6]:
trc = rn.sample(range(n_test), 16)  # 16 random test-set indices to visualize throughout

In [7]:
def plot_images(arr_1, arr_2, pred):
    # Show the 16 sampled images with their true (and, if given, predicted) labels.
    fig, axes = plt.subplots(4, 4, figsize=(10, 10))
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for i, ax in enumerate(axes.flat):
        ax.imshow(arr_1[trc[i]])
        if isinstance(pred, np.ndarray):
            ax.set_xlabel('true:{}, pred:{}'.format(arr_2[trc[i]], pred[trc[i]]))
        else:
            ax.set_xlabel('true:{}'.format(arr_2[trc[i]]))
        ax.set_xticks([])
        ax.set_yticks([])

plot_images(X_test, Y_test, pred=False)


After normalization (imshow clips the float pixel values to [0, 1], so the normalized images look distorted)


In [8]:
plot_images(X_test_norm, Y_test, pred=False)


Build the Model with Keras


In [9]:
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(32, 32, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(43, activation='softmax'))

In [10]:
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 30, 30, 32)        896       
_________________________________________________________________
flatten_1 (Flatten)          (None, 28800)             0         
_________________________________________________________________
dense_1 (Dense)              (None, 128)               3686528   
_________________________________________________________________
dense_2 (Dense)              (None, 43)                5547      
=================================================================
Total params: 3,692,971
Trainable params: 3,692,971
Non-trainable params: 0
_________________________________________________________________
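
As a quick sanity check (not part of the original run), the parameter counts above can be reproduced by hand:


In [ ]:
# Each 3x3 filter spans 3 input channels and carries one bias term.
conv = (3 * 3 * 3 + 1) * 32          # 896
dense_1 = 30 * 30 * 32 * 128 + 128   # 28800 flattened inputs -> 3686528
dense_2 = 128 * 43 + 43              # 5547
print(conv + dense_1 + dense_2)      # 3692971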

Train the Model


In [11]:
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

In [12]:
history = model.fit(X_train_norm, Y_train_cat, batch_size=64, epochs=20, verbose=1, validation_data=(X_val_norm, Y_val_cat))


Train on 27446 samples, validate on 11763 samples
Epoch 1/20
27446/27446 [==============================] - 104s - loss: 0.9956 - acc: 0.7513 - val_loss: 0.3330 - val_acc: 0.9146
Epoch 2/20
27446/27446 [==============================] - 114s - loss: 0.2065 - acc: 0.9456 - val_loss: 0.2591 - val_acc: 0.9305
Epoch 3/20
27446/27446 [==============================] - 107s - loss: 0.1112 - acc: 0.9705 - val_loss: 0.1878 - val_acc: 0.9554
Epoch 4/20
27446/27446 [==============================] - 111s - loss: 0.0767 - acc: 0.9786 - val_loss: 0.1999 - val_acc: 0.9506
Epoch 5/20
27446/27446 [==============================] - 112s - loss: 0.0685 - acc: 0.9804 - val_loss: 0.1509 - val_acc: 0.9661
Epoch 6/20
27446/27446 [==============================] - 100s - loss: 0.0424 - acc: 0.9890 - val_loss: 0.1841 - val_acc: 0.9586
Epoch 7/20
27446/27446 [==============================] - 100s - loss: 0.0626 - acc: 0.9830 - val_loss: 0.1789 - val_acc: 0.9616
Epoch 8/20
27446/27446 [==============================] - 99s - loss: 0.0544 - acc: 0.9859 - val_loss: 0.1998 - val_acc: 0.9593
Epoch 9/20
27446/27446 [==============================] - 100s - loss: 0.0403 - acc: 0.9893 - val_loss: 0.1911 - val_acc: 0.9634
Epoch 10/20
27446/27446 [==============================] - 99s - loss: 0.0302 - acc: 0.9925 - val_loss: 0.1587 - val_acc: 0.9682
Epoch 11/20
27446/27446 [==============================] - 99s - loss: 0.0329 - acc: 0.9915 - val_loss: 0.1805 - val_acc: 0.9682
Epoch 12/20
27446/27446 [==============================] - 103s - loss: 0.0326 - acc: 0.9912 - val_loss: 0.2134 - val_acc: 0.9628
Epoch 13/20
27446/27446 [==============================] - 120s - loss: 0.0375 - acc: 0.9893 - val_loss: 0.1849 - val_acc: 0.9691
Epoch 14/20
27446/27446 [==============================] - 118s - loss: 0.0250 - acc: 0.9927 - val_loss: 0.1724 - val_acc: 0.9714
Epoch 15/20
27446/27446 [==============================] - 115s - loss: 0.0285 - acc: 0.9933 - val_loss: 0.2386 - val_acc: 0.9642
Epoch 16/20
27446/27446 [==============================] - 115s - loss: 0.0340 - acc: 0.9911 - val_loss: 0.2238 - val_acc: 0.9648
Epoch 17/20
27446/27446 [==============================] - 119s - loss: 0.0245 - acc: 0.9935 - val_loss: 0.2012 - val_acc: 0.9668
Epoch 18/20
27446/27446 [==============================] - 114s - loss: 0.0220 - acc: 0.9942 - val_loss: 0.2412 - val_acc: 0.9594
Epoch 19/20
27446/27446 [==============================] - 113s - loss: 0.0272 - acc: 0.9924 - val_loss: 0.1489 - val_acc: 0.9753
Epoch 20/20
27446/27446 [==============================] - 116s - loss: 0.0157 - acc: 0.9953 - val_loss: 0.2104 - val_acc: 0.9696
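
The History object returned by fit is captured but never used; a minimal sketch for plotting the learning curves, using the 'acc'/'val_acc' keys this Keras version records:


In [ ]:
# Plot training vs. validation accuracy over the 20 epochs.
plt.plot(history.history['acc'], label='train')
plt.plot(history.history['val_acc'], label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()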

Model Evaluation

The score returned by model.evaluate is the categorical cross-entropy loss on the test set; acc is the fraction of correctly classified test images.


In [13]:
score, acc = model.evaluate(X_test_norm, Y_test_cat, batch_size=64, verbose=0)

In [14]:
print('Score: ', score)
print('Acc: ', acc)


Score:  0.879196501911
Acc:  0.888202691994

Predicted Classes


In [15]:
Y_pred = model.predict_classes(X_test_norm, batch_size=64, verbose=0)

In [16]:
plot_images(X_test, Y_test, Y_pred)


Build the Model with a Pooling Layer


In [17]:
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(32, 32, 3), activation='relu'))
model.add(MaxPool2D((2,2)))
model.add(Activation('relu'))  # redundant: the conv layer above is already ReLU-activated
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(43, activation='softmax'))

In [18]:
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_2 (Conv2D)            (None, 30, 30, 32)        896       
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 15, 15, 32)        0         
_________________________________________________________________
activation_1 (Activation)    (None, 15, 15, 32)        0         
_________________________________________________________________
flatten_2 (Flatten)          (None, 7200)              0         
_________________________________________________________________
dense_3 (Dense)              (None, 128)               921728    
_________________________________________________________________
dense_4 (Dense)              (None, 43)                5547      
=================================================================
Total params: 928,171
Trainable params: 928,171
Non-trainable params: 0
_________________________________________________________________
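
Compared with the previous model, the total parameter count drops from about 3.69M to about 0.93M. Almost all of the saving is in the first dense layer, because the 2x2 max-pool halves each spatial dimension (a quick check, not part of the original run):


In [ ]:
# First dense layer parameters without vs. with pooling.
print(30 * 30 * 32 * 128 + 128)  # 3686528 (flattened 28800 inputs)
print(15 * 15 * 32 * 128 + 128)  # 921728 (flattened 7200 inputs)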

Train the Model


In [19]:
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

In [20]:
history = model.fit(X_train_norm, Y_train_cat, batch_size=64, epochs=20, verbose=1, validation_data=(X_val_norm, Y_val_cat))


Train on 27446 samples, validate on 11763 samples
Epoch 1/20
27446/27446 [==============================] - 70s - loss: 0.9746 - acc: 0.7484 - val_loss: 0.3592 - val_acc: 0.9036
Epoch 2/20
27446/27446 [==============================] - 67s - loss: 0.2285 - acc: 0.9417 - val_loss: 0.2109 - val_acc: 0.9461
Epoch 3/20
27446/27446 [==============================] - 65s - loss: 0.1227 - acc: 0.9691 - val_loss: 0.2018 - val_acc: 0.9443
Epoch 4/20
27446/27446 [==============================] - 69s - loss: 0.1105 - acc: 0.9701 - val_loss: 0.1714 - val_acc: 0.9559
Epoch 5/20
27446/27446 [==============================] - 74s - loss: 0.0674 - acc: 0.9820 - val_loss: 0.1357 - val_acc: 0.9697
Epoch 6/20
27446/27446 [==============================] - 73s - loss: 0.0736 - acc: 0.9807 - val_loss: 0.1396 - val_acc: 0.9697
Epoch 7/20
27446/27446 [==============================] - 68s - loss: 0.0384 - acc: 0.9908 - val_loss: 0.1213 - val_acc: 0.9733
Epoch 8/20
27446/27446 [==============================] - 66s - loss: 0.0441 - acc: 0.9889 - val_loss: 0.1775 - val_acc: 0.9631
Epoch 9/20
27446/27446 [==============================] - 64s - loss: 0.0571 - acc: 0.9853 - val_loss: 0.2291 - val_acc: 0.9520
Epoch 10/20
27446/27446 [==============================] - 66s - loss: 0.0551 - acc: 0.9870 - val_loss: 0.1515 - val_acc: 0.9698
Epoch 11/20
27446/27446 [==============================] - 64s - loss: 0.0269 - acc: 0.9938 - val_loss: 0.1399 - val_acc: 0.9736
Epoch 12/20
27446/27446 [==============================] - 67s - loss: 0.0334 - acc: 0.9917 - val_loss: 0.1398 - val_acc: 0.9737
Epoch 13/20
27446/27446 [==============================] - 69s - loss: 0.0308 - acc: 0.9921 - val_loss: 0.1593 - val_acc: 0.9704
Epoch 14/20
27446/27446 [==============================] - 73s - loss: 0.0185 - acc: 0.9949 - val_loss: 0.1089 - val_acc: 0.9811
Epoch 15/20
27446/27446 [==============================] - 72s - loss: 0.0325 - acc: 0.9918 - val_loss: 0.1878 - val_acc: 0.9663
Epoch 16/20
27446/27446 [==============================] - 64s - loss: 0.0298 - acc: 0.9918 - val_loss: 0.1381 - val_acc: 0.9733
Epoch 17/20
27446/27446 [==============================] - 66s - loss: 0.0183 - acc: 0.9945 - val_loss: 0.1225 - val_acc: 0.9780
Epoch 18/20
27446/27446 [==============================] - 68s - loss: 0.0167 - acc: 0.9958 - val_loss: 0.1422 - val_acc: 0.9748
Epoch 19/20
27446/27446 [==============================] - 66s - loss: 0.0363 - acc: 0.9910 - val_loss: 0.1730 - val_acc: 0.9686
Epoch 20/20
27446/27446 [==============================] - 67s - loss: 0.0224 - acc: 0.9938 - val_loss: 0.1328 - val_acc: 0.9793

Model Evaluation


In [21]:
score, acc = model.evaluate(X_test_norm, Y_test_cat, batch_size=64, verbose=0)

In [22]:
print('Score: ', score)
print('Acc: ', acc)


Score:  0.601839751708
Acc:  0.913855898692

Predicted Classes


In [23]:
Y_pred = model.predict_classes(X_test_norm, batch_size=64, verbose=0)

In [24]:
plot_images(X_test, Y_test, Y_pred)



Build the Model with Dropout

Adding a Dropout layer that randomly zeroes half of the pooled activations during training acts as a regularizer.


In [25]:
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(32, 32, 3), activation='relu'))
model.add(MaxPool2D((2,2)))
model.add(Dropout(0.5))
model.add(Activation('relu'))  # redundant: the conv output is already ReLU-activated
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(43, activation='softmax'))

In [26]:
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_3 (Conv2D)            (None, 30, 30, 32)        896       
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 15, 15, 32)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 15, 15, 32)        0         
_________________________________________________________________
activation_2 (Activation)    (None, 15, 15, 32)        0         
_________________________________________________________________
flatten_3 (Flatten)          (None, 7200)              0         
_________________________________________________________________
dense_5 (Dense)              (None, 128)               921728    
_________________________________________________________________
dense_6 (Dense)              (None, 43)                5547      
=================================================================
Total params: 928,171
Trainable params: 928,171
Non-trainable params: 0
_________________________________________________________________

Train the Model


In [27]:
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

In [28]:
history = model.fit(X_train_norm, Y_train_cat, batch_size=64, epochs=20, verbose=1, validation_data=(X_val_norm, Y_val_cat))


Train on 27446 samples, validate on 11763 samples
Epoch 1/20
27446/27446 [==============================] - 73s - loss: 1.0311 - acc: 0.7262 - val_loss: 0.3178 - val_acc: 0.9217
Epoch 2/20
27446/27446 [==============================] - 69s - loss: 0.3028 - acc: 0.9150 - val_loss: 0.2001 - val_acc: 0.9475
Epoch 3/20
27446/27446 [==============================] - 69s - loss: 0.2045 - acc: 0.9426 - val_loss: 0.1554 - val_acc: 0.9633
Epoch 4/20
27446/27446 [==============================] - 69s - loss: 0.1642 - acc: 0.9529 - val_loss: 0.1412 - val_acc: 0.9656
Epoch 5/20
27446/27446 [==============================] - 69s - loss: 0.1295 - acc: 0.9638 - val_loss: 0.1196 - val_acc: 0.9739
Epoch 6/20
27446/27446 [==============================] - 68s - loss: 0.1228 - acc: 0.9643 - val_loss: 0.1226 - val_acc: 0.9731
Epoch 7/20
27446/27446 [==============================] - 69s - loss: 0.1070 - acc: 0.9703 - val_loss: 0.1115 - val_acc: 0.9759
Epoch 8/20
27446/27446 [==============================] - 68s - loss: 0.0970 - acc: 0.9728 - val_loss: 0.0960 - val_acc: 0.9810
Epoch 9/20
27446/27446 [==============================] - 68s - loss: 0.0865 - acc: 0.9754 - val_loss: 0.1065 - val_acc: 0.9760
Epoch 10/20
27446/27446 [==============================] - 68s - loss: 0.0869 - acc: 0.9752 - val_loss: 0.1013 - val_acc: 0.9786
Epoch 11/20
27446/27446 [==============================] - 68s - loss: 0.0783 - acc: 0.9781 - val_loss: 0.1266 - val_acc: 0.9717
Epoch 12/20
27446/27446 [==============================] - 69s - loss: 0.0696 - acc: 0.9813 - val_loss: 0.1213 - val_acc: 0.9765
Epoch 13/20
27446/27446 [==============================] - 69s - loss: 0.0684 - acc: 0.9804 - val_loss: 0.1125 - val_acc: 0.9771
Epoch 14/20
27446/27446 [==============================] - 68s - loss: 0.0573 - acc: 0.9837 - val_loss: 0.1133 - val_acc: 0.9769
Epoch 15/20
27446/27446 [==============================] - 70s - loss: 0.0580 - acc: 0.9842 - val_loss: 0.0878 - val_acc: 0.9845
Epoch 16/20
27446/27446 [==============================] - 69s - loss: 0.0559 - acc: 0.9844 - val_loss: 0.1013 - val_acc: 0.9813
Epoch 17/20
27446/27446 [==============================] - 69s - loss: 0.0658 - acc: 0.9819 - val_loss: 0.0980 - val_acc: 0.9803
Epoch 18/20
27446/27446 [==============================] - 69s - loss: 0.0495 - acc: 0.9854 - val_loss: 0.0967 - val_acc: 0.9810
Epoch 19/20
27446/27446 [==============================] - 63s - loss: 0.0458 - acc: 0.9874 - val_loss: 0.0940 - val_acc: 0.9804
Epoch 20/20
27446/27446 [==============================] - 64s - loss: 0.0568 - acc: 0.9850 - val_loss: 0.1106 - val_acc: 0.9782

Model Evaluation


In [29]:
score, acc = model.evaluate(X_test_norm, Y_test_cat, batch_size=64, verbose=0)

In [30]:
print('Score: ', score)
print('Acc: ', acc)


Score:  0.68842889651
Acc:  0.899366587452

Predicted Classes


In [31]:
Y_pred = model.predict_classes(X_test_norm, batch_size=64, verbose=0)

In [32]:
plot_images(X_test, Y_test, Y_pred)



Build a Deeper Model

Stacking three convolution/pooling blocks shrinks the feature maps before the dense layers, cutting the total parameter count to about 63K.


In [33]:
model = Sequential()

model.add(Conv2D(16, (5, 5), input_shape=(32, 32, 3), activation='relu'))
model.add(MaxPool2D((2,2)))
model.add(Activation('relu'))  # redundant after a ReLU conv; kept to match the summary below

model.add(Conv2D(32, (3, 3), activation='relu'))  # input_shape is only needed on the first layer
model.add(MaxPool2D((2,2)))
model.add(Activation('relu'))

model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPool2D((2,2)))
model.add(Activation('relu'))

model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dense(43, activation='softmax'))

In [34]:
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_4 (Conv2D)            (None, 28, 28, 16)        1216      
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 14, 14, 16)        0         
_________________________________________________________________
activation_3 (Activation)    (None, 14, 14, 16)        0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 12, 12, 32)        4640      
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 6, 6, 32)          0         
_________________________________________________________________
activation_4 (Activation)    (None, 6, 6, 32)          0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 4, 4, 64)          18496     
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 2, 2, 64)          0         
_________________________________________________________________
activation_5 (Activation)    (None, 2, 2, 64)          0         
_________________________________________________________________
flatten_4 (Flatten)          (None, 256)               0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 256)               0         
_________________________________________________________________
dense_7 (Dense)              (None, 128)               32896     
_________________________________________________________________
dense_8 (Dense)              (None, 43)                5547      
=================================================================
Total params: 62,795
Trainable params: 62,795
Non-trainable params: 0
_________________________________________________________________

Train the Model


In [35]:
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

In [36]:
history = model.fit(X_train_norm, Y_train_cat, batch_size=64, epochs=20, verbose=1, validation_data=(X_val_norm, Y_val_cat))


Train on 27446 samples, validate on 11763 samples
Epoch 1/20
27446/27446 [==============================] - 77s - loss: 1.8103 - acc: 0.4897 - val_loss: 0.4784 - val_acc: 0.8738
Epoch 2/20
27446/27446 [==============================] - 71s - loss: 0.4940 - acc: 0.8483 - val_loss: 0.1847 - val_acc: 0.9586
Epoch 3/20
27446/27446 [==============================] - 66s - loss: 0.2867 - acc: 0.9095 - val_loss: 0.1153 - val_acc: 0.9702
Epoch 4/20
27446/27446 [==============================] - 66s - loss: 0.2016 - acc: 0.9388 - val_loss: 0.0785 - val_acc: 0.9787
Epoch 5/20
27446/27446 [==============================] - 67s - loss: 0.1667 - acc: 0.9484 - val_loss: 0.0698 - val_acc: 0.9822
Epoch 6/20
27446/27446 [==============================] - 67s - loss: 0.1420 - acc: 0.9560 - val_loss: 0.0560 - val_acc: 0.9861
Epoch 7/20
27446/27446 [==============================] - 67s - loss: 0.1239 - acc: 0.9623 - val_loss: 0.0644 - val_acc: 0.9833
Epoch 8/20
27446/27446 [==============================] - 71s - loss: 0.1065 - acc: 0.9672 - val_loss: 0.0448 - val_acc: 0.9887
Epoch 9/20
27446/27446 [==============================] - 68s - loss: 0.0965 - acc: 0.9706 - val_loss: 0.0402 - val_acc: 0.9906
Epoch 10/20
27446/27446 [==============================] - 71s - loss: 0.0961 - acc: 0.9710 - val_loss: 0.0307 - val_acc: 0.9918
Epoch 11/20
27446/27446 [==============================] - 72s - loss: 0.0822 - acc: 0.9735 - val_loss: 0.0385 - val_acc: 0.9901
Epoch 12/20
27446/27446 [==============================] - 68s - loss: 0.0839 - acc: 0.9735 - val_loss: 0.0291 - val_acc: 0.9929
Epoch 13/20
27446/27446 [==============================] - 71s - loss: 0.0703 - acc: 0.9779 - val_loss: 0.0393 - val_acc: 0.9906
Epoch 14/20
27446/27446 [==============================] - 76s - loss: 0.0729 - acc: 0.9778 - val_loss: 0.0240 - val_acc: 0.9946
Epoch 15/20
27446/27446 [==============================] - 73s - loss: 0.0688 - acc: 0.9792 - val_loss: 0.0286 - val_acc: 0.9924
Epoch 16/20
27446/27446 [==============================] - 74s - loss: 0.0651 - acc: 0.9796 - val_loss: 0.0269 - val_acc: 0.9940
Epoch 17/20
27446/27446 [==============================] - 75s - loss: 0.0608 - acc: 0.9814 - val_loss: 0.0260 - val_acc: 0.9942
Epoch 18/20
27446/27446 [==============================] - 67s - loss: 0.0573 - acc: 0.9821 - val_loss: 0.0284 - val_acc: 0.9932
Epoch 19/20
27446/27446 [==============================] - 74s - loss: 0.0621 - acc: 0.9821 - val_loss: 0.0252 - val_acc: 0.9942
Epoch 20/20
27446/27446 [==============================] - 71s - loss: 0.0632 - acc: 0.9801 - val_loss: 0.0218 - val_acc: 0.9950

Model Evaluation


In [37]:
score, acc = model.evaluate(X_test_norm, Y_test_cat, batch_size=64, verbose=0)

In [38]:
print('Score: ', score)
print('Acc: ', acc)


Score:  0.197350208382
Acc:  0.961361836859

Predicted Classes


In [39]:
Y_pred = model.predict_classes(X_test_norm, batch_size=64, verbose=0)

In [40]:
plot_images(X_test, Y_test, Y_pred)
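
Since Y_pred holds the predicted class indices, the test accuracy can be recomputed directly and the most frequently missed classes inspected (a sketch, not part of the original run):


In [ ]:
# Recompute the test accuracy and list the five classes with the most errors.
y_true = np.asarray(Y_test)
errors = Y_pred != y_true
print('Test accuracy:', 1.0 - np.mean(errors))
classes, counts = np.unique(y_true[errors], return_counts=True)
for cls, cnt in sorted(zip(classes, counts), key=lambda t: -t[1])[:5]:
    print('class {}: {} misclassified'.format(cls, cnt))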


