In [1]:
import os
path = os.path.dirname(os.getcwd())

MNIST Data


In [2]:
import pandas as pd

In [5]:
pd_train_data = pd.read_csv(os.path.join(path, "data", "train.csv"))  # must clone and unzip!
pd_test_data = pd.read_csv(os.path.join(path, "data", "test.csv"))    # must clone and unzip!
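
Each CSV row is assumed to hold 784 pixel values followed by the digit label (785 columns total, matching the indexing below). A quick shape check; the expected row counts come from the training and evaluation logs further down:

In [ ]:
print(pd_train_data.shape)  # expect (60000, 785)
print(pd_test_data.shape)   # expect (10000, 785)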

In [6]:
train_data = pd_train_data.values  # DataFrame.as_matrix() is deprecated; use .values
test_data = pd_test_data.values

In [7]:
x_train = train_data[:, 0:784]  # first 784 columns: pixel values
y_train = train_data[:, 784]    # last column: digit label

In [8]:
x_test = test_data[:, 0:784]
y_test = test_data[:, 784]

In [9]:
# Reshape flat 784-pixel rows into 28x28x1 images (channels-last) for Conv2D
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32')
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype('float32')
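
A quick way to confirm the reshape is to plot one image (assuming matplotlib is installed; y_train is still an integer label at this point):

In [ ]:
import matplotlib.pyplot as plt
# Display the first training digit; a recognizable digit means the
# 784 -> 28x28 reshape preserved the row ordering
plt.imshow(x_train[0].reshape(28, 28), cmap="gray")
plt.title("label: %d" % y_train[0])
plt.show()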

In [10]:
# Scale pixel values from [0, 255] to [0, 1]
x_train /= 255
x_test /= 255
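
After the division, every pixel value should lie in [0, 1]:

In [ ]:
print(x_train.min(), x_train.max())  # expect: 0.0 1.0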

In [11]:
from keras.utils import np_utils
# One-hot encode the integer labels into length-10 vectors
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]


Using TensorFlow backend.
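
to_categorical turns each integer label into a length-10 one-hot vector, which is what categorical_crossentropy expects later. A minimal illustration:

In [ ]:
# Label 3 becomes a 1 at index 3 of an otherwise-zero length-10 vector
print(np_utils.to_categorical([3], num_classes=10))
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]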

Build Model


In [12]:
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense

In [13]:
model = Sequential()

In [14]:
# Convolution and pooling 1
model.add(Conv2D(filters=6, kernel_size=(2,2), input_shape=(28,28,1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Activation("sigmoid"))

# Convolution and pooling 2
model.add(Conv2D(filters=16, kernel_size=(5,5)))
model.add(MaxPooling2D(pool_size=2))
model.add(Activation("sigmoid"))

# Convolution 3
model.add(Conv2D(filters=120, kernel_size=(4,4)))

# Fully-Connected
model.add(Flatten())
model.add(Dense(84))
model.add(Activation("tanh"))

# Output layer
model.add(Dense(10))
model.add(Activation('softmax'))
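
Before compiling, it is worth checking the layer output shapes. With the default "valid" padding, the 28x28 input shrinks to 1x1x120 at the last convolution, so Flatten yields exactly 120 units:

In [ ]:
model.summary()
# Expected output shapes (valid padding, stride 1):
#   Conv2D 2x2  -> 27x27x6
#   MaxPool 2   -> 13x13x6
#   Conv2D 5x5  -> 9x9x16
#   MaxPool 2   -> 4x4x16
#   Conv2D 4x4  -> 1x1x120
#   Flatten     -> 120
#   Dense       -> 84, then 10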

In [15]:
model.compile(loss="categorical_crossentropy", optimizer="sgd", metrics=["accuracy"])
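
The "sgd" string uses Keras' default SGD (learning rate 0.01). As the training log below shows, sigmoid activations with plain SGD start slowly; a common alternative (not the configuration trained here) would be Adam:

In [ ]:
# Alternative optimizer, not used in the run below:
# model.compile(loss="categorical_crossentropy",
#               optimizer=keras.optimizers.Adam(),
#               metrics=["accuracy"])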

In [16]:
model.fit(x_train, y_train, epochs=20, batch_size=128)


Epoch 1/20
60000/60000 [==============================] - 9s 145us/step - loss: 2.3032 - acc: 0.1104
Epoch 2/20
60000/60000 [==============================] - 9s 146us/step - loss: 2.3018 - acc: 0.1114
Epoch 3/20
60000/60000 [==============================] - 9s 152us/step - loss: 2.3008 - acc: 0.1129
Epoch 4/20
60000/60000 [==============================] - 9s 151us/step - loss: 2.2995 - acc: 0.1176
Epoch 5/20
60000/60000 [==============================] - 9s 154us/step - loss: 2.2970 - acc: 0.1242
Epoch 6/20
60000/60000 [==============================] - 11s 190us/step - loss: 2.2939 - acc: 0.1305
Epoch 7/20
60000/60000 [==============================] - 12s 203us/step - loss: 2.2885 - acc: 0.1375
Epoch 8/20
60000/60000 [==============================] - 11s 180us/step - loss: 2.2789 - acc: 0.1586
Epoch 9/20
60000/60000 [==============================] - 8s 135us/step - loss: 2.2578 - acc: 0.2244
Epoch 10/20
60000/60000 [==============================] - 9s 156us/step - loss: 2.1977 - acc: 0.3456
Epoch 11/20
60000/60000 [==============================] - 9s 148us/step - loss: 1.9397 - acc: 0.5439
Epoch 12/20
60000/60000 [==============================] - 9s 151us/step - loss: 1.2247 - acc: 0.7283
Epoch 13/20
60000/60000 [==============================] - 9s 150us/step - loss: 0.7466 - acc: 0.8088
Epoch 14/20
60000/60000 [==============================] - 9s 146us/step - loss: 0.5740 - acc: 0.8417
Epoch 15/20
60000/60000 [==============================] - 9s 151us/step - loss: 0.4883 - acc: 0.8618
Epoch 16/20
60000/60000 [==============================] - 9s 148us/step - loss: 0.4331 - acc: 0.8764
Epoch 17/20
60000/60000 [==============================] - 10s 173us/step - loss: 0.3927 - acc: 0.8861
Epoch 18/20
60000/60000 [==============================] - 9s 147us/step - loss: 0.3610 - acc: 0.8948
Epoch 19/20
60000/60000 [==============================] - 9s 149us/step - loss: 0.3356 - acc: 0.9022
Epoch 20/20
60000/60000 [==============================] - 10s 173us/step - loss: 0.3140 - acc: 0.9083
Out[16]:
<keras.callbacks.History at 0x7f0781359588>
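
The loss barely moves for the first eight epochs before dropping sharply, which is typical of sigmoid units trained with plain SGD. To plot the curve, keep the History object when fitting (e.g. run the cell above as history = model.fit(...)):

In [ ]:
import matplotlib.pyplot as plt
# Assumes the fit cell was run as: history = model.fit(...)
plt.plot(history.history["loss"])
plt.xlabel("epoch")
plt.ylabel("training loss")
plt.show()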

In [17]:
model.evaluate(x_test, y_test, batch_size=128)


10000/10000 [==============================] - 1s 64us/step
Out[17]:
[0.2853667983055115, 0.9188]
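
evaluate returns [loss, accuracy], so the model reaches roughly 91.9% accuracy on the held-out set, consistent with the final training accuracy. Individual predictions can be inspected by taking the argmax of the softmax outputs:

In [ ]:
import numpy as np
probs = model.predict(x_test[:5])
print("predicted:", np.argmax(probs, axis=1))        # predicted digits
print("true:     ", np.argmax(y_test[:5], axis=1))   # decode one-hot labels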