In [1]:
"""
mnist_loader
~~~~~~~~~~~~

A library to load the MNIST image data.  For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``.  In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""

#### Libraries
# Standard library
import pickle as cPickle  # Python 3 has no cPickle module; keep the old name used below
import gzip

# Third-party libraries
import numpy as np

def load_data():
    """Return the MNIST data as a tuple ``(training_data, validation_data,
    test_data)``.  Each entry is a pair ``(images, labels)``: ``images`` is an
    array of 784-entry pixel vectors and ``labels`` an array of digits 0-9."""
    f = gzip.open('mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = cPickle.load(f, encoding='latin1')  # latin1 lets Python 3 read the Python 2 pickle
    f.close()
    return (training_data, validation_data, test_data)

def load_data_wrapper():
    """Return ``(training_data, validation_data, test_data)`` in a format
    convenient for training: inputs are (784, 1) column vectors, training
    labels are one-hot (10, 1) vectors, and validation/test labels stay as
    plain digits.  The pairs are wrapped in ``list`` because Python 3's
    ``zip`` returns a one-shot iterator."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = list(zip(training_inputs, training_results))
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = list(zip(validation_inputs, va_d[1]))
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = list(zip(test_inputs, te_d[1]))
    return (training_data, validation_data, test_data)

def vectorized_result(j):
    """Return a (10, 1) one-hot column vector with a 1.0 in position ``j``."""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e
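
A quick note on the encoding: ``vectorized_result`` produces the (10, 1) one-hot training targets, and ``np.argmax`` inverts it, which is how predictions are turned back into digits further down. Illustration only, not executed as part of this run:

int(np.argmax(vectorized_result(3)))   # -> 3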

In [16]:
%time training_data, validation_data, test_data = load_data_wrapper()


CPU times: user 1.87 s, sys: 751 ms, total: 2.62 s
Wall time: 2.71 s
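
With the list-wrapped return values above, the split sizes can be sanity-checked directly; a minimal sketch, assuming the standard 50,000 / 10,000 / 10,000 split in mnist.pkl.gz:

assert len(training_data) == 50000
assert len(validation_data) == 10000
assert len(test_data) == 10000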

In [6]:
%time x, y = zip(*training_data)

# Unzip the (image, label) pairs and stack them into the matrices Keras expects:
# x -> (50000, 784) pixel rows, y -> (50000, 10) one-hot labels.
x = np.array(x)
x = x.reshape(50000, 784)

y = np.array(y)
y = y.reshape(50000, 10)


CPU times: user 57.2 ms, sys: 48.9 ms, total: 106 ms
Wall time: 106 ms
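
Since ``load_data`` already hands back the images as a (50000, 784) array and the labels as plain digits, the unzip-and-reshape round trip above could also be skipped; a minimal alternative sketch (not used in this run, assuming the standard mnist.pkl.gz layout):

tr_d, va_d, te_d = load_data()
x_alt = tr_d[0]                                 # (50000, 784) pixel rows, already flat
y_alt = np.eye(10, dtype=np.float32)[tr_d[1]]   # one-hot labels, (50000, 10)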

In [8]:
import keras


import numpy as np
import pandas as pd

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils

from sklearn.preprocessing import StandardScaler

np.random.seed(1337) # for reproducibility


def preprocess_data(X, scaler=None):
    """Standardise X with a StandardScaler; fit a new scaler if none is given,
    otherwise reuse the one passed in.  Return the transformed data and the scaler."""
    if scaler is None:
        scaler = StandardScaler()
        scaler.fit(X)
    X = scaler.transform(X)
    return X, scaler
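
``preprocess_data`` isn't actually called anywhere in this notebook (the MNIST pixel values already sit in [0, 1]); if it were used, the scaler fitted on the training matrix would be reused for the test matrix, roughly like this sketch (with x and p_x as built elsewhere in the notebook):

x_scaled, scaler = preprocess_data(x)          # fit the StandardScaler on the training data
p_x_scaled, _ = preprocess_data(p_x, scaler)   # reuse the same scaler for the test data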

In [9]:
nb_classes = y.shape[1]
print(nb_classes, 'classes')

dims = x.shape[1]
print(dims, 'dims')


10 classes
784 dims

In [13]:
print("Building model...")

LAYER_ONE_SIZE = 1024

model = Sequential()
model.add(Dense(dims, LAYER_ONE_SIZE, init='glorot_uniform'))
model.add(PReLU((LAYER_ONE_SIZE,)))
model.add(BatchNormalization((LAYER_ONE_SIZE,)))
model.add(Dropout(0.5))

# Optional extra hidden blocks (disabled for this run); repeat this stack to go deeper:
# model.add(Dense(LAYER_ONE_SIZE, LAYER_ONE_SIZE, init='glorot_uniform'))
# model.add(PReLU((LAYER_ONE_SIZE,)))
# model.add(BatchNormalization((LAYER_ONE_SIZE,)))
# model.add(Dropout(0.5))

model.add(Dense(LAYER_ONE_SIZE, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer="adam")

print("Training model...")

%time model.fit(x, y, nb_epoch=8, batch_size=128, validation_split=0.15)


Building model...
Training model...
Train on 42500 samples, validate on 7500 samples
Epoch 0
42500/42500 [==============================] - 18s - loss: 0.4393 - val. loss: 0.1984
Epoch 1
42500/42500 [==============================] - 19s - loss: 0.1445 - val. loss: 0.1460
Epoch 2
42500/42500 [==============================] - 20s - loss: 0.1012 - val. loss: 0.1223
Epoch 3
42500/42500 [==============================] - 19s - loss: 0.0787 - val. loss: 0.1110
Epoch 4
42500/42500 [==============================] - 18s - loss: 0.0625 - val. loss: 0.1036
Epoch 5
42500/42500 [==============================] - 19s - loss: 0.0513 - val. loss: 0.0986
Epoch 6
42500/42500 [==============================] - 22s - loss: 0.0455 - val. loss: 0.0923
Epoch 7
42500/42500 [==============================] - 23s - loss: 0.0370 - val. loss: 0.0904
CPU times: user 3min 30s, sys: 21 s, total: 3min 51s
Wall time: 2min 42s
Out[13]:
<keras.callbacks.History at 0x11fa31048>

In [17]:
p_x, p_y = zip(*test_data)

# Test images as a (10000, 784) matrix; test labels stay plain digits 0-9,
# so p_y is a flat (10000,) vector rather than one-hot.
p_x = np.array(p_x)
p_x = p_x.reshape(10000, 784)

p_y = np.array(p_y)
p_y = p_y.reshape(10000)
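
Besides predicting and scoring by hand below, the test loss could also be read off with ``model.evaluate``; a minimal sketch, assuming the targets are first put back into the one-hot form used for training:

p_y_onehot = np.eye(10, dtype=np.float32)[p_y]
test_loss = model.evaluate(p_x, p_y_onehot, batch_size=128)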

In [19]:
%time preds = model.predict(p_x)
print(preds[1])
print(np.argmax(preds[1]))


10000/10000 [==============================] - 1s     
CPU times: user 1.86 s, sys: 57.5 ms, total: 1.92 s
Wall time: 1.13 s
[  8.49545880e-05   1.98081166e-05   9.99864031e-01   9.35905320e-06
   3.46804955e-09   5.81769650e-07   6.40576057e-06   1.79009542e-08
   1.48370155e-05   1.73423005e-09]
2

In [21]:
pred_idx = [np.argmax(a) for a in preds]
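
The same class indices can be obtained in one vectorised call; an equivalent alternative, given that ``preds`` is the (10000, 10) array of class probabilities:

pred_idx = np.argmax(preds, axis=1)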

In [27]:
pairs = zip(pred_idx, p_y)
print(len(p_y))


10000

In [23]:
number_correct = sum([int(a == b) for a, b in pairs])
print(number_correct)


9789

In [32]:
number_incorrect = len(p_y) - number_correct
print(number_incorrect)


211

In [33]:
eps = 0.000001  # guards against an empty test set; not actually needed here, where the denominator is 10000
success_rate = number_correct / float(number_correct + number_incorrect + eps)
print(success_rate)


0.97889999990211
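
Equivalently, the accuracy can be computed in one line from the arrays, without the eps guard; given the counts above this works out to 9789/10000 = 0.9789:

print(np.mean(np.argmax(preds, axis=1) == p_y))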

In [ ]: