In [1]:
"""
mnist_loader
~~~~~~~~~~~~

A library to load the MNIST image data.  For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``.  In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""

#### Libraries
# Standard library
import pickle as cPickle  # Python 3 has no cPickle module, so alias the standard pickle under the old name
import gzip

# Third-party libraries
import numpy as np

def load_data():
    """Return the MNIST data as a tuple ``(training_data, validation_data, test_data)``."""
    f = gzip.open('mnist.pkl.gz', 'rb')
    # encoding='latin1' lets Python 3 read the Python 2 pickle stored in mnist.pkl.gz
    training_data, validation_data, test_data = cPickle.load(f, encoding='latin1')
    f.close()
    return (training_data, validation_data, test_data)

def load_data_wrapper():
    """Reshape the raw data into the (input, result) pairs expected by the network code."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = zip(training_inputs, training_results)
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(validation_inputs, va_d[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(test_inputs, te_d[1])
    return (training_data, validation_data, test_data)

def vectorized_result(j):
    """Return a 10x1 one-hot column vector with a 1.0 in the j-th position."""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e
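
A quick sanity check (a hypothetical snippet, not part of the original run): ``vectorized_result`` builds the 10x1 one-hot column used as the training target.

np.ravel(vectorized_result(3))
# -> array([0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])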

In [2]:
%time training_data, validation_data, test_data = load_data_wrapper()


CPU times: user 2.08 s, sys: 910 ms, total: 2.99 s
Wall time: 3.57 s

In [3]:
%time x, y = zip(*training_data)

x = np.array(x)
x = x.reshape(50000, 784)   # 50,000 training images, 784 pixels each

y = np.array(y)
y = y.reshape(50000, 10)    # matching one-hot labels


CPU times: user 76 ms, sys: 40.4 ms, total: 116 ms
Wall time: 145 ms
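
As an aside (a sketch, assuming the arrays inside mnist.pkl.gz already have shapes (50000, 784) and (50000,), which is what the reshapes above imply), the same x and y matrices could be built directly from load_data() without the per-example reshaping:

tr_d, _, _ = load_data()
x_alt = tr_d[0]                                 # (50000, 784) pixel inputs
y_alt = np.eye(10, dtype=np.float32)[tr_d[1]]   # (50000, 10) one-hot labels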

In [4]:
import keras


import numpy as np
import pandas as pd

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils

from sklearn.preprocessing import StandardScaler

np.random.seed(1337) # for reproducibility


def preprocess_data(X, scaler=None):
    """Standardize X; fit a new StandardScaler if none is supplied, otherwise reuse the given one."""
    if not scaler:
        scaler = StandardScaler()
        scaler.fit(X)
    X = scaler.transform(X)
    return X, scaler
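
preprocess_data is defined here but never called below. If standardization were wanted, a hypothetical usage would fit the scaler on the training inputs and reuse it on the test inputs (p_x is only built further down, in the test-set cell):

x_std, scaler = preprocess_data(x)         # fit + transform on training inputs
p_x_std, _ = preprocess_data(p_x, scaler)  # reuse the fitted scaler on test inputs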

In [5]:
nb_classes = y.shape[1]
print(nb_classes, 'classes')

dims = x.shape[1]
print(dims, 'dims')


10 classes
784 dims

In [6]:
print("Building model...")

LAYER_ONE_SIZE = 1024  # width shared by all three hidden layers

model = Sequential()
# Hidden layer 1: fully connected -> PReLU -> batch norm -> dropout
# (Dense(input_dim, output_dim, ...) is the old Keras 0.x call signature)
model.add(Dense(dims, LAYER_ONE_SIZE, init='glorot_uniform'))
model.add(PReLU((LAYER_ONE_SIZE,)))
model.add(BatchNormalization((LAYER_ONE_SIZE,)))
model.add(Dropout(0.5))

# Hidden layer 2
model.add(Dense(LAYER_ONE_SIZE, LAYER_ONE_SIZE, init='glorot_uniform'))
model.add(PReLU((LAYER_ONE_SIZE,)))
model.add(BatchNormalization((LAYER_ONE_SIZE,)))
model.add(Dropout(0.5))

# Hidden layer 3
model.add(Dense(LAYER_ONE_SIZE, LAYER_ONE_SIZE, init='glorot_uniform'))
model.add(PReLU((LAYER_ONE_SIZE,)))
model.add(BatchNormalization((LAYER_ONE_SIZE,)))
model.add(Dropout(0.5))

# Output layer: 10-way softmax over the digit classes
model.add(Dense(LAYER_ONE_SIZE, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer="adam")

print("Training model...")

%time model.fit(x, y, nb_epoch=8, batch_size=128, validation_split=0.15)


Building model...
Training model...
Train on 42500 samples, validate on 7500 samples
Epoch 0
42500/42500 [==============================] - 96s - loss: 0.3574 - val. loss: 0.1361
Epoch 1
42500/42500 [==============================] - 93s - loss: 0.1180 - val. loss: 0.1133
Epoch 2
42500/42500 [==============================] - 89s - loss: 0.0909 - val. loss: 0.1036
Epoch 3
42500/42500 [==============================] - 94s - loss: 0.0744 - val. loss: 0.0987
Epoch 4
42500/42500 [==============================] - 82s - loss: 0.0666 - val. loss: 0.0962
Epoch 5
42500/42500 [==============================] - 63s - loss: 0.0601 - val. loss: 0.0927
Epoch 6
42500/42500 [==============================] - 59s - loss: 0.0559 - val. loss: 0.0943
Epoch 7
42500/42500 [==============================] - 60s - loss: 0.0539 - val. loss: 0.0901
CPU times: user 13min 35s, sys: 1min 39s, total: 15min 14s
Wall time: 10min 40s
Out[6]:
<keras.callbacks.History at 0x121ba1518>
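
The two-argument Dense(input_dim, output_dim) form above is the early Keras (0.x) API. For reference only, a sketch of roughly the same architecture in the later Keras 2 Sequential API (not what was run here; exact import paths vary by Keras version):

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, BatchNormalization, PReLU

model2 = Sequential()
model2.add(Dense(1024, input_dim=784, kernel_initializer='glorot_uniform'))
model2.add(PReLU())
model2.add(BatchNormalization())
model2.add(Dropout(0.5))
for _ in range(2):  # two more identical hidden blocks
    model2.add(Dense(1024, kernel_initializer='glorot_uniform'))
    model2.add(PReLU())
    model2.add(BatchNormalization())
    model2.add(Dropout(0.5))
model2.add(Dense(10, kernel_initializer='glorot_uniform'))
model2.add(Activation('softmax'))
model2.compile(loss='categorical_crossentropy', optimizer='adam')
model2.fit(x, y, epochs=8, batch_size=128, validation_split=0.15)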

In [7]:
p_x, p_y = zip(*test_data)

p_x = np.array(p_x)
p_x = p_x.reshape(10000, 784)   # 10,000 test images

p_y = np.array(p_y)
p_y = p_y.reshape(10000)        # integer labels (not one-hot)

In [8]:
%time preds = model.predict(p_x)
print(preds[1])
print(np.argmax(preds[1]))


10000/10000 [==============================] - 4s     
CPU times: user 9.94 s, sys: 128 ms, total: 10.1 s
Wall time: 4.72 s
[  6.11931360e-06   1.80490054e-05   9.99958198e-01   6.21384831e-06
   7.29756289e-07   2.64006232e-07   1.15416223e-06   1.96012004e-06
   7.14573374e-06   1.65813741e-07]
2
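
Each row of preds is a softmax distribution over the 10 digit classes, so (a hypothetical check, not run above) it should sum to roughly 1, and argmax picks the most probable class:

float(np.sum(preds[1]))   # ~1.0
int(np.argmax(preds[1]))  # 2, matching the printed output above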

In [9]:
pred_idx = [np.argmax(a) for a in preds]  # most probable class for each test image

In [10]:
pairs = zip(pred_idx, p_y)
print(len(p_y))


10000

In [11]:
number_correct = sum([int(a == b) for a, b in pairs])
print(number_correct)


9796

In [12]:
number_incorrect = len(p_y) - number_correct
print(number_incorrect)


204

In [13]:
eps = 0.000001  # guards against an empty test set; not strictly needed here, since the denominator is always 10000
success_rate = number_correct / float(number_correct + number_incorrect + eps)
print(success_rate)


0.9795999999020399
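
The bookkeeping in the last few cells can be collapsed into a single vectorized expression (a sketch, equivalent up to the tiny eps term above):

accuracy = np.mean(np.argmax(preds, axis=1) == p_y)  # ~0.9796 on the 10,000-image test set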

In [14]:
4 != 2


Out[14]:
True
