In [1]:
"""
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data. For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""
#### Libraries
# Standard library
import pickle as cPickle # Sorry, workaround for Python 3.4
import gzip
# Third-party libraries
import numpy as np
def load_data():
f = gzip.open('mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = cPickle.load(f, encoding='latin1') # Make it work with Py3.x
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper():
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = zip(training_inputs, training_results)
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = zip(validation_inputs, va_d[1])
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = zip(test_inputs, te_d[1])
return (training_data, validation_data, test_data)
def vectorized_result(j):
e = np.zeros((10, 1))
e[j] = 1.0
return e
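As a quick sanity check (a sketch, not executed as part of this run; it assumes the standard mnist.pkl.gz from Nielsen's neural-networks-and-deep-learning repository), the raw arrays hold 50,000 training, 10,000 validation and 10,000 test images, each flattened to 784 floats, and vectorized_result produces the one-hot targets the output layer is trained against:

In [ ]:
# Sketch: inspect the raw shapes and the one-hot encoding.
raw_tr, raw_va, raw_te = load_data()
print(raw_tr[0].shape, raw_tr[1].shape)   # expected: (50000, 784) (50000,)
print(raw_va[0].shape, raw_te[0].shape)   # expected: (10000, 784) (10000, 784)
print(vectorized_result(3).ravel())       # expected: [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]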
In [16]:
%time training_data, validation_data, test_data = load_data_wrapper()
In [6]:
%time x, y = zip(*training_data)
x = np.array(x)
x = x.reshape(50000, 784)
y = np.array(y)
y = y.reshape(50000, 10)
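Keras expects one example per row, so the (784, 1) column vectors coming back from load_data_wrapper are stacked into a (50000, 784) design matrix and the one-hot labels into a (50000, 10) target matrix. A quick shape check (a sketch, assuming the reshapes above ran as written):

In [ ]:
# Sketch: confirm the row-per-example layout Keras expects.
print(x.shape, y.shape)        # expected: (50000, 784) (50000, 10)
print(np.argmax(y[0]))         # integer class of the first training image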
In [8]:
import keras
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import StandardScaler
np.random.seed(1337) # for reproducibility
def preprocess_data(X, scaler=None):
    if not scaler:
        scaler = StandardScaler()
        scaler.fit(X)
    X = scaler.transform(X)
    return X, scaler
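preprocess_data is never actually called below (the mnist.pkl.gz pixels are already floats in [0, 1], so the network trains without it), but the intended pattern is to fit the scaler on the training matrix only and reuse the fitted scaler on any held-out data. A minimal sketch, where x_held_out stands in for a hypothetical second matrix:

In [ ]:
# Sketch: fit the StandardScaler on the training data, reuse it elsewhere.
x_scaled, scaler = preprocess_data(x)                          # fits and applies the scaler
# x_held_out_scaled, _ = preprocess_data(x_held_out, scaler)   # hypothetical held-out data; same scaler, no refit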
In [9]:
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = x.shape[1]
print(dims, 'dims')
In [13]:
print("Building model...")
LAYER_ONE_SIZE = 1024
model = Sequential()
model.add(Dense(dims, LAYER_ONE_SIZE, init='glorot_uniform'))
model.add(PReLU((LAYER_ONE_SIZE,)))
model.add(BatchNormalization((LAYER_ONE_SIZE,)))
model.add(Dropout(0.5))
# model.add(Dense(LAYER_ONE_SIZE, LAYER_ONE_SIZE, init='glorot_uniform'))
# model.add(PReLU((LAYER_ONE_SIZE,)))
# model.add(BatchNormalization((LAYER_ONE_SIZE,)))
# model.add(Dropout(0.5))
# model.add(Dense(LAYER_ONE_SIZE, LAYER_ONE_SIZE, init='glorot_uniform'))
# model.add(PReLU((LAYER_ONE_SIZE,)))
# model.add(BatchNormalization((LAYER_ONE_SIZE,)))
# model.add(Dropout(0.5))
model.add(Dense(LAYER_ONE_SIZE, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
print("Training model...")
%time model.fit(x, y, nb_epoch=8, batch_size=128, validation_split=0.15)
Out[13]:
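The layer constructors above use the pre-1.0 Keras API, where Dense takes (input_dim, output_dim) and fit takes nb_epoch. On a current Keras 2.x install the same one-hidden-layer network would look roughly like the following sketch (argument spellings per the 2.x API; this is an equivalent rewrite, not the code actually run above):

In [ ]:
# Sketch: the same architecture written against the Keras 2.x API (assumes a recent Keras install).
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, BatchNormalization, PReLU

model2 = Sequential()
model2.add(Dense(1024, input_dim=784, kernel_initializer='glorot_uniform'))
model2.add(PReLU())
model2.add(BatchNormalization())
model2.add(Dropout(0.5))
model2.add(Dense(10, kernel_initializer='glorot_uniform'))
model2.add(Activation('softmax'))
model2.compile(loss='categorical_crossentropy', optimizer='adam')
# model2.fit(x, y, epochs=8, batch_size=128, validation_split=0.15)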
In [17]:
p_x, p_y = zip(*test_data)
p_x = np.array(p_x)
p_x = p_x.reshape(10000, 784)
p_y = np.array(p_y)
p_y = p_y.reshape(10000)
In [19]:
%time preds = model.predict(p_x)
print(preds[1])
print(np.argmax(preds[1]))
In [21]:
pred_idx = [np.argmax(a) for a in preds]
In [27]:
pairs = zip(pred_idx, p_y)  # a single-use iterator under Python 3; it is consumed by the sum() below
print(len(p_y))
In [23]:
number_correct = sum([int(a == b) for a, b in pairs])
print(number_correct)
In [32]:
number_incorrect = len(p_y) - number_correct
print(number_incorrect)
In [33]:
success_rate = number_correct / float(number_correct + number_incorrect)  # denominator is len(p_y), so it cannot be zero
print(success_rate)
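The same test accuracy can be computed in one vectorized step by argmax-ing each softmax row of preds and comparing against the integer labels:

In [ ]:
# Sketch: vectorized accuracy, equivalent to the loop-and-sum above.
print(np.mean(np.argmax(preds, axis=1) == p_y))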