In [ ]:
from __future__ import division, print_function
%matplotlib inline
from importlib import reload  # Python 3
import utils; reload(utils)
from utils import *

Setup


In [ ]:
# Mini-batch size shared by all training/validation generators below.
batch_size=64

In [ ]:
from keras.datasets import mnist
# Load MNIST: 60k training and 10k test 28x28 greyscale digit images.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)

In [ ]:
# Insert a channels axis (channels-first): (N, 28, 28) -> (N, 1, 28, 28),
# matching the models' input_shape=(1,28,28) used throughout this notebook.
X_test = np.expand_dims(X_test,1)
X_train = np.expand_dims(X_train,1)

In [ ]:
X_train.shape

In [ ]:
# Labels are raw digit classes 0-9 before one-hot encoding.
y_train[:5]

In [ ]:
# One-hot encode labels for categorical_crossentropy (onehot comes from utils).
y_train = onehot(y_train)
y_test = onehot(y_test)

In [ ]:
y_train[:5]

In [ ]:
# Global pixel mean/std computed on the TRAINING set only; the same
# statistics are applied to the test set via norm_input below.
mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)

In [ ]:
# Standardise a batch of images to zero mean / unit variance using the
# training-set statistics above (used as the first Lambda layer of each model).
def norm_input(x): return (x-mean_px)/std_px

Linear model


In [ ]:
def get_lin_model():
    """Build and compile a linear (multinomial logistic regression) model.

    Normalises the 1x28x28 input, flattens it, and maps straight to a
    10-way softmax. Compiled with Adam and categorical cross-entropy,
    reporting accuracy.
    """
    model = Sequential()
    model.add(Lambda(norm_input, input_shape=(1,28,28)))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer=Adam(), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

In [ ]:
lm = get_lin_model()

In [ ]:
# Plain (non-augmenting) generators over the in-memory arrays.
gen = image.ImageDataGenerator()
batches = gen.flow(X_train, y_train, batch_size=batch_size)
test_batches = gen.flow(X_test, y_test, batch_size=batch_size)
# Number of generator steps that covers each dataset once per epoch
# (ceil so the final partial batch is included).
steps_per_epoch = int(np.ceil(batches.n/batch_size))
validation_steps = int(np.ceil(test_batches.n/batch_size))

In [ ]:
lm.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
lm.optimizer.lr=0.1

In [ ]:
lm.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
lm.optimizer.lr=0.01

In [ ]:
lm.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4, 
                    validation_data=test_batches, validation_steps=validation_steps)

Single dense layer


In [ ]:
def get_fc_model():
    """Build and compile a fully-connected net: normalised 1x28x28 input,
    one 512-unit hidden layer, and a 10-way softmax output.

    BUG FIX: the hidden layer previously used activation='softmax', which
    forces its 512 activations to sum to 1 and severely limits what the
    layer can represent; a hidden layer should use a non-saturating
    activation such as relu. The output layer keeps softmax.
    """
    model = Sequential([
        Lambda(norm_input, input_shape=(1,28,28)),
        Flatten(),
        Dense(512, activation='relu'),  # was 'softmax' — see docstring
        Dense(10, activation='softmax')
        ])
    model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return model

In [ ]:
fc = get_fc_model()

In [ ]:
fc.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
fc.optimizer.lr=0.1

In [ ]:
fc.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
fc.optimizer.lr=0.01

In [ ]:
fc.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4, 
                    validation_data=test_batches, validation_steps=validation_steps)

Basic 'VGG-style' CNN


In [ ]:
def get_model():
    """Build and compile a small VGG-style CNN for 1x28x28 MNIST digits.

    Two convolutional blocks (32 then 64 filters; each block is two 3x3
    relu convolutions followed by max pooling), then a 512-unit relu dense
    layer and a 10-way softmax. Compiled with Adam and categorical
    cross-entropy, reporting accuracy.
    """
    model = Sequential()
    model.add(Lambda(norm_input, input_shape=(1,28,28)))
    # Block 1: 32 filters
    model.add(Conv2D(32, (3,3), activation='relu'))
    model.add(Conv2D(32, (3,3), activation='relu'))
    model.add(MaxPooling2D())
    # Block 2: 64 filters
    model.add(Conv2D(64, (3,3), activation='relu'))
    model.add(Conv2D(64, (3,3), activation='relu'))
    model.add(MaxPooling2D())
    # Classifier head
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer=Adam(), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

In [ ]:
model = get_model()

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.1

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.01

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=8, 
                    validation_data=test_batches, validation_steps=validation_steps)

Data augmentation


In [ ]:
model = get_model()

In [ ]:
gen = image.ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,
                               height_shift_range=0.08, zoom_range=0.08)
batches = gen.flow(X_train, y_train, batch_size=batch_size)
test_batches = gen.flow(X_test, y_test, batch_size=batch_size)
steps_per_epoch = int(np.ceil(batches.n/batch_size))
validation_steps = int(np.ceil(test_batches.n/batch_size))

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.1

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.01

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=8, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.001

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=14, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.0001

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=10, 
                    validation_data=test_batches, validation_steps=validation_steps)

Batchnorm + data augmentation


In [ ]:
def get_model_bn():
    """Build and compile the VGG-style CNN with batch normalisation.

    Same architecture as get_model(), with BatchNormalization inserted
    between layers: axis=1 (the channels axis, channels-first) after the
    convolutional layers, default axis after Flatten and the dense layer.
    Compiled with Adam and categorical cross-entropy.
    """
    model = Sequential()
    model.add(Lambda(norm_input, input_shape=(1,28,28)))
    # Block 1: 32 filters
    model.add(Conv2D(32, (3,3), activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(Conv2D(32, (3,3), activation='relu'))
    model.add(MaxPooling2D())
    model.add(BatchNormalization(axis=1))
    # Block 2: 64 filters
    model.add(Conv2D(64, (3,3), activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(Conv2D(64, (3,3), activation='relu'))
    model.add(MaxPooling2D())
    # Classifier head
    model.add(Flatten())
    model.add(BatchNormalization())
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer=Adam(), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

In [ ]:
model = get_model_bn()

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.1

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.01

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=12, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.001

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=12, 
                    validation_data=test_batches, validation_steps=validation_steps)

Batchnorm + dropout + data augmentation


In [ ]:
def get_model_bn_do():
    """Build and compile the batchnorm CNN with dropout regularisation.

    Identical to get_model_bn() except for a Dropout(0.5) layer between
    the final batch-norm and the softmax output, to reduce overfitting.
    Compiled with Adam and categorical cross-entropy.
    """
    model = Sequential()
    model.add(Lambda(norm_input, input_shape=(1,28,28)))
    # Block 1: 32 filters
    model.add(Conv2D(32, (3,3), activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(Conv2D(32, (3,3), activation='relu'))
    model.add(MaxPooling2D())
    model.add(BatchNormalization(axis=1))
    # Block 2: 64 filters
    model.add(Conv2D(64, (3,3), activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(Conv2D(64, (3,3), activation='relu'))
    model.add(MaxPooling2D())
    # Classifier head with dropout before the output layer
    model.add(Flatten())
    model.add(BatchNormalization())
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer=Adam(), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

In [ ]:
model = get_model_bn_do()

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.1

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.01

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=12, 
                    validation_data=test_batches, validation_steps=validation_steps)

In [ ]:
model.optimizer.lr=0.001

In [ ]:
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, 
                    validation_data=test_batches, validation_steps=validation_steps)

Ensembling


In [ ]:
def fit_model():
    """Train a fresh batchnorm+dropout CNN for the ensemble.

    Trains 35 epochs total (1 + 4 + 12 + 18) with a step-wise decreasing
    learning rate, using the module-level `batches`/`test_batches`
    generators and step counts. Runs silently (verbose=0).

    Returns the trained model.

    BUG FIX: the original assigned `model.optimizer.lr = ...`, which only
    rebinds the Python attribute and leaves the compiled training
    function's rate unchanged; the backend setter is required.
    """
    model = get_model_bn_do()
    # Warm-up epoch at the default Adam rate.
    model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, verbose=0,
                        validation_data=test_batches, validation_steps=validation_steps)
    keras.backend.set_value(model.optimizer.lr, 0.1)
    model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4, verbose=0,
                        validation_data=test_batches, validation_steps=validation_steps)
    keras.backend.set_value(model.optimizer.lr, 0.01)
    model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=12, verbose=0,
                        validation_data=test_batches, validation_steps=validation_steps)
    keras.backend.set_value(model.optimizer.lr, 0.001)
    model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=18, verbose=0,
                        validation_data=test_batches, validation_steps=validation_steps)
    return model

In [ ]:
# create 6 of the models above
# Train six independent batchnorm+dropout CNNs for averaging (slow —
# 35 epochs each).
models = [fit_model() for i in range(6)]

In [ ]:
import os
# Build data/model paths relative to the user's home directory so the
# notebook is not tied to one machine's absolute path.
user_home = os.path.expanduser('~')
path = os.path.join(user_home, "pj/fastai/data/MNIST_data/")
model_path = path + 'models/'

# path = "data/mnist/"
# model_path = path + 'models/'

In [ ]:
# save the weight of 6 models in a file
# NOTE(review): Keras save_weights writes HDF5, so the '.pkl' extension is
# misleading — these files are not pickles. Consider '.h5' for new runs
# (kept as-is here so existing saved files still match).
for i,m in enumerate(models):
    m.save_weights(model_path+'cnn-mnist23-'+str(i)+'.pkl')

In [ ]:
eval_batch_size = 256

In [ ]:
evals = np.array([m.evaluate(X_test, y_test, batch_size=eval_batch_size) for m in models])

In [ ]:
evals.mean(axis=0)

In [ ]:
# for each models, predict the test sets. Stack all predictions together
all_preds = np.stack([m.predict(X_test, batch_size=eval_batch_size) for m in models])

In [ ]:
all_preds.shape

In [ ]:
avg_preds = all_preds.mean(axis=0)

In [ ]:
keras.metrics.categorical_accuracy(y_test, avg_preds).eval()

In [ ]: