In [1]:
    
import keras
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Dense, Dropout, Flatten, Input, Conv2D, MaxPooling2D
from keras import backend as K
    
    
In [2]:
    
(x_train, y_train), (x_test, y_test) = mnist.load_data()
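
load_data() hands back 60,000 training and 10,000 test images as 28×28 uint8 arrays, plus integer labels 0–9.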
    
    
In [3]:
    
from PIL import Image
Image.fromarray(x_train[0]).resize((256,256))
    
    Out[3]: <256×256 PIL rendering of the first training digit (a handwritten 5)>
In [4]:
    
y_train[0]
    
    Out[4]: 5
In [5]:
    
K.image_data_format()
    
    Out[5]: 'channels_last'
In [6]:
    
batch_size = 128
num_classes = 10
epochs = 1
    
In [7]:
    
# Fiddle with X
# input image dimensions
img_rows, img_cols = 28, 28
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
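
The reshape above hardcodes the channels-last layout In [5] reported. A format-agnostic sketch of the same step (just branching on K.image_data_format()) would be:

In [ ]:
    
# same preprocessing, but portable across backend image formats
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)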
    
In [8]:
    
x_train[0].max(), x_train[0].min()
    
    Out[8]: (1.0, 0.0)
In [9]:
    
# Fiddle with Y
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
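
to_categorical just builds one-hot rows; the same thing in plain NumPy (assuming integer labels in [0, num_classes)) is a row-pick from the identity matrix:

In [ ]:
    
import numpy as np
np.eye(num_classes)[[5, 0, 4]]  # one-hot rows for example labels 5, 0, 4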
    
In [10]:
    
y_train[0]
    
    Out[10]: array([0., 0., 0., 0., 0., 1., 0., 0., 0., 0.])
In [11]:
    
x_train.shape
    
    Out[11]: (60000, 28, 28, 1)
In [12]:
    
# inputs
mnist_input = Input(shape=(28, 28, 1))
    
In [13]:
    
def makeDaModel():
    # two convs -> pool -> dropout -> flatten -> dense head
    # (builds on the global mnist_input tensor defined in In [12])
    conv1 = Conv2D(32, kernel_size=(3, 3), activation='relu')(mnist_input)  # 28x28x1 -> 26x26x32
    conv2 = Conv2D(64, (3, 3), activation='relu')(conv1)                    # -> 24x24x64
    maxP1 = MaxPooling2D(pool_size=(2, 2))(conv2)                           # -> 12x12x64
    drop1 = Dropout(0.25)(maxP1)
    flat = Flatten()(drop1)                                                 # -> 9216 units
    dense1 = Dense(128, activation='relu')(flat)
    drop2 = Dropout(0.5)(dense1)
    dense2 = Dense(num_classes, activation='softmax')(drop2)
    model = Model(inputs=mnist_input, outputs=dense2)
    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy']
    )
    return model
    
In [14]:
    
model = makeDaModel()
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
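
A quick single-image sanity check (a sketch, nothing fancy): argmax over the softmax row gives the predicted digit.

In [ ]:
    
probs = model.predict(x_test[:1])  # shape (1, 10): one softmax row
probs.argmax(axis=-1)              # predicted class for the first test image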
    
    
In [15]:
    
model.summary()
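
The summary should show roughly 1.2M parameters, almost all of them in the 9216 → 128 dense layer (the 12×12×64 pooled feature map flattens to 9,216 units).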
    
    
In [16]:
    
model.layers[1].get_weights()[0].shape
    
    Out[16]: (3, 3, 1, 32)
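Keras stores Conv2D kernels as (rows, cols, input_channels, filters) under channels-last, so (3, 3, 1, 32) is the shape to reproduce by hand.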
In [17]:
    
import numpy as np
    
In [18]:
    
# first let's give ourselves some vars
# need to give ourselves some Xavier-init'ed weights
# http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
from functools import reduce
def xavier(shape):
    # receptive field size = product of the kernel dims (everything but the last two axes);
    # initializer 1 keeps reduce from crashing on the empty list a 2-D (dense) shape gives
    receptive = reduce(lambda x, y: x * y, list(shape)[:-2], 1)
    fan_in = receptive * list(shape)[-2]   # number of input units
    fan_out = receptive * list(shape)[-1]  # number of output units
    limit = np.sqrt(3.0 / ((fan_in + fan_out) / 2))  # == sqrt(6 / (fan_in + fan_out))
    print("fan_in: {}, fan_out: {}, limit: {}".format(fan_in, fan_out, limit))
    return np.random.uniform(-limit, limit, shape)
# glorot_uniform((28, 28, 1), (26, 26, 32))
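
Plugging in the first conv kernel: fan_in = 3·3·1 = 9, fan_out = 3·3·32 = 288, so limit = √(6/297) ≈ 0.142. And since U(-a, a) has std a/√3, the weights should come out with std around 0.082:

In [ ]:
    
a = np.sqrt(6.0 / (9 + 288))  # limit for a (3, 3, 1, 32) kernel
a, a / np.sqrt(3)             # (limit, expected std) ≈ (0.142, 0.082)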
    
In [19]:
    
conv1W = xavier((3, 3, 1, 32))
    
    
In [20]:
    
conv1W.shape
    
    Out[20]: (3, 3, 1, 32)
Alright, let's check my work
In [21]:
    
model = makeDaModel()
    
In [22]:
    
model.layers[1].get_weights()[0].mean(), conv1W.mean()
    
    Out[22]:
In [23]:
    
model.layers[1].get_weights()[0].max(), conv1W.max()
    
    Out[23]:
In [24]:
    
model.layers[1].get_weights()[0].min(), conv1W.min()
    
    Out[24]:
In [25]:
    
model.layers[1].get_weights()[0].std(), conv1W.std()
    
    Out[25]:
In [26]:
    
model.layers[1].get_weights()[0].shape == conv1W.shape
    
    Out[26]: True
Seems legit
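
To put a number on "seems legit": both stds above should sit near limit/√3 ≈ 0.082.

In [ ]:
    
limit = np.sqrt(6.0 / (9 + 288))
np.allclose([model.layers[1].get_weights()[0].std(), conv1W.std()],
            limit / np.sqrt(3), atol=0.01)  # expect True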