The MNIST dataset consists of handwritten digit images and is divided into 60,000 examples for the training set and 10,000 examples for testing. The official training set of 60,000 is further split into an actual training set of 50,000 examples and 10,000 validation examples.


In [128]:
# A `__future__` import must be the first statement in the cell/module,
# otherwise it is a SyntaxError — keep it at the very top.
from __future__ import print_function

import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import gzip
# cPickle exists only on Python 2; fall back to pickle on Python 3
# while keeping the name used elsewhere in the notebook.
try:
    import cPickle
except ImportError:
    import pickle as cPickle
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import plot_model
#from IPython.display import SVG
#from keras.utils.vis_utils import model_to_dot --- Error In Keras BugFix Req.
from keras_diagram import ascii

Model 1 — Simple Feed-Forward Deep Net


In [130]:
# -----------------------------------------------------------------
# Load MNIST, flatten to 784-d vectors, scale to [0, 1], one-hot
# encode the targets, then build and train a feed-forward network.
# -----------------------------------------------------------------
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Visualize a sample image if desired:
# plt.imshow(x_train[0])

# Flatten 28x28 images into 784-d float vectors scaled to [0, 1].
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1] * x_train.shape[2])
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1] * x_test.shape[2])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert integer class labels to one-hot vectors.
num_classes = np.size(np.unique(y_train))
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Funnel-shaped MLP: 784 -> 1568 -> 392 -> 196 -> 98 -> 49 -> 10.
# NOTE: Dense expects an integer unit count; `in_dim / 2` is a float
# on Python 3, so integer division (//) is required. `use_bias` is a
# boolean flag, not the string 'true'.
in_dim = 784
model = Sequential()
model.add(Dense(in_dim * 2, input_dim=in_dim, use_bias=True,
                kernel_initializer='lecun_uniform',
                bias_initializer='lecun_uniform', activation='relu'))
model.add(Dense(in_dim // 2, use_bias=True, kernel_initializer='lecun_uniform',
                bias_initializer='lecun_uniform', activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(in_dim // 4, use_bias=True, kernel_initializer='lecun_uniform',
                bias_initializer='lecun_uniform', activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(in_dim // 8, use_bias=True, kernel_initializer='lecun_uniform',
                bias_initializer='lecun_uniform', activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(in_dim // 16, use_bias=True, kernel_initializer='lecun_uniform',
                bias_initializer='lecun_uniform', activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, use_bias=True, kernel_initializer='lecun_uniform',
                bias_initializer='lecun_uniform', activation='softmax'))
model.summary()

# Inspect initial weights / network diagram if desired:
# weights, biases = model.layers[0].get_weights()
# print(ascii(model))

# Training: SGD with momentum. `keras.optimizers.SGD` is the public
# optimizer class (the lowercase `sgd` attribute is not a stable API).
sgd = keras.optimizers.SGD(lr=0.001, decay=1e-7, momentum=.9)
model.compile(loss='categorical_crossentropy', optimizer=sgd,
              metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=128, epochs=10,
                    validation_split=0.1)

# Evaluate on the held-out test set.
print('Test Accuracy ====>')
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])


60000 train samples
10000 test samples

In [49]:


In [ ]:


In [ ]: