In [19]:
# loading requirements
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import matplotlib.pyplot as plt
import theano
import numpy as np
from keras import backend as K
# use Theano-style channels-first ordering: (channels, rows, cols)
K.set_image_dim_ordering('th')
%matplotlib inline
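# Quick sanity check (a sketch, not part of the original run): confirm the
# backend is now reporting Theano-style 'th' ordering before we build the model.
print(K.image_dim_ordering())  # expected: 'th'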

In [2]:
# input image dimensions
img_rows, img_cols = 28, 28

In [8]:
(X_train, y_train), (X_test, y_test) = mnist.load_data()

In [9]:
# let's print the size of each of the four splits
print ("X_train Size: ", len(X_train)," y_train Size :" ,len(y_train)," X_test Size : ",len(X_test)," y_testSize : ",len(y_test))


('X_train Size: ', 60000, ' y_train Size :', 60000, ' X_test Size : ', 10000, ' y_testSize : ', 10000)

In [10]:
# reshaping arrays to (samples, channels, rows, cols) for Theano ('th') ordering
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
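# Sanity check (a sketch, not from the original run): the arrays should now be
# channels-first, i.e. (samples, 1, 28, 28).
print(X_train.shape)  # expected: (60000, 1, 28, 28)
print(X_test.shape)   # expected: (10000, 1, 28, 28)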

In [11]:
X_train[0]


Out[11]:
array([[[  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   3,
          18,  18,  18, 126, 136, 175,  26, 166, 255, 247, 127,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,  30,  36,  94, 154, 170,
         253, 253, 253, 253, 253, 225, 172, 253, 242, 195,  64,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,  49, 238, 253, 253, 253, 253,
         253, 253, 253, 253, 251,  93,  82,  82,  56,  39,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,  18, 219, 253, 253, 253, 253,
         253, 198, 182, 247, 241,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,  80, 156, 107, 253, 253,
         205,  11,   0,  43, 154,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,  14,   1, 154, 253,
          90,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 139, 253,
         190,   2,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,  11, 190,
         253,  70,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,  35,
         241, 225, 160, 108,   1,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
          81, 240, 253, 253, 119,  25,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,  45, 186, 253, 253, 150,  27,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,  16,  93, 252, 253, 187,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   0, 249, 253, 249,  64,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,  46, 130, 183, 253, 253, 207,   2,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,  39,
         148, 229, 253, 253, 253, 250, 182,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,  24, 114, 221,
         253, 253, 253, 253, 201,  78,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,  23,  66, 213, 253, 253,
         253, 253, 198,  81,   2,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,  18, 171, 219, 253, 253, 253, 253,
         195,  80,   9,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,  55, 172, 226, 253, 253, 253, 253, 244, 133,
          11,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0, 136, 253, 253, 253, 212, 135, 132,  16,   0,
           0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0],
        [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0]]], dtype=uint8)

In [12]:
# converting pixel values to float32
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

In [13]:
# rescaling pixel values to the range [0, 1]
X_train /= 255
X_test /= 255

In [14]:
# one-hot encoding the labels into 10 classes
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
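# Sanity check (a sketch, not from the original run): each label is now a 10-way
# one-hot vector, e.g. a label of 5 becomes [0,0,0,0,0,1,0,0,0,0].
print(y_train[0], Y_train[0])
print(Y_train.shape)  # expected: (60000, 10)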

In [15]:
###########visualize##############
plt.imshow(X_train[1].squeeze())
plt.show()
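# Optional variant (a sketch, not from the original run): show the digit together
# with its label so we know which class we are looking at.
plt.imshow(X_train[1].squeeze(), cmap='gray')
plt.title('label: ' + str(y_train[1]))
plt.show()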



In [16]:
batch_size = 128
nb_classes = 10
nb_epoch = 1
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
kernel_size = (3, 3)

In [23]:
model = Sequential()

model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                        border_mode='valid',
                        input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
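# Sketch (assumes the model above was built successfully): print a layer-by-layer
# summary with output shapes and parameter counts before compiling.
model.summary()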

In [24]:
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])

In [25]:
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))


Train on 60000 samples, validate on 10000 samples
Epoch 1/1
60000/60000 [==============================] - 17s - loss: 0.3788 - acc: 0.8853 - val_loss: 0.0905 - val_acc: 0.9727
Out[25]:
<keras.callbacks.History at 0x7f8a6f01c150>
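# Sketch (assumes training above has finished): evaluate the trained model on the
# held-out test set; with metrics=['accuracy'], model.evaluate returns [loss, accuracy].
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])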

In [28]:
for layerNo, layer in enumerate(model.layers):
    print layerNo, layer


0 <keras.layers.convolutional.Convolution2D object at 0x7f8a766c5510>
1 <keras.layers.core.Activation object at 0x7f8a766c5590>
2 <keras.layers.convolutional.Convolution2D object at 0x7f8a766bd7d0>
3 <keras.layers.core.Activation object at 0x7f8a766ba850>
4 <keras.layers.pooling.MaxPooling2D object at 0x7f8a76719d10>
5 <keras.layers.core.Dropout object at 0x7f8a766ee050>
6 <keras.layers.core.Flatten object at 0x7f8a766ee390>
7 <keras.layers.core.Dense object at 0x7f8a766eea10>
8 <keras.layers.core.Activation object at 0x7f8a7675f790>
9 <keras.layers.core.Dropout object at 0x7f8a76719f90>
10 <keras.layers.core.Dense object at 0x7f8a76719dd0>
11 <keras.layers.core.Activation object at 0x7f8a766d3490>
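# Sketch (not from the original run): alongside the layer objects, print each
# layer's weight shapes to see which layers actually hold trainable parameters.
for i, l in enumerate(model.layers):
    print(i, l.__class__.__name__, [w.shape for w in l.get_weights()])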

In [31]:
"""

TO VISUALISE ONE FILTER

"""
big_array = []
# of all the layers listed above, let's look at what is inside the second convolution layer (index 2: <keras.layers.convolutional.Convolution2D object at 0x7f8a766bd7d0>)
layer = model.layers[2] 

g=layer.get_config()
"""
g is having configuration about current layer as shown:
{'W_constraint': None, 'b_constraint': None, 'name': 'convolution2d_7', 'activity_regularizer': None, 'trainable': True, 'dim_ordering': 'th', 'nb_col': 3, 'subsample': (1, 1), 'init': 'glorot_uniform', 'bias': True, 'nb_filter': 32, 'b_regularizer': None, 'W_regularizer': None, 'nb_row': 3, 'activation': 'linear', 'border_mode': 'valid'}
"""
h=layer.get_weights()
"""
h is having weights of current layer
[array([[[[ 0.07282107, -0.03278938,  0.11698628],
         [ 0.05283581,  0.05940171, -0.0462735 ],
         [-0.01122687,  0.05821611,  0.08387677]],
         .....................................]]]

"""
# shape of the weight tensor (the first element of h; the second is the bias vector)
h[0].shape  # (32, 32, 3, 3)
# visualise just the first 3x3 kernel: input channel 0 of filter 0
first_kernel = h[0][0, 0]
# create a borderless figure
fig = plt.figure(frameon=False)
# set the figure size
fig.set_size_inches(3, 3)
# hide the axes so only the kernel image is shown
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(first_kernel, interpolation='nearest', aspect='auto')
plt.show()



In [27]:
"""

TO VISUALISE ALL FILTERS

"""

big_array = []
# of all the layers listed above, let's look at what is inside the second convolution layer (index 2: <keras.layers.convolutional.Convolution2D object at 0x7f8a766bd7d0>)
layer = model.layers[2] 

g=layer.get_config()
"""
g is having configuration about current layer as shown:
{'W_constraint': None, 'b_constraint': None, 'name': 'convolution2d_7', 'activity_regularizer': None, 'trainable': True, 'dim_ordering': 'th', 'nb_col': 3, 'subsample': (1, 1), 'init': 'glorot_uniform', 'bias': True, 'nb_filter': 32, 'b_regularizer': None, 'W_regularizer': None, 'nb_row': 3, 'activation': 'linear', 'border_mode': 'valid'}
"""
h=layer.get_weights()
"""
h is having weights of current layer
[array([[[[ 0.07282107, -0.03278938,  0.11698628],
         [ 0.05283581,  0.05940171, -0.0462735 ],
         [-0.01122687,  0.05821611,  0.08387677]],
         .....................................]]]

"""
# shape of the weight tensor (the first element of h; the second is the bias vector)
h[0].shape  # (32, 32, 3, 3)
"""
This shape matches our network settings:
(32, 32, 3, 3) means this layer has 32 filters, each spanning the 32 feature
maps produced by the previous convolution layer, with a 3x3 kernel.
3*3 was our kernel size and 32 was our number of filters.
"""
# Next we save each kernel as an image in the "images" folder and, at the same
# time, build an HTML file "html.html" that lays the images out in a table of
# rows and columns.
import os
if not os.path.exists('images'):
    os.makedirs('images')
html = open('html.html', 'w')
# create the table that will hold one cell per kernel image
html.writelines("<table>")
count = 0
for outShapeNo in h[0]:
    # start a new row in the HTML table for each filter
    html.writelines("<tr>")
    for innerShapeNo in outShapeNo:
        # create a borderless figure
        fig = plt.figure(frameon=False)
        # set the figure size
        fig.set_size_inches(3, 3)
        # hide the axes so only the kernel image is shown
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        # start a new cell in the HTML table
        html.writelines("<td>")
        plt.axis('off')
        ax.imshow(innerShapeNo.squeeze(), interpolation='nearest', aspect='auto')
        # write the kernel image to a file
        fig.savefig("images/" + str(count) + ".png")
        # reference the saved image from the HTML table cell
        html.writelines("<img src='images/" + str(count) + ".png'>")
        html.writelines("</td>")
        count = count + 1
        plt.close()
    html.writelines("</tr>")
html.writelines("</table>")
html.close()


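# Alternative sketch (assumes h = layer.get_weights() from above): lay out all
# 32x32 3x3 kernels in a single matplotlib grid instead of separate image files
# plus an HTML table.
weights = h[0]                      # shape (32, 32, 3, 3)
n_filters, n_channels = weights.shape[:2]
fig, axes = plt.subplots(n_filters, n_channels, figsize=(n_channels, n_filters))
for i in range(n_filters):
    for j in range(n_channels):
        axes[i, j].imshow(weights[i, j], interpolation='nearest', cmap='gray')
        axes[i, j].axis('off')
plt.show()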