In [19]:
# loading requirements
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import matplotlib.pyplot as plt
import theano
import numpy as np
from keras import backend as K
K.set_image_dim_ordering('th')
%matplotlib inline
In [2]:
# input image dimensions
img_rows, img_cols = 28, 28
In [8]:
(X_train, y_train), (X_test, y_test) = mnist.load_data()
In [9]:
# let's print the sizes of all four splits
print("X_train size:", len(X_train), " y_train size:", len(y_train), " X_test size:", len(X_test), " y_test size:", len(y_test))
In [10]:
# reshaping arrays
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
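# Optional check (a sketch): with 'th' ordering the channel axis comes first;
# with the default 'tf' ordering the shape would instead be (samples, rows, cols, 1).
print(X_train.shape)  # expected (60000, 1, 28, 28)
print(X_test.shape)   # expected (10000, 1, 28, 28)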
In [11]:
X_train[0]
Out[11]:
In [12]:
# Converting to float
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
In [13]:
# Rescaling images to the range 0-1
X_train /= 255
X_test /= 255
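# Optional check (a sketch): after dividing by 255 all pixel values should lie in [0, 1].
print(X_train.min(), X_train.max())  # expected 0.0 1.0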
In [14]:
# converting to categorical variables
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
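# Optional sketch: to_categorical turns each integer label into a 10-element one-hot vector.
print(y_train[0], "->", Y_train[0])  # e.g. 5 -> [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]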
In [15]:
###########visualize##############
plt.imshow(X_train[1].squeeze())
plt.show()
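# A slightly richer variant (a sketch using standard matplotlib options, not part of the original plot):
# show the same digit in grayscale with its label as the title.
plt.imshow(X_train[1].squeeze(), cmap='gray')
plt.title("label: " + str(y_train[1]))
plt.show()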
In [16]:
batch_size = 128
nb_classes = 10
nb_epoch = 1
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
kernel_size = (3, 3)
In [23]:
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                        border_mode='valid',
                        input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
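# Optional: print the architecture to double-check layer output shapes and parameter counts.
model.summary()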
In [24]:
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
In [25]:
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))
Out[25]:
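# Optional sketch: the test-set metrics can also be pulled out directly with evaluate()
# ('score' is just an illustrative variable name).
score = model.evaluate(X_test, Y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])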
In [28]:
for layerNo in range(len(model.layers)):
    print(layerNo, model.layers[layerNo])
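# Optional sketch: the layer names are often easier to read than the raw object reprs.
for idx, layer in enumerate(model.layers):
    print(idx, layer.name)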
In [31]:
"""
TO VISUALISE ONE FILTER
"""
big_array = []
# out of all the layers listed above, let's look at what's inside the second convolution layer (index 2): <keras.layers.convolutional.Convolution2D object at 0x7f8a766bd7d0>
layer = model.layers[2]
g = layer.get_config()
"""
g holds the configuration of the current layer, for example:
{'W_constraint': None, 'b_constraint': None, 'name': 'convolution2d_7', 'activity_regularizer': None, 'trainable': True, 'dim_ordering': 'th', 'nb_col': 3, 'subsample': (1, 1), 'init': 'glorot_uniform', 'bias': True, 'nb_filter': 32, 'b_regularizer': None, 'W_regularizer': None, 'nb_row': 3, 'activation': 'linear', 'border_mode': 'valid'}
"""
h = layer.get_weights()
"""
h holds the weights of the current layer, for example:
[array([[[[ 0.07282107, -0.03278938, 0.11698628],
[ 0.05283581, 0.05940171, -0.0462735 ],
[-0.01122687, 0.05821611, 0.08387677]],
.....................................]]]
"""
# shape of the first weight array (the convolution kernels) in this layer
h[0].shape #(32, 32, 3, 3)
for outShapeNo in h[0]:
    for innerShapeNo in outShapeNo:
        # create a borderless figure
        fig = plt.figure(frameon=False)
        # set the image size
        fig.set_size_inches(3, 3)
        # turn the axis off (no ticks or labels)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        plt.axis('off')
        ax.imshow(innerShapeNo.squeeze(), interpolation='nearest')
        plt.show()
        break
    break
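# Alternative sketch: instead of looping and breaking, a single kernel can be pulled out
# by indexing the weight array directly (the indices 0, 0 -- filter 0, input channel 0 -- are arbitrary).
first_kernel = h[0][0, 0]
print(first_kernel.shape)  # expected (3, 3)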
In [27]:
"""
TO VISUALISE ALL FILTERS
"""
big_array = []
# out of all the layers listed above, let's look at what's inside the second convolution layer (index 2): <keras.layers.convolutional.Convolution2D object at 0x7f8a766bd7d0>
layer = model.layers[2]
g = layer.get_config()
"""
g holds the configuration of the current layer, for example:
{'W_constraint': None, 'b_constraint': None, 'name': 'convolution2d_7', 'activity_regularizer': None, 'trainable': True, 'dim_ordering': 'th', 'nb_col': 3, 'subsample': (1, 1), 'init': 'glorot_uniform', 'bias': True, 'nb_filter': 32, 'b_regularizer': None, 'W_regularizer': None, 'nb_row': 3, 'activation': 'linear', 'border_mode': 'valid'}
"""
h = layer.get_weights()
"""
h holds the weights of the current layer, for example:
[array([[[[ 0.07282107, -0.03278938, 0.11698628],
[ 0.05283581, 0.05940171, -0.0462735 ],
[-0.01122687, 0.05821611, 0.08387677]],
.....................................]]]
"""
# shape of the first weight array (the convolution kernels) in this layer
h[0].shape #(32, 32, 3, 3)
"""
this dimention corelates with our networks setting
(32, 32, 3, 3) says there are 32 images 3*3 size in one layer and such 32 filters exists
3*3 was our kernal size and 32 was out filter size
"""
# next we will save each kernel as an image in an "images" folder and simultaneously build an html file "html.html"
# which shows all the images together as a grid of rows and columns
import os
if not os.path.exists("images"):
    os.makedirs("images")
html = open('html.html', 'w')
# creating a table in the html file to put each image inside a cell
html.writelines("<table>")
count = 0
for outShapeNo in h[0]:
    # create a row inside the html table
    html.writelines("<tr>")
    for innerShapeNo in outShapeNo:
        # create a borderless figure
        fig = plt.figure(frameon=False)
        # set the image size
        fig.set_size_inches(3, 3)
        # turn the axis off (no ticks or labels)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        # create a column (cell) inside the html table
        html.writelines("<td>")
        plt.axis('off')
        ax.imshow(innerShapeNo.squeeze(), interpolation='nearest')
        # write the image to file
        fig.savefig("images/" + str(count) + ".png")
        # point the html table cell at the same image path
        html.writelines("<img src='images/" + str(count) + ".png'>")
        html.writelines("</td>")
        count = count + 1
        plt.close()
    html.writelines("</tr>")
html.writelines("</table>")
# close the file so the html is flushed to disk
html.close()