In [1]:
##############################################################################
#
# Workshop: How to develop a personalised machine learning-based application
#
# Notebook 4: Image Classification with Deep Learning Networks
#
##############################################################################

In [1]:
# jupyter notebook instructions:
# - Every cell can be executed separately from the rest.
# - You can execute cells in a non-sequential order (but be careful of 
#   the dependencies between them).
# - Execute a cell by pressing the play button or Shift+Enter.

In [2]:
# You can play with a mini deep learning model with TensorFlow's characteristics in the following URL
# http://playground.tensorflow.org

In [4]:
# Import necessary modules
# (grouped: numpy, then keras image preprocessing, then keras model building;
#  the original cell imported ImageDataGenerator twice — deduplicated here)
import numpy as np

from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import array_to_img, img_to_array
from keras.preprocessing.image import load_img

from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense

In [14]:
# dimensions of our images.
img_width, img_height = 150, 150

# Directories expected by flow_from_directory: one subfolder per class
# (e.g. cats/ and dogs/) under each of these paths.
train_data_dir = '../data/catsanddogs/train/'
validation_data_dir = '../data/catsanddogs/validation/'
# Number of images drawn per epoch for training / validation.
# NOTE(review): the generator output below reports 800 validation images,
# but only 100 are evaluated each epoch — presumably a deliberate speed
# trade-off; confirm this is intended.
nb_train_samples = 2000
nb_validation_samples = 100
nb_epoch = 50

In [15]:
# Specify the model's structure: three conv -> ReLU -> max-pool stages
# followed by a small fully-connected head with a single sigmoid output
# for binary (cat vs. dog) classification.  This uses the Keras 1.x API,
# where Convolution2D(filters, rows, cols) takes the kernel size as two
# positional integers.
model = Sequential()
# NOTE(review): input_shape=(3, w, h) is channels-first ("th" dim ordering).
# The generators below will feed whatever layout the backend's
# image_dim_ordering produces — confirm the Keras config matches, since a
# "tf" setting would yield (w, h, 3) arrays instead.
model.add(Convolution2D(32, 3, 3, input_shape=(3, img_width, img_height)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

# Classifier head: flatten the feature maps, one 64-unit hidden layer with
# dropout for regularisation, then a single sigmoid unit whose output is
# interpreted as the probability of class 1.
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

In [16]:
# Binary classification setup: binary cross-entropy loss with the RMSprop
# optimizer, tracking accuracy during training.
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# this is the augmentation configuration we will use for training:
# pixel rescale to [0, 1] plus random shear/zoom/horizontal-flip to
# artificially enlarge the small (2000-image) training set.
train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling — validation data must not be augmented.
test_datagen = ImageDataGenerator(rescale=1./255)

# Stream (image, label) batches from the class subdirectories, resizing
# every image to 150x150; labels are derived from the folder names.
train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=32,
        class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=32,
        class_mode='binary')


Found 2000 images belonging to 2 classes.
Found 800 images belonging to 2 classes.

In [17]:
# Train with the Keras 1.x generator API: samples_per_epoch / nb_val_samples
# are counts of individual images drawn per epoch (not batch counts).
model.fit_generator(
        train_generator,
        samples_per_epoch=nb_train_samples,
        nb_epoch=nb_epoch,
        validation_data=validation_generator,
        nb_val_samples=nb_validation_samples)

# Persist only the learned weights (not the architecture); to reload,
# rebuild the identical model structure first, then call load_weights.
model.save_weights('cats_dogs.h5')


Epoch 1/50
2000/2000 [==============================] - 77s - loss: 0.7115 - acc: 0.5040 - val_loss: 0.6855 - val_acc: 0.6328
Epoch 2/50
2000/2000 [==============================] - 76s - loss: 0.6848 - acc: 0.5715 - val_loss: 0.6706 - val_acc: 0.5703
Epoch 3/50
2000/2000 [==============================] - 76s - loss: 0.6547 - acc: 0.6170 - val_loss: 0.6324 - val_acc: 0.6719
Epoch 4/50
2000/2000 [==============================] - 77s - loss: 0.6332 - acc: 0.6555 - val_loss: 0.5828 - val_acc: 0.7109
Epoch 5/50
2000/2000 [==============================] - 77s - loss: 0.5977 - acc: 0.6905 - val_loss: 0.5850 - val_acc: 0.7266
Epoch 6/50
2000/2000 [==============================] - 77s - loss: 0.5916 - acc: 0.6935 - val_loss: 0.8845 - val_acc: 0.5625
Epoch 7/50
2000/2000 [==============================] - 77s - loss: 0.5745 - acc: 0.7070 - val_loss: 0.6510 - val_acc: 0.6484
Epoch 8/50
2000/2000 [==============================] - 78s - loss: 0.5496 - acc: 0.7185 - val_loss: 0.5336 - val_acc: 0.7344
Epoch 9/50
2000/2000 [==============================] - 77s - loss: 0.5508 - acc: 0.7275 - val_loss: 0.5470 - val_acc: 0.7031
Epoch 10/50
2000/2000 [==============================] - 77s - loss: 0.5442 - acc: 0.7365 - val_loss: 0.5891 - val_acc: 0.7109
Epoch 11/50
2000/2000 [==============================] - 78s - loss: 0.5243 - acc: 0.7395 - val_loss: 0.6157 - val_acc: 0.6719
Epoch 12/50
2000/2000 [==============================] - 79s - loss: 0.5247 - acc: 0.7480 - val_loss: 0.6219 - val_acc: 0.6562
Epoch 13/50
2000/2000 [==============================] - 79s - loss: 0.5020 - acc: 0.7535 - val_loss: 0.5419 - val_acc: 0.6797
Epoch 14/50
2000/2000 [==============================] - 79s - loss: 0.4918 - acc: 0.7675 - val_loss: 0.4964 - val_acc: 0.7969
Epoch 15/50
2000/2000 [==============================] - 80s - loss: 0.4826 - acc: 0.7705 - val_loss: 0.4766 - val_acc: 0.7969
Epoch 16/50
2000/2000 [==============================] - 79s - loss: 0.4865 - acc: 0.7690 - val_loss: 0.5431 - val_acc: 0.7422
Epoch 17/50
2000/2000 [==============================] - 80s - loss: 0.4621 - acc: 0.7825 - val_loss: 0.6426 - val_acc: 0.7578
Epoch 18/50
2000/2000 [==============================] - 81s - loss: 0.4576 - acc: 0.7885 - val_loss: 0.4237 - val_acc: 0.7734
Epoch 19/50
2000/2000 [==============================] - 81s - loss: 0.4490 - acc: 0.7980 - val_loss: 0.3893 - val_acc: 0.8438
Epoch 20/50
2000/2000 [==============================] - 80s - loss: 0.4417 - acc: 0.8045 - val_loss: 0.4938 - val_acc: 0.7344
Epoch 21/50
2000/2000 [==============================] - 80s - loss: 0.4371 - acc: 0.7975 - val_loss: 0.5098 - val_acc: 0.7656
Epoch 22/50
2000/2000 [==============================] - 80s - loss: 0.4254 - acc: 0.8080 - val_loss: 0.6611 - val_acc: 0.7578
Epoch 23/50
2000/2000 [==============================] - 80s - loss: 0.4168 - acc: 0.8085 - val_loss: 0.4493 - val_acc: 0.7812
Epoch 24/50
2000/2000 [==============================] - 80s - loss: 0.4184 - acc: 0.8080 - val_loss: 0.4377 - val_acc: 0.8359
Epoch 25/50
2000/2000 [==============================] - 80s - loss: 0.4178 - acc: 0.8120 - val_loss: 0.4217 - val_acc: 0.8047
Epoch 26/50
2000/2000 [==============================] - 80s - loss: 0.4316 - acc: 0.8070 - val_loss: 0.6632 - val_acc: 0.7500
Epoch 27/50
2000/2000 [==============================] - 80s - loss: 0.4270 - acc: 0.8240 - val_loss: 0.6157 - val_acc: 0.7500
Epoch 28/50
2000/2000 [==============================] - 80s - loss: 0.3823 - acc: 0.8305 - val_loss: 0.3989 - val_acc: 0.8047
Epoch 29/50
2000/2000 [==============================] - 80s - loss: 0.3869 - acc: 0.8255 - val_loss: 0.4857 - val_acc: 0.8359
Epoch 30/50
2000/2000 [==============================] - 81s - loss: 0.3758 - acc: 0.8360 - val_loss: 0.4571 - val_acc: 0.7734
Epoch 31/50
2000/2000 [==============================] - 81s - loss: 0.4147 - acc: 0.8225 - val_loss: 0.5811 - val_acc: 0.7188
Epoch 32/50
2000/2000 [==============================] - 80s - loss: 0.3863 - acc: 0.8315 - val_loss: 0.6059 - val_acc: 0.7578
Epoch 33/50
2000/2000 [==============================] - 80s - loss: 0.3804 - acc: 0.8410 - val_loss: 0.4127 - val_acc: 0.7656
Epoch 34/50
2000/2000 [==============================] - 80s - loss: 0.3891 - acc: 0.8250 - val_loss: 0.4479 - val_acc: 0.7969
Epoch 35/50
2000/2000 [==============================] - 80s - loss: 0.3613 - acc: 0.8420 - val_loss: 0.5466 - val_acc: 0.7891
Epoch 36/50
2000/2000 [==============================] - 80s - loss: 0.3995 - acc: 0.8235 - val_loss: 0.6275 - val_acc: 0.7656
Epoch 37/50
2000/2000 [==============================] - 80s - loss: 0.3534 - acc: 0.8515 - val_loss: 0.7073 - val_acc: 0.7812
Epoch 38/50
2000/2000 [==============================] - 80s - loss: 0.3650 - acc: 0.8435 - val_loss: 0.3808 - val_acc: 0.8047
Epoch 39/50
2000/2000 [==============================] - 81s - loss: 0.3793 - acc: 0.8420 - val_loss: 0.4568 - val_acc: 0.8125
Epoch 40/50
2000/2000 [==============================] - 80s - loss: 0.3427 - acc: 0.8420 - val_loss: 0.3798 - val_acc: 0.7891
Epoch 41/50
2000/2000 [==============================] - 80s - loss: 0.3669 - acc: 0.8445 - val_loss: 1.1931 - val_acc: 0.6797
Epoch 42/50
2000/2000 [==============================] - 80s - loss: 0.3802 - acc: 0.8320 - val_loss: 0.8047 - val_acc: 0.7188
Epoch 43/50
2000/2000 [==============================] - 81s - loss: 0.3256 - acc: 0.8575 - val_loss: 0.4321 - val_acc: 0.8047
Epoch 44/50
2000/2000 [==============================] - 79s - loss: 0.3299 - acc: 0.8570 - val_loss: 0.4672 - val_acc: 0.8438
Epoch 45/50
2000/2000 [==============================] - 80s - loss: 0.3383 - acc: 0.8510 - val_loss: 0.5386 - val_acc: 0.7969
Epoch 46/50
2000/2000 [==============================] - 79s - loss: 0.3497 - acc: 0.8615 - val_loss: 1.0127 - val_acc: 0.7188
Epoch 47/50
2000/2000 [==============================] - 79s - loss: 0.3421 - acc: 0.8565 - val_loss: 0.7045 - val_acc: 0.6562
Epoch 48/50
2000/2000 [==============================] - 79s - loss: 0.3362 - acc: 0.8515 - val_loss: 0.6270 - val_acc: 0.7812
Epoch 49/50
2000/2000 [==============================] - 79s - loss: 0.3592 - acc: 0.8420 - val_loss: 0.4948 - val_acc: 0.7812
Epoch 50/50
2000/2000 [==============================] - 79s - loss: 0.3228 - acc: 0.8650 - val_loss: 0.4144 - val_acc: 0.7969

In [31]:
# Load a test image at the size the network was trained on (150x150);
# without target_size, load_img keeps the file's native dimensions and the
# model would reject any image that is not already 150x150.
img1 = load_img("../data/catsanddogs/test/resize-3.jpg", target_size=(img_width, img_height))
x = img_to_array(img1)
# Rescale to [0, 1] to match the training-time preprocessing
# (rescale=1./255 in the generators above).  Feeding raw 0-255 pixels
# saturates the sigmoid and makes predictions unreliable.
x = x / 255.
# Add a leading batch dimension: the model predicts on batches of images.
x = x.reshape((1,) + x.shape)

In [43]:
print model.predict_classes(x, batch_size=1, verbose=0)


[[0]]

In [47]:
img2 = load_img("../data/catsanddogs/test/11.jpg")
x = img_to_array(img1)
x = x.reshape((1,) + x.shape)
print model.predict(x, batch_size=1, verbose=1)


1/1 [==============================] - 0s
[[ 0.]]

In [ ]: