Image Classification With Keras Convolutional Neural Network


In [1]:
from __future__ import print_function, division

import numpy as np
import random
import os
import glob
import cv2
import datetime
import pandas as pd
import time
import h5py
import csv
import warnings

from scipy.misc import imresize, imsave

from sklearn.cross_validation import KFold, train_test_split
from sklearn.metrics import log_loss, confusion_matrix
from sklearn.utils import shuffle

from PIL import Image, ImageChops, ImageOps

import matplotlib.pyplot as plt

from keras import backend as K
from keras.callbacks import EarlyStopping, Callback
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras import optimizers
from keras.models import Sequential, model_from_json
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D, Activation, Dropout, Flatten, Dense

%matplotlib inline


c:\python27\lib\site-packages\sklearn\cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)
Using Theano backend.
Using gpu device 0: GeForce GTX 850M (CNMeM is enabled with initial size: 80.0% of memory, cuDNN 5005)

Configuration and Hyperparameters

First, let's define a custom early stopping callback, which we can plug into the training hyperparameters below


In [2]:
class EarlyStoppingByLossVal(Callback):
    """Custom callback to stop training once a target validation loss is reached"""
    def __init__(self, monitor='val_loss', value=0.45, verbose=0):
        super(EarlyStoppingByLossVal, self).__init__()
        self.monitor = monitor
        self.value = value
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs={}):
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            return

        if current < self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            self.model.stop_training = True

Then we'll set all the relevant paths and configurations


In [3]:
### paths to training and testing data
train_path = 'C:/Projects/playground/kaggle/dogs_vs_cats/data_no_split/train'
test_path = 'C:/Projects/playground/kaggle/dogs_vs_cats/data_no_split/test'

### path for preloaded vgg16 weights
weights_path = 'C:/Projects/playground/kaggle/dogs_vs_cats/vgg16_weights.h5'
bottleneck_model_weights_path = 'C:/Projects/playground/kaggle/dogs_vs_cats/bottleneck_weights.h5'

### settings for keras early stopping callback
early_stopping = EarlyStopping(monitor='val_loss', patience=1, mode='auto')
# early_stopping = EarlyStoppingByLossVal(verbose=2, value=0.3)

### other hyperparameters
n_folds = 2
batch_size = 16
nb_epoch = 50
bottleneck_epoch = 3  # used when training bottleneck model
val_split = .15  # if not using kfold cv
classes = ["dog", "cat"]
num_classes = len(classes)

### image dimensions
img_width, img_height = 250, 250
num_channels = 3

Helper Functions For Loading Data


In [4]:
def load_images(path):
    img = cv2.imread(path)  # note: OpenCV loads images in BGR channel order
    resized = cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_LINEAR)
    return resized
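
Since OpenCV returns BGR arrays, colors will look swapped if you display a loaded image directly with matplotlib. A minimal sketch of the conversion (the filename below is a hypothetical example, not a file referenced elsewhere in this notebook):

In [ ]:
### convert a loaded image from BGR to RGB before plotting
### (example_path is a hypothetical stand-in for any training image)
example_path = os.path.join(train_path, 'dogs', 'dog.0.jpg')
rgb = cv2.cvtColor(load_images(example_path), cv2.COLOR_BGR2RGB)
plt.imshow(rgb)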

In [5]:
def load_train():
    X_train = []
    X_train_id = []
    y_train = []
    start_time = time.time()

    print('Loading training images...')
    folders = ["dogs", "cats"]
    for fld in folders:
        index = folders.index(fld)
        print('Loading {} files (Index: {})'.format(fld, index))
        path = os.path.join(train_path, fld, '*g')
        files = glob.glob(path)
        for fl in files:
            flbase = os.path.basename(fl)
            img = load_images(fl)
            X_train.append(img)
            X_train_id.append(flbase)
            y_train.append(index)

    print('Training data load time: {} seconds'.format(round(time.time() - start_time, 2)))
    return X_train, y_train, X_train_id

In [6]:
def load_test():
    path = os.path.join(test_path, 'test', '*.jpg')
    files = sorted(glob.glob(path))

    X_test = []
    X_test_id = []
    for fl in files:
        flbase = os.path.basename(fl)
        img = load_images(fl)
        X_test.append(img)
        X_test_id.append(flbase)

    return X_test, X_test_id

In [7]:
def normalize_train_data():
    train_data, train_target, train_id = load_train()

    train_data = np.array(train_data, dtype=np.uint8)
    train_target = np.array(train_target, dtype=np.uint8)

    train_data = train_data.transpose((0, 3, 1, 2))

    train_data = train_data.astype('float32')
    train_data = train_data / 255
    train_target = np_utils.to_categorical(train_target, num_classes)

    print('Shape of training data:', train_data.shape)
    return train_data, train_target, train_id

In [8]:
def normalize_test_data():
    start_time = time.time()
    test_data, test_id = load_test()

    test_data = np.array(test_data, dtype=np.uint8)
    test_data = test_data.transpose((0, 3, 1, 2))

    test_data = test_data.astype('float32')
    test_data = test_data / 255

    print('Shape of testing data:', test_data.shape)
    return test_data, test_id

In [50]:
train_data, train_target, train_id = normalize_train_data()


Loading training images...
Loading dogs files (Index: 0)
Loading cats files (Index: 1)
Training data load time: 340.11 seconds
Shape of training data: (25000L, 3L, 250L, 250L)

Helper Function For Plotting Images

This function plots up to nine images in a 3x3 grid (fewer, if fewer images are passed) and writes the true, and optionally the predicted, class below each image.


In [9]:
def plot_images(images, cls_true, cls_pred=None):
    
    if len(images) == 0:
        print("no images to show")
        return 
    else:
        random_indices = random.sample(range(len(images)), min(len(images), 9))
            
    images, cls_true  = zip(*[(images[i], cls_true[i]) for i in random_indices])
    
    # Create figure with 3x3 sub-plots.
    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)

    for i, ax in enumerate(axes.flat):
        # Plot image.
        image = images[i].transpose((1, 2, 0))
        ax.imshow(image)

        # Show true and predicted classes.
        if cls_pred is None:
            xlabel = "True: {0}".format(cls_true[i])
        else:
            xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])

        # Show the classes as the label on the x-axis.
        ax.set_xlabel(xlabel)
        
        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])
    
    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()

Build Model

We use the VGG16 architecture with pretrained weights, chosen for their simplicity and consistently strong performance


In [ ]:
def build_model():
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    
    # load the weights of the VGG16 networks
    f = h5py.File(weights_path, 'r')
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            # we don't look at the last (fully-connected) layers in the savefile
            break
        g = f['layer_{}'.format(k)]
        weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
        model.layers[k].set_weights(weights)
    f.close()
    
    # build a classifier model to put on top of the convolutional model
    bottleneck_model = Sequential()
    bottleneck_model.add(Flatten(input_shape=model.output_shape[1:]))
    bottleneck_model.add(Dense(256, activation='relu'))
    bottleneck_model.add(Dropout(0.5))
    bottleneck_model.add(Dense(num_classes, activation='softmax'))
    
    # load weights from bottleneck model
    bottleneck_model.load_weights(bottleneck_model_weights_path)

    # add the model on top of the convolutional base
    model.add(bottleneck_model)

    # set the first 25 layers (up to the last conv block)
    # to non-trainable (weights will not be updated)
    for layer in model.layers[:25]:
        layer.trainable = False
        
    # compile the model with a SGD/momentum optimizer
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-4, momentum=0.9))
    return model

Before we start training, we use the bottleneck method: each image is run once through VGG16's convolutional base, and the resulting features are saved as .npy files.


In [ ]:
def save_bottleneck_features():
    # train_data was already scaled to [0, 1] in normalize_train_data, so don't rescale again here
    datagen = ImageDataGenerator()

    # build the VGG16 network
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # load the weights of the VGG16 networks
    f = h5py.File(weights_path, 'r')
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            # we don't look at the last (fully-connected) layers in the savefile
            break
        g = f['layer_{}'.format(k)]
        weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
        model.layers[k].set_weights(weights)
    f.close()
    print('Model loaded.')
    
    # create validation split
    X_train, X_valid, Y_train, Y_valid = train_test_split(train_data, train_target, test_size=val_split)

    # create generator for train data
    generator = datagen.flow(
            X_train,
            Y_train,
            batch_size=batch_size,
            shuffle=False)
    
    # save train features to .npy file
    bottleneck_features_train = model.predict_generator(generator, X_train.shape[0])
    np.save(open('bottleneck_features_train.npy', 'wb'), bottleneck_features_train)

    # create generator for validation data
    generator = datagen.flow(
            X_valid,
            Y_valid,
            batch_size=batch_size,
            shuffle=False)
    
    # save validation features to .npy file
    bottleneck_features_validation = model.predict_generator(generator, X_valid.shape[0])
    np.save(open('bottleneck_features_validation.npy', 'wb'), bottleneck_features_validation)
    return Y_train, Y_valid

Then we train a small fully connected model on these bottleneck features. Its saved weights are what build_model loads for the classifier head.


In [ ]:
def train_bottleneck_model():
    train_labels, validation_labels = save_bottleneck_features()

    train_data = np.load(open('bottleneck_features_train.npy', 'rb'))
    validation_data = np.load(open('bottleneck_features_validation.npy', 'rb'))
    
    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(optimizer='rmsprop', loss='categorical_crossentropy')

    model.fit(train_data,
              train_labels,
              nb_epoch=bottleneck_epoch,
              batch_size=batch_size,
              validation_data=(validation_data, validation_labels),
              callbacks=[early_stopping],
              verbose=2)
    
    model.save_weights(bottleneck_model_weights_path)
    return model

In [ ]:
#  train_bottleneck_model()  # run this once, then leave it commented out -- it takes a while to run

Main Training Function


In [ ]:
def run_train(n_folds=n_folds):
    num_fold = 0
#     sum_score = 0
    models = []   
    callbacks = [
        early_stopping
    ]
    
    ### if we just want to train a single model without cross-validation, set n_folds to 0 or None
    if not n_folds:
        model = build_model()
        
        X_train, X_valid, Y_train, Y_valid = train_test_split(train_data, train_target, test_size=val_split)
        print('Training...')
        print('Size of train split: ', len(X_train), len(Y_train))
        print('Size of validation split: ', len(X_valid), len(Y_valid))
              
        model.fit(X_train,
          Y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          shuffle=True,
          verbose=1,
          validation_data=(X_valid, Y_valid),
          callbacks=callbacks)

        predictions_valid = model.predict(X_valid.astype('float32'), batch_size=batch_size, verbose=2)
#         score = log_loss(Y_valid, predictions_valid)
#         print('Loss: ', score)
#         sum_score += score
        models.append(model)
                     
    else:
        kf = KFold(len(train_id), n_folds=n_folds, shuffle=True, random_state=7)

        for train_index, test_index in kf:
            model = build_model()
            X_train = train_data[train_index]
            Y_train = train_target[train_index]
            X_valid = train_data[test_index]
            Y_valid = train_target[test_index]

            num_fold += 1
            print('Training on fold {} of {}...'.format(num_fold, n_folds))
            print('Size of train split: ', len(X_train), len(Y_train))
            print('Size of validation split: ', len(X_valid), len(Y_valid))

            model.fit(X_train,
                      Y_train,
                      batch_size=batch_size,
                      nb_epoch=nb_epoch,
                      shuffle=True,
                      verbose=1,
                      validation_data=(X_valid, Y_valid),
                      callbacks=callbacks)

#             predictions_valid = model.predict(X_valid.astype('float32'), batch_size=batch_size, verbose=2)
#             score = log_loss(Y_valid, predictions_valid)
#             print('Loss for fold {0}: '.format(num_fold), score)
#             sum_score += score*len(test_index)
            models.append(model)
#         score = sum_score/len(train_data)
        
#     print("Average loss across folds: ", score)
    
    info_string = "{0}fold_{1}x{2}_{3}epoch_patience_vgg16".format(n_folds, img_width, img_height, nb_epoch)
    return info_string, models

Helper Functions For Making Predictions


In [45]:
def create_submission(predictions, test_id, info):
    result = pd.DataFrame(predictions, columns=classes)
    result.loc[:, 'id'] = pd.Series(test_id, index=result.index)
    result = result[["id", "dog"]].rename(columns={"dog": "label"})
    now = datetime.datetime.now()
    sub_file = '{0}_{1}.csv'.format(info, now.strftime('%Y-%m-%d-%H-%M'))
    result.to_csv(sub_file, index=False)

In [11]:
def merge_several_folds_mean(data, n_folds):
    a = np.array(data[0])
    for i in range(1, n_folds):
        a += np.array(data[i])
    a /= n_folds
    return a.tolist()
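
As a quick sanity check, the helper above is just an element-wise mean across folds; the probabilities below are made-up illustrative values:

In [ ]:
### merge_several_folds_mean(data, n) is equivalent to np.mean(data, axis=0)
fold_preds = [np.array([[0.9, 0.1], [0.2, 0.8]]),
              np.array([[0.7, 0.3], [0.4, 0.6]])]
print(merge_several_folds_mean(fold_preds, 2))  # [[0.8, 0.2], [0.3, 0.7]]
print(np.mean(fold_preds, axis=0).tolist())     # same result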

In [25]:
def ensemble_predict(info_string, models):
    num_fold = 0
    yfull_test = []
    test_id = []
    n_folds = len(models)

    for i in range(n_folds):
        model = models[i]
        num_fold += 1
        print('Predicting on fold {} of {}'.format(num_fold, n_folds))
        test_data, test_id = normalize_test_data()
        test_prediction = model.predict(test_data, batch_size=batch_size, verbose=2)
        yfull_test.append(test_prediction)

    preds = merge_several_folds_mean(yfull_test, n_folds)
    create_submission(preds, test_id, info_string)

Run the training and prediction code


In [47]:
info_string, models = run_train()
ensemble_predict(info_string, models)

Performance Metrics


In [48]:
model = random.choice(models)

### or choose one manually...

# model = models[1]

In [65]:
# perm = np.arange(int(val_split*len(train_target)))
# np.random.shuffle(perm)
# sample_valid = train_data[perm]
# labels_valid = train_target[perm]

### sample 1000 distinct training images; note these come from the full
### training set, so the accuracy computed below is somewhat optimistic
ixs = random.sample(range(len(train_target)), 1000)
sample_valid = np.array([train_data[ix] for ix in ixs])
labels_valid = np.array([train_target[ix] for ix in ixs])

In [61]:
def plot_example_errors(cls_pred, correct):
    # This function is called from print_validation_accuracy() below.

    # cls_pred is an array of the predicted class-number for
    # all images in the validation set.

    # correct is a boolean array whether the predicted class
    # is equal to the true class for each image in the validation set.

    # Negate the boolean array.
    incorrect = (correct == False)
    
    # Get the images from the validation set that have been
    # incorrectly classified.
    images = sample_valid[incorrect]
    
    # Get the predicted classes for those images.
    cls_pred = cls_pred[incorrect]

    # Get the true classes for those images.
    labels = np.array([classes[np.argmax(x)] for x in labels_valid])
    cls_true = labels[incorrect]
    
    # Plot the first 9 images.
    plot_images(images=images[0:9],
                cls_true=cls_true[0:9],
                cls_pred=cls_pred[0:9])

In [62]:
def plot_confusion_matrix(cls_pred):
    # This is called from print_validation_accuracy() below.

    # cls_pred is an array of the predicted class-number for
    # all images in the validation set.

    # Get the true classifications for the test-set.
    cls_true = [classes[np.argmax(x)] for x in labels_valid]
    
    # Get the confusion matrix using sklearn.
    cm = confusion_matrix(y_true=cls_true,
                          y_pred=cls_pred,
                          labels=classes)

    # Print the confusion matrix as text.
    print(cm)

    # Plot the confusion matrix as an image.
    plt.matshow(cm)

    # Make various adjustments to the plot.
    plt.colorbar()
    tick_marks = np.arange(num_classes)
    plt.xticks(tick_marks, classes)
    plt.yticks(tick_marks, classes)
    plt.xlabel('Predicted')
    plt.ylabel('True')

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()

In [63]:
def print_validation_accuracy(show_example_errors=False,
                              show_confusion_matrix=False):
    
    test_batch_size = 4
    
    # Number of images in the validation set.
    num_test = len(labels_valid)
    
    cls_pred = np.zeros(shape=num_test, dtype=np.int)
    
    i = 0
    # iterate through batches and create list of predictions
    while i < num_test:
        # The ending index for the next batch is denoted j.
        j = min(i + test_batch_size, num_test)

        # Get the images from the test-set between index i and j.
        images = sample_valid[i:j, :]

        # Calculate the predicted class for each image in the batch.
        cls_pred[i:j] = [np.argmax(x) for x in model.predict(images)]

        # Set the start-index for the next batch to the
        # end-index of the current batch.
        i = j
    
    # Convert the predicted and true class indices to class names.
    cls_pred = np.array([classes[x] for x in cls_pred])
    cls_true = np.array([classes[np.argmax(x)] for x in labels_valid])

    # Create a boolean array whether each image is correctly classified.
    correct = (cls_true == cls_pred)

    # Calculate the number of correctly classified images.
    # When summing a boolean array, False means 0 and True means 1.
    correct_sum = correct.sum()

    # Classification accuracy is the number of correctly classified
    # images divided by the total number of images in the test-set.
    acc = float(correct_sum) / num_test

    # Print the accuracy.
    msg = "Accuracy on validation set: {0:.1%} ({1} / {2})"
    print(msg.format(acc, correct_sum, num_test))

    # Plot some examples of mis-classifications, if desired.
    if show_example_errors:
        print("Example errors:")
        plot_example_errors(cls_pred=cls_pred, correct=correct)

    # Plot the confusion matrix, if desired.
    if show_confusion_matrix:
        print("Confusion Matrix:")
        plot_confusion_matrix(cls_pred=cls_pred)

In [66]:
print_validation_accuracy(show_example_errors=True, show_confusion_matrix=True)


Accuracy on validation set: 97.0% (970 / 1000)
Example errors:
Confusion Matrix:
[[479   6]
 [ 24 491]]

Model Summary & Feature Visualization


In [56]:
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
zeropadding2d_1 (ZeroPadding2D)  (None, 3, 252, 252)   0           zeropadding2d_input_5[0][0]      
____________________________________________________________________________________________________
conv1_1 (Convolution2D)          (None, 64, 250, 250)  0           zeropadding2d_1[0][0]            
____________________________________________________________________________________________________
zeropadding2d_2 (ZeroPadding2D)  (None, 64, 252, 252)  0           conv1_1[0][0]                    
____________________________________________________________________________________________________
conv1_2 (Convolution2D)          (None, 64, 250, 250)  0           zeropadding2d_2[0][0]            
____________________________________________________________________________________________________
maxpooling2d_1 (MaxPooling2D)    (None, 64, 125, 125)  0           conv1_2[0][0]                    
____________________________________________________________________________________________________
zeropadding2d_3 (ZeroPadding2D)  (None, 64, 127, 127)  0           maxpooling2d_1[0][0]             
____________________________________________________________________________________________________
conv2_1 (Convolution2D)          (None, 128, 125, 125) 0           zeropadding2d_3[0][0]            
____________________________________________________________________________________________________
zeropadding2d_4 (ZeroPadding2D)  (None, 128, 127, 127) 0           conv2_1[0][0]                    
____________________________________________________________________________________________________
conv2_2 (Convolution2D)          (None, 128, 125, 125) 0           zeropadding2d_4[0][0]            
____________________________________________________________________________________________________
maxpooling2d_2 (MaxPooling2D)    (None, 128, 62, 62)   0           conv2_2[0][0]                    
____________________________________________________________________________________________________
zeropadding2d_5 (ZeroPadding2D)  (None, 128, 64, 64)   0           maxpooling2d_2[0][0]             
____________________________________________________________________________________________________
conv3_1 (Convolution2D)          (None, 256, 62, 62)   0           zeropadding2d_5[0][0]            
____________________________________________________________________________________________________
zeropadding2d_6 (ZeroPadding2D)  (None, 256, 64, 64)   0           conv3_1[0][0]                    
____________________________________________________________________________________________________
conv3_2 (Convolution2D)          (None, 256, 62, 62)   0           zeropadding2d_6[0][0]            
____________________________________________________________________________________________________
zeropadding2d_7 (ZeroPadding2D)  (None, 256, 64, 64)   0           conv3_2[0][0]                    
____________________________________________________________________________________________________
conv3_3 (Convolution2D)          (None, 256, 62, 62)   0           zeropadding2d_7[0][0]            
____________________________________________________________________________________________________
maxpooling2d_3 (MaxPooling2D)    (None, 256, 31, 31)   0           conv3_3[0][0]                    
____________________________________________________________________________________________________
zeropadding2d_8 (ZeroPadding2D)  (None, 256, 33, 33)   0           maxpooling2d_3[0][0]             
____________________________________________________________________________________________________
conv4_1 (Convolution2D)          (None, 512, 31, 31)   0           zeropadding2d_8[0][0]            
____________________________________________________________________________________________________
zeropadding2d_9 (ZeroPadding2D)  (None, 512, 33, 33)   0           conv4_1[0][0]                    
____________________________________________________________________________________________________
conv4_2 (Convolution2D)          (None, 512, 31, 31)   0           zeropadding2d_9[0][0]            
____________________________________________________________________________________________________
zeropadding2d_10 (ZeroPadding2D) (None, 512, 33, 33)   0           conv4_2[0][0]                    
____________________________________________________________________________________________________
conv4_3 (Convolution2D)          (None, 512, 31, 31)   0           zeropadding2d_10[0][0]           
____________________________________________________________________________________________________
maxpooling2d_4 (MaxPooling2D)    (None, 512, 15, 15)   0           conv4_3[0][0]                    
____________________________________________________________________________________________________
zeropadding2d_11 (ZeroPadding2D) (None, 512, 17, 17)   0           maxpooling2d_4[0][0]             
____________________________________________________________________________________________________
conv5_1 (Convolution2D)          (None, 512, 15, 15)   2359808     zeropadding2d_11[0][0]           
____________________________________________________________________________________________________
zeropadding2d_12 (ZeroPadding2D) (None, 512, 17, 17)   0           conv5_1[0][0]                    
____________________________________________________________________________________________________
conv5_2 (Convolution2D)          (None, 512, 15, 15)   2359808     zeropadding2d_12[0][0]           
____________________________________________________________________________________________________
zeropadding2d_13 (ZeroPadding2D) (None, 512, 17, 17)   0           conv5_2[0][0]                    
____________________________________________________________________________________________________
conv5_3 (Convolution2D)          (None, 512, 15, 15)   2359808     zeropadding2d_13[0][0]           
____________________________________________________________________________________________________
maxpooling2d_5 (MaxPooling2D)    (None, 512, 7, 7)     0           conv5_3[0][0]                    
____________________________________________________________________________________________________
sequential_10 (Sequential)       (None, 2)             6423298     maxpooling2d_5[0][0]             
====================================================================================================
Total params: 13502722
____________________________________________________________________________________________________

In [57]:
layer_name = 'conv5_3'


# util function to convert a tensor into a valid image
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_dim_ordering() == 'th':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x


# this is the placeholder for the input images
input_img = model.input

# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])


def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)


kept_filters = []
for filter_index in range(0, 512):
    print('Processing filter %d' % filter_index)
    start_time = time.time()

    # we build a loss function that maximizes the activation
    # of the nth filter of the layer considered
    layer_output = layer_dict[layer_name].output
    if K.image_dim_ordering() == 'th':
        loss = K.mean(layer_output[:, filter_index, :, :])
    else:
        loss = K.mean(layer_output[:, :, :, filter_index])

    # we compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_img)[0]

    # normalization trick: we normalize the gradient
    grads = normalize(grads)

    # this function returns the loss and grads given the input picture
    iterate = K.function([input_img], [loss, grads])

    # step size for gradient ascent
    step = 1.

    # we start from a gray image with some random noise
    if K.image_dim_ordering() == 'th':
        input_img_data = np.random.random((1, 3, img_width, img_height))
    else:
        input_img_data = np.random.random((1, img_width, img_height, 3))
    input_img_data = (input_img_data - 0.5) * 20 + 128

    # we run gradient ascent for 20 steps
    for i in range(20):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step

        if loss_value <= 0.:
            # some filters get stuck at 0; we can skip them
            break

    # decode the resulting input image
    if loss_value > 0:
        img = deprocess_image(input_img_data[0])
        kept_filters.append((img, loss_value))
    end_time = time.time()
    print('Filter %d processed in %ds' % (filter_index, end_time - start_time))

# we will stitch the best n**2 filters on an n x n grid.
n = 5

# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top n**2 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
kept_filters = kept_filters[:n * n]

# build a black picture with enough space for
# our n x n filters of size img_width x img_height, with a 5px margin in between
margin = 5
width = n * img_width + (n - 1) * margin
height = n * img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))

# fill the picture with our saved filters
for i in range(n):
    for j in range(n):
        img, loss = kept_filters[i * n + j]
        stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
                         (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img

# save image and display
imsave('feats.jpg', stitched_filters)
plt.imshow(stitched_filters)


Processing filter 0
Filter 0 processed in 35s
Processing filter 1
Filter 1 processed in 15s

[... output truncated: filters 2 through 510 processed, ~13-17s each ...]

Processing filter 511
Filter 511 processed in 14s
Out[57]:
<matplotlib.image.AxesImage at 0x130ea3470>

Save Model


In [ ]:
### if we like this model, save the weights

model.save_weights("favorite_model.h5")
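
To use the model again later, rebuild the same architecture and load the saved weights back in; a minimal sketch, assuming the VGG16 and bottleneck weight files referenced in build_model are still on disk:

In [ ]:
### rebuild the architecture, then load the weights saved above
model = build_model()
model.load_weights("favorite_model.h5")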

MISC

Script for adding augmented images to the dataset using the Keras ImageDataGenerator


In [ ]:
### augmentation script

# train_path = 'C:/Projects/playground/kaggle/fish/data_aug/train/YFT/'

# ## define data preparation
# datagen = ImageDataGenerator(
#                              width_shift_range=.1,
#                              )

# ## fit parameters from data
# generator = datagen.flow_from_directory(
#                            train_path,
#                            target_size=(512, 512),
#                            class_mode=None,
#                            batch_size=335,
#                            shuffle=True,
#                            save_to_dir=train_path,
#                            save_prefix="aug_"
#                            )


# for X_batch, y_batch in generator:
#     break
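
For reference, here is an uncommented sketch of the same idea adapted to this dataset; the output directory is an illustrative assumption (create the folder first), not a path used elsewhere in this notebook:

In [ ]:
### generate one batch of width-shifted copies of the training images
### aug_dir is a hypothetical scratch folder for the augmented output
aug_dir = 'C:/Projects/playground/kaggle/dogs_vs_cats/data_aug'

datagen = ImageDataGenerator(width_shift_range=.1)
generator = datagen.flow_from_directory(
    train_path,                # contains the dogs/ and cats/ subfolders
    target_size=(img_width, img_height),
    class_mode=None,
    batch_size=batch_size,
    shuffle=True,
    save_to_dir=aug_dir,
    save_prefix="aug_")

next(generator)  # pulling one batch writes the augmented images to aug_dir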

In [74]:
### Test on single image

path_to_lucy = "C:/Projects/playground/neural_style_transfer/images/inputs/content/loo_grass.jpg"

img = load_img(path_to_lucy)
plt.imshow(img)


Out[74]:
<matplotlib.image.AxesImage at 0x1507eef98>

In [76]:
img = imresize(img, (img_width, img_height))
img = img_to_array(img)

img.shape


Out[76]:
(3L, 250L, 250L)

In [85]:
img = img.reshape(1, 3, 250, 250)

print("This is a {0}.".format(classes[model.predict_classes(img)[0]]))


1/1 [==============================] - 0s
This is a dog.