In [1]:
import warnings
warnings.filterwarnings('ignore')

In [2]:
%matplotlib inline
%pylab inline


Populating the interactive namespace from numpy and matplotlib

In [3]:
import matplotlib.pylab as plt
# https://docs.scipy.org/doc/numpy/reference/routines.math.html
import numpy as np

In [4]:
from distutils.version import StrictVersion

In [5]:
import sklearn

assert StrictVersion(sklearn.__version__) >= StrictVersion('0.18.1')

sklearn.__version__


Out[5]:
'0.18.2'

In [6]:
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)

assert StrictVersion(tf.__version__) >= StrictVersion('1.1.0')

tf.__version__


Out[6]:
'1.2.1'

In [7]:
import keras

assert StrictVersion(keras.__version__) >= StrictVersion('2.0.0')

keras.__version__


Using TensorFlow backend.
Out[7]:
'2.0.4'

In [8]:
# !curl -O https://raw.githubusercontent.com/DJCordhose/speed-limit-signs/master/data/speed-limit-signs.zip
# !curl -O https://raw.githubusercontent.com/DJCordhose/speed-limit-signs/master/data/augmented-signs.zip

In [9]:
# https://docs.python.org/3/library/zipfile.html
# from zipfile import ZipFile
# zip = ZipFile(r'speed-limit-signs.zip')
# zip.extractall('.')
# zip = ZipFile(r'augmented-signs.zip')
# zip.extractall('.')

In [10]:
!ls -l speed-limit-signs


total 35012
drwxrwxrwx 0 root root 512 Jul  4 08:44 0
drwxrwxrwx 0 root root 512 Jul  4 08:44 1
drwxrwxrwx 0 root root 512 Jul  4 08:44 2
drwxrwxrwx 0 root root 512 Jul  4 08:44 3
drwxrwxrwx 0 root root 512 Jul  4 08:44 4
drwxrwxrwx 0 root root 512 Jul  4 08:44 5
-rwxrwxrwx 1 root root 380 Jul  4 08:44 README.md

In [11]:
!ls -l augmented-signs


total 458755
drwxrwxrwx 0 root root 512 Jul  4 08:46 0
drwxrwxrwx 0 root root 512 Jul  4 08:46 1
drwxrwxrwx 0 root root 512 Jul  4 08:46 2
drwxrwxrwx 0 root root 512 Jul  4 08:46 3
drwxrwxrwx 0 root root 512 Jul  4 08:46 4
drwxrwxrwx 0 root root 512 Jul  4 08:46 5

In [13]:
import os
import skimage.data

def load_data(data_dir, ext=".ppm"):
    """Loads a data set and returns two lists:

    images: a list of Numpy arrays, each representing an image.
    labels: a list of numbers that represent the images' labels.
    """
    # Get all subdirectories of data_dir. Each represents a label.
    directories = [d for d in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, d))]
    # Loop through the label directories and collect the data in
    # two lists, labels and images.
    labels = []
    images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f)
                      for f in os.listdir(label_dir) if f.endswith(ext)]
        # For each label, load its images and add them to the images list,
        # and add the label number (i.e. the directory name) to the labels list.
        for f in file_names:
            images.append(skimage.data.imread(f))
            labels.append(int(d))
    return images, labels

In [14]:
# Load datasets.
ROOT_PATH = "./"
# data_dir = os.path.join(ROOT_PATH, "speed-limit-signs")
# images, labels = load_data(data_dir, ext=".ppm")

data_dir = os.path.join(ROOT_PATH, "augmented-signs")
images, labels = load_data(data_dir, ext=".png")
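
A quick sanity check on what was loaded (the counts should match the directory listing above):

In [ ]:
len(images), len(labels), sorted(set(labels))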

In [15]:
import matplotlib
import matplotlib.pyplot as plt

def display_images_and_labels(images, labels):
    """Display the first image of each label."""
    unique_labels = set(labels)
    plt.figure(figsize=(15, 15))
    i = 1
    for label in unique_labels:
        # Pick the first image for each label.
        image = images[labels.index(label)]
        plt.subplot(8, 8, i)  # A grid of 8 rows x 8 columns
        plt.axis('off')
        plt.title("Label {0} ({1})".format(label, labels.count(label)))
        i += 1
        _ = plt.imshow(image)
    plt.show()

In [16]:
display_images_and_labels(images, labels)



In [17]:
for image in images[:5]:
    print("shape: {0}, min: {1}, max: {2}".format(image.shape, image.min(), image.max()))


shape: (64, 64, 3), min: 0, max: 255
shape: (64, 64, 3), min: 0, max: 255
shape: (64, 64, 3), min: 0, max: 255
shape: (64, 64, 3), min: 0, max: 255
shape: (64, 64, 3), min: 0, max: 255

In [18]:
import skimage.transform

# Resize images to 64x64; note that skimage.transform.resize also
# converts the pixel values to floats in [0, 1]
images64 = [skimage.transform.resize(image, (64, 64))
                for image in images]

In [19]:
for image in images64[:5]:
    print("shape: {0}, min: {1}, max: {2}".format(image.shape, image.min(), image.max()))


shape: (64, 64, 3), min: 0.0, max: 1.0
shape: (64, 64, 3), min: 1.323559997984492e-15, max: 1.0
shape: (64, 64, 3), min: 0.0, max: 1.0
shape: (64, 64, 3), min: 2.953628627081201e-15, max: 1.0
shape: (64, 64, 3), min: 2.2152214703109006e-15, max: 1.0

In [20]:
import numpy as np

y = np.array(labels)
X = np.array(images64)
y.shape, X.shape


Out[20]:
((3790,), (3790, 64, 64, 3))

In [21]:
from keras.utils.np_utils import to_categorical

num_categories = 6

y = to_categorical(y, num_categories)
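
to_categorical turns each integer label into a one-hot vector of length num_categories; a minimal sketch of what it does to a single label:

In [ ]:
# label 2 out of 6 categories becomes a one-hot row vector
to_categorical([2], 6)
# expected: array([[ 0.,  0.,  1.,  0.,  0.,  0.]])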

First we test whether our model can be trained at all by overfitting it on a small subset of the data


In [22]:
from sklearn.model_selection import train_test_split

In [105]:
# http://cs231n.github.io/neural-networks-3/#sanitycheck
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99, random_state=42)
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.90, random_state=42)

In [106]:
X_train.shape, y_train.shape


Out[106]:
((37, 64, 64, 3), (37, 6))

In [107]:
from keras.models import Model
from keras.layers import Dense, Dropout, Flatten, Input
from keras.layers import Conv2D, MaxPooling2D

# input tensor for a 3-channel 64x64 image
inputs = Input(shape=(64, 64, 3))

# one block of convolutional layers

# 32 filters with a 3x3 kernel; without padding this outputs a 62x62x32 tensor
x = Conv2D(32, (3, 3), activation='relu')(inputs)
x = Conv2D(32, (3, 3), activation='relu')(x)
x = Conv2D(32, (3, 3), activation='relu')(x)

# max pooling with a 2x2 window reduces the data to a fourth, which also reduces the risk of overfitting
x = MaxPooling2D(pool_size=(2, 2))(x)

# dropping 50% of all nodes during training (but not for test/prediction) also reduces the risk of overfitting
# x = Dropout(0.5)(x)
# http://cs231n.github.io/neural-networks-2/#reg

# one more block
x = Conv2D(64, (3, 3), activation='relu')(x)
x = Conv2D(64, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# x = Dropout(0.5)(x)

# one more block
x = Conv2D(128, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# x = Dropout(0.5)(x)

x = Flatten()(x)
x = Dense(256, activation='relu')(x)
# x = Dropout(0.50)(x)

# softmax activation, 6 categories
predictions = Dense(6, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
model.summary()
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

In [111]:
!rm -rf ../tmp/tf_log
!rm -rf ../tmp/model-checkpoints

!mkdir ../tmp/model-checkpoints
!mkdir ../tmp/tf_log

In [112]:
checkpoint_callback = keras.callbacks.ModelCheckpoint('../tmp/model-checkpoints/weights.epoch-{epoch:02d}-val_loss-{val_loss:.2f}.hdf5')
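
If one file per epoch is too much, the same callback can keep only the best model seen so far; a sketch using the same Keras callback API:

In [ ]:
checkpoint_best_callback = keras.callbacks.ModelCheckpoint(
    '../tmp/model-checkpoints/weights.best.hdf5',
    monitor='val_loss', save_best_only=True)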

In [113]:
early_stopping_callback = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1)

In [114]:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tensorboard/README.md
# https://keras.io/callbacks/#tensorboard
# http://stackoverflow.com/questions/42112260/how-do-i-use-the-tensorboard-callback-of-keras
tb_callback = keras.callbacks.TensorBoard(log_dir='../tmp/tf_log')
#                                          histogram_freq=1, write_graph=True, write_images=True)
# tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
# To start tensorboard
# tensorboard --logdir=/mnt/c/Users/olive/Development/ml/tf_log
# open http://localhost:6006

In [115]:
# model.fit(X_train, y_train, epochs=50, batch_size=5, validation_split=0.3)
model.fit(X_train, y_train, epochs=50, batch_size=5, validation_split=0.3, 
#           callbacks=[tb_callback, checkpoint_callback, early_stopping_callback])
          callbacks=[tb_callback]) 
# model.fit(X_train, y_train, epochs=50, batch_size=5, validation_split=0.3, callbacks=[checkpoint_callback])


Train on 25 samples, validate on 12 samples
Epoch 1/50
25/25 [==============================] - 2s - loss: 5.5456 - acc: 0.2800 - val_loss: 1.7867 - val_acc: 0.1667
Epoch 2/50
25/25 [==============================] - 2s - loss: 1.7988 - acc: 0.2000 - val_loss: 1.7906 - val_acc: 0.0833
Epoch 3/50
25/25 [==============================] - 2s - loss: 1.7911 - acc: 0.1200 - val_loss: 1.8030 - val_acc: 0.1667
Epoch 4/50
25/25 [==============================] - 2s - loss: 1.7593 - acc: 0.2000 - val_loss: 1.7897 - val_acc: 0.4167
Epoch 5/50
25/25 [==============================] - 2s - loss: 1.7709 - acc: 0.3200 - val_loss: 2.8932 - val_acc: 0.0833
Epoch 6/50
25/25 [==============================] - 2s - loss: 1.8209 - acc: 0.2000 - val_loss: 1.7847 - val_acc: 0.0833
Epoch 7/50
25/25 [==============================] - 2s - loss: 1.8550 - acc: 0.2800 - val_loss: 1.7936 - val_acc: 0.0833
Epoch 8/50
25/25 [==============================] - 2s - loss: 1.7728 - acc: 0.2400 - val_loss: 1.7755 - val_acc: 0.0833
Epoch 9/50
25/25 [==============================] - 2s - loss: 1.7465 - acc: 0.2400 - val_loss: 1.7786 - val_acc: 0.0000e+00
Epoch 10/50
25/25 [==============================] - 2s - loss: 1.8840 - acc: 0.2000 - val_loss: 1.7904 - val_acc: 0.0000e+00
Epoch 11/50
25/25 [==============================] - 2s - loss: 1.7024 - acc: 0.3600 - val_loss: 1.8692 - val_acc: 0.4167
Epoch 12/50
25/25 [==============================] - 2s - loss: 1.5507 - acc: 0.3200 - val_loss: 3.2505 - val_acc: 0.4167
Epoch 13/50
25/25 [==============================] - 2s - loss: 1.6289 - acc: 0.3600 - val_loss: 2.6982 - val_acc: 0.1667
Epoch 14/50
25/25 [==============================] - 2s - loss: 1.8527 - acc: 0.5200 - val_loss: 2.0418 - val_acc: 0.0000e+00
Epoch 15/50
25/25 [==============================] - 2s - loss: 1.3843 - acc: 0.5200 - val_loss: 2.2903 - val_acc: 0.0833
Epoch 16/50
25/25 [==============================] - 2s - loss: 1.0314 - acc: 0.5200 - val_loss: 3.8031 - val_acc: 0.0833
Epoch 17/50
25/25 [==============================] - 2s - loss: 1.0556 - acc: 0.6400 - val_loss: 3.3116 - val_acc: 0.1667
Epoch 18/50
25/25 [==============================] - 2s - loss: 0.6527 - acc: 0.8000 - val_loss: 2.5759 - val_acc: 0.1667
Epoch 19/50
25/25 [==============================] - 3s - loss: 0.5907 - acc: 0.8000 - val_loss: 8.9122 - val_acc: 0.1667
Epoch 20/50
25/25 [==============================] - 3s - loss: 0.3214 - acc: 0.8400 - val_loss: 5.6812 - val_acc: 0.1667
Epoch 21/50
25/25 [==============================] - 2s - loss: 0.0415 - acc: 1.0000 - val_loss: 7.3371 - val_acc: 0.1667
Epoch 22/50
25/25 [==============================] - 3s - loss: 0.0044 - acc: 1.0000 - val_loss: 8.2013 - val_acc: 0.1667
Epoch 23/50
25/25 [==============================] - 3s - loss: 0.0014 - acc: 1.0000 - val_loss: 8.6878 - val_acc: 0.1667
Epoch 24/50
25/25 [==============================] - 3s - loss: 5.5204e-04 - acc: 1.0000 - val_loss: 8.9220 - val_acc: 0.1667
Epoch 25/50
25/25 [==============================] - 3s - loss: 2.9930e-04 - acc: 1.0000 - val_loss: 9.1265 - val_acc: 0.1667
Epoch 26/50
25/25 [==============================] - 2s - loss: 1.7758e-04 - acc: 1.0000 - val_loss: 9.3510 - val_acc: 0.1667
Epoch 27/50
25/25 [==============================] - 2s - loss: 1.0138e-04 - acc: 1.0000 - val_loss: 9.5346 - val_acc: 0.1667
Epoch 28/50
25/25 [==============================] - 2s - loss: 6.2081e-05 - acc: 1.0000 - val_loss: 9.6659 - val_acc: 0.1667
Epoch 29/50
25/25 [==============================] - 2s - loss: 3.8289e-05 - acc: 1.0000 - val_loss: 9.8619 - val_acc: 0.1667
Epoch 30/50
25/25 [==============================] - 2s - loss: 2.6988e-05 - acc: 1.0000 - val_loss: 9.9801 - val_acc: 0.1667
Epoch 31/50
25/25 [==============================] - 3s - loss: 1.6020e-05 - acc: 1.0000 - val_loss: 10.0409 - val_acc: 0.1667
Epoch 32/50
25/25 [==============================] - 2s - loss: 1.0321e-05 - acc: 1.0000 - val_loss: 10.1680 - val_acc: 0.1667
Epoch 33/50
25/25 [==============================] - 2s - loss: 6.6639e-06 - acc: 1.0000 - val_loss: 10.2010 - val_acc: 0.1667
Epoch 34/50
25/25 [==============================] - 2s - loss: 4.2701e-06 - acc: 1.0000 - val_loss: 10.2969 - val_acc: 0.1667
Epoch 35/50
25/25 [==============================] - 2s - loss: 2.8753e-06 - acc: 1.0000 - val_loss: 10.3451 - val_acc: 0.1667
Epoch 36/50
25/25 [==============================] - 2s - loss: 1.9002e-06 - acc: 1.0000 - val_loss: 10.6020 - val_acc: 0.1667
Epoch 37/50
25/25 [==============================] - 2s - loss: 6.6280e-07 - acc: 1.0000 - val_loss: 10.8771 - val_acc: 0.1667
Epoch 38/50
25/25 [==============================] - 2s - loss: 2.2411e-07 - acc: 1.0000 - val_loss: 11.1231 - val_acc: 0.1667
Epoch 39/50
25/25 [==============================] - 2s - loss: 1.4305e-07 - acc: 1.0000 - val_loss: 11.2623 - val_acc: 0.1667
Epoch 40/50
25/25 [==============================] - 2s - loss: 1.2875e-07 - acc: 1.0000 - val_loss: 11.3575 - val_acc: 0.1667
Epoch 41/50
25/25 [==============================] - 2s - loss: 1.2159e-07 - acc: 1.0000 - val_loss: 11.4287 - val_acc: 0.1667
Epoch 42/50
25/25 [==============================] - 3s - loss: 1.1921e-07 - acc: 1.0000 - val_loss: 11.4961 - val_acc: 0.1667
Epoch 43/50
25/25 [==============================] - 3s - loss: 1.1921e-07 - acc: 1.0000 - val_loss: 11.4961 - val_acc: 0.1667
Epoch 44/50
25/25 [==============================] - 2s - loss: 1.1921e-07 - acc: 1.0000 - val_loss: 11.4961 - val_acc: 0.1667
Epoch 45/50
25/25 [==============================] - 2s - loss: 1.1921e-07 - acc: 1.0000 - val_loss: 11.4961 - val_acc: 0.1667
Epoch 46/50
25/25 [==============================] - 2s - loss: 1.1921e-07 - acc: 1.0000 - val_loss: 11.4961 - val_acc: 0.1667
Epoch 47/50
25/25 [==============================] - 2s - loss: 1.1921e-07 - acc: 1.0000 - val_loss: 11.4961 - val_acc: 0.1667
Epoch 48/50
25/25 [==============================] - 2s - loss: 1.1921e-07 - acc: 1.0000 - val_loss: 11.4961 - val_acc: 0.1667
Epoch 49/50
25/25 [==============================] - 2s - loss: 1.1921e-07 - acc: 1.0000 - val_loss: 11.4961 - val_acc: 0.1667
Epoch 50/50
25/25 [==============================] - 2s - loss: 1.1921e-07 - acc: 1.0000 - val_loss: 11.4961 - val_acc: 0.1667
Out[115]:
<keras.callbacks.History at 0x7f5990c1eb00>
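
This is exactly what the sanity check calls for: training accuracy reaches 1.0 while validation accuracy stays near chance (1/6 ≈ 0.167), and the training loss bottoms out at 1.1921e-07, which is the float32 machine epsilon (2^-23), i.e. the smallest cross-entropy representable here. The model can memorize a tiny data set, so the training setup works.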

Now the same model, but with Dropout


In [167]:
from keras.models import Model
from keras.layers import Dense, Dropout, Flatten, Input
from keras.layers import Conv2D, MaxPooling2D

drop_out = 0.5
# drop_out = 0.25
# drop_out = 0.0

# input tensor for a 3-channel 64x64 image
inputs = Input(shape=(64, 64, 3))

# one block of convolutional layers
x = Conv2D(32, (3, 3), activation='relu')(inputs)
x = Conv2D(32, (3, 3), activation='relu')(x)
x = Conv2D(32, (3, 3), activation='relu')(x)

# max pooling with a 2x2 window reduces the data to a fourth, which also reduces the risk of overfitting
x = MaxPooling2D(pool_size=(2, 2))(x)

# drops a fraction drop_out of all nodes during training (but not for test/prediction), which also reduces the risk of overfitting
x = Dropout(drop_out)(x)
# http://cs231n.github.io/neural-networks-2/#reg

# one more block
x = Conv2D(64, (3, 3), activation='relu')(x)
x = Conv2D(64, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(drop_out)(x)

# one more block
x = Conv2D(128, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(drop_out)(x)

x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(drop_out)(x)

# softmax activation, 6 categories
predictions = Dense(6, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
model.summary()
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_19 (InputLayer)        (None, 64, 64, 3)         0         
_________________________________________________________________
conv2d_108 (Conv2D)          (None, 62, 62, 32)        896       
_________________________________________________________________
conv2d_109 (Conv2D)          (None, 60, 60, 32)        9248      
_________________________________________________________________
conv2d_110 (Conv2D)          (None, 58, 58, 32)        9248      
_________________________________________________________________
max_pooling2d_53 (MaxPooling (None, 29, 29, 32)        0         
_________________________________________________________________
dropout_19 (Dropout)         (None, 29, 29, 32)        0         
_________________________________________________________________
conv2d_111 (Conv2D)          (None, 27, 27, 64)        18496     
_________________________________________________________________
conv2d_112 (Conv2D)          (None, 25, 25, 64)        36928     
_________________________________________________________________
max_pooling2d_54 (MaxPooling (None, 12, 12, 64)        0         
_________________________________________________________________
dropout_20 (Dropout)         (None, 12, 12, 64)        0         
_________________________________________________________________
conv2d_113 (Conv2D)          (None, 10, 10, 128)       73856     
_________________________________________________________________
max_pooling2d_55 (MaxPooling (None, 5, 5, 128)         0         
_________________________________________________________________
dropout_21 (Dropout)         (None, 5, 5, 128)         0         
_________________________________________________________________
flatten_19 (Flatten)         (None, 3200)              0         
_________________________________________________________________
dense_37 (Dense)             (None, 256)               819456    
_________________________________________________________________
dropout_22 (Dropout)         (None, 256)               0         
_________________________________________________________________
dense_38 (Dense)             (None, 6)                 1542      
=================================================================
Total params: 969,670
Trainable params: 969,670
Non-trainable params: 0
_________________________________________________________________
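
The parameter counts in the summary follow kernel_height * kernel_width * input_channels * filters + filters (one bias per filter): the first conv layer has 3 * 3 * 3 * 32 + 32 = 896 parameters, the second 3 * 3 * 32 * 32 + 32 = 9248, and the first dense layer 3200 * 256 + 256 = 819456, which is where most of the model's capacity sits.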

In [148]:
!rm -rf ../tmp/tf_log
!rm -rf ../tmp/model-checkpoints

!mkdir ../tmp/model-checkpoints
!mkdir ../tmp/tf_log

In [134]:
# model.fit(X_train, y_train, epochs=50, batch_size=5, validation_split=0.3)
model.fit(X_train, y_train, epochs=50, batch_size=5, validation_split=0.3, 
#           callbacks=[tb_callback, checkpoint_callback, early_stopping_callback])
          callbacks=[tb_callback]) 
# model.fit(X_train, y_train, epochs=50, batch_size=5, validation_split=0.3, callbacks=[checkpoint_callback])


Train on 25 samples, validate on 12 samples
Epoch 1/50
25/25 [==============================] - 2s - loss: 4.9244 - acc: 0.0800 - val_loss: 1.7926 - val_acc: 0.0833
Epoch 2/50
25/25 [==============================] - 2s - loss: 1.7915 - acc: 0.1600 - val_loss: 1.7912 - val_acc: 0.0833
Epoch 3/50
25/25 [==============================] - 2s - loss: 1.7890 - acc: 0.2000 - val_loss: 1.7900 - val_acc: 0.0833
Epoch 4/50
25/25 [==============================] - 2s - loss: 1.7866 - acc: 0.2400 - val_loss: 1.7888 - val_acc: 0.0833
Epoch 5/50
25/25 [==============================] - 2s - loss: 1.7905 - acc: 0.2000 - val_loss: 1.7875 - val_acc: 0.0833
Epoch 6/50
25/25 [==============================] - 2s - loss: 2.1429 - acc: 0.2800 - val_loss: 1.7788 - val_acc: 0.4167
Epoch 7/50
25/25 [==============================] - 2s - loss: 1.7794 - acc: 0.2000 - val_loss: 1.7801 - val_acc: 0.4167
Epoch 8/50
25/25 [==============================] - 2s - loss: 1.7579 - acc: 0.2000 - val_loss: 1.7800 - val_acc: 0.0833
Epoch 9/50
25/25 [==============================] - 2s - loss: 1.7725 - acc: 0.1600 - val_loss: 1.7858 - val_acc: 0.0833
Epoch 10/50
25/25 [==============================] - 2s - loss: 1.7424 - acc: 0.2000 - val_loss: 1.7795 - val_acc: 0.3333
Epoch 11/50
25/25 [==============================] - 2s - loss: 1.7494 - acc: 0.2000 - val_loss: 1.8115 - val_acc: 0.0833
Epoch 12/50
25/25 [==============================] - 2s - loss: 2.0720 - acc: 0.2800 - val_loss: 1.7737 - val_acc: 0.4167
Epoch 13/50
25/25 [==============================] - 2s - loss: 1.7533 - acc: 0.2000 - val_loss: 1.7795 - val_acc: 0.3333
Epoch 14/50
25/25 [==============================] - 2s - loss: 1.7751 - acc: 0.4000 - val_loss: 1.7814 - val_acc: 0.3333
Epoch 15/50
25/25 [==============================] - 2s - loss: 1.7606 - acc: 0.2000 - val_loss: 1.7689 - val_acc: 0.4167
Epoch 16/50
25/25 [==============================] - 2s - loss: 1.6856 - acc: 0.2400 - val_loss: 1.7616 - val_acc: 0.3333
Epoch 17/50
25/25 [==============================] - 2s - loss: 1.7809 - acc: 0.2400 - val_loss: 1.7494 - val_acc: 0.4167
Epoch 18/50
25/25 [==============================] - 2s - loss: 1.6905 - acc: 0.3200 - val_loss: 1.7690 - val_acc: 0.4167
Epoch 19/50
25/25 [==============================] - 2s - loss: 1.5323 - acc: 0.4800 - val_loss: 1.9767 - val_acc: 0.1667
Epoch 20/50
25/25 [==============================] - 2s - loss: 1.4473 - acc: 0.5200 - val_loss: 2.4929 - val_acc: 0.1667
Epoch 21/50
25/25 [==============================] - 2s - loss: 1.1405 - acc: 0.5600 - val_loss: 2.8102 - val_acc: 0.0000e+00
Epoch 22/50
25/25 [==============================] - 2s - loss: 1.1946 - acc: 0.6400 - val_loss: 2.2102 - val_acc: 0.0833
Epoch 23/50
25/25 [==============================] - 2s - loss: 1.0379 - acc: 0.5200 - val_loss: 2.6204 - val_acc: 0.1667
Epoch 24/50
25/25 [==============================] - 2s - loss: 0.8170 - acc: 0.6800 - val_loss: 3.6260 - val_acc: 0.1667
Epoch 25/50
25/25 [==============================] - 2s - loss: 0.9195 - acc: 0.7200 - val_loss: 3.0341 - val_acc: 0.0833
Epoch 26/50
25/25 [==============================] - 2s - loss: 0.6328 - acc: 0.7600 - val_loss: 3.4470 - val_acc: 0.0833
Epoch 27/50
25/25 [==============================] - 2s - loss: 0.4871 - acc: 0.8000 - val_loss: 3.5133 - val_acc: 0.0833
Epoch 28/50
25/25 [==============================] - 2s - loss: 0.2701 - acc: 0.8800 - val_loss: 4.5966 - val_acc: 0.0833
Epoch 29/50
25/25 [==============================] - 2s - loss: 0.4244 - acc: 0.8800 - val_loss: 3.6633 - val_acc: 0.2500
Epoch 30/50
25/25 [==============================] - 2s - loss: 0.0418 - acc: 1.0000 - val_loss: 4.7085 - val_acc: 0.2500
Epoch 31/50
25/25 [==============================] - 3s - loss: 0.0407 - acc: 1.0000 - val_loss: 5.3374 - val_acc: 0.1667
Epoch 32/50
25/25 [==============================] - 3s - loss: 0.2360 - acc: 0.9600 - val_loss: 4.9668 - val_acc: 0.1667
Epoch 33/50
25/25 [==============================] - 3s - loss: 0.0055 - acc: 1.0000 - val_loss: 5.1281 - val_acc: 0.2500
Epoch 34/50
25/25 [==============================] - 2s - loss: 0.0023 - acc: 1.0000 - val_loss: 5.4664 - val_acc: 0.1667
Epoch 35/50
25/25 [==============================] - 2s - loss: 0.0043 - acc: 1.0000 - val_loss: 6.2310 - val_acc: 0.1667
Epoch 36/50
25/25 [==============================] - 2s - loss: 0.0062 - acc: 1.0000 - val_loss: 5.9868 - val_acc: 0.1667
Epoch 37/50
25/25 [==============================] - 2s - loss: 0.8665 - acc: 0.8400 - val_loss: 7.9179 - val_acc: 0.4167
Epoch 38/50
25/25 [==============================] - 2s - loss: 5.6600 - acc: 0.6000 - val_loss: 3.9660 - val_acc: 0.1667
Epoch 39/50
25/25 [==============================] - 2s - loss: 0.4729 - acc: 0.8000 - val_loss: 5.7903 - val_acc: 0.2500
Epoch 40/50
25/25 [==============================] - 2s - loss: 0.1767 - acc: 0.8800 - val_loss: 7.2836 - val_acc: 0.1667
Epoch 41/50
25/25 [==============================] - 2s - loss: 0.1412 - acc: 0.9600 - val_loss: 5.7648 - val_acc: 0.1667
Epoch 42/50
25/25 [==============================] - 2s - loss: 0.0392 - acc: 1.0000 - val_loss: 6.4457 - val_acc: 0.1667
Epoch 43/50
25/25 [==============================] - 2s - loss: 0.0031 - acc: 1.0000 - val_loss: 6.6453 - val_acc: 0.1667
Epoch 44/50
25/25 [==============================] - 2s - loss: 0.0436 - acc: 1.0000 - val_loss: 8.2976 - val_acc: 0.1667
Epoch 45/50
25/25 [==============================] - 3s - loss: 0.0990 - acc: 0.9600 - val_loss: 9.6565 - val_acc: 0.1667
Epoch 46/50
25/25 [==============================] - 4s - loss: 0.3749 - acc: 0.9600 - val_loss: 7.6089 - val_acc: 0.0833
Epoch 47/50
25/25 [==============================] - 2s - loss: 0.0085 - acc: 1.0000 - val_loss: 7.7699 - val_acc: 0.0833
Epoch 48/50
25/25 [==============================] - 2s - loss: 5.8438e-04 - acc: 1.0000 - val_loss: 8.1787 - val_acc: 0.1667
Epoch 49/50
25/25 [==============================] - 2s - loss: 6.4339e-04 - acc: 1.0000 - val_loss: 8.3477 - val_acc: 0.1667
Epoch 50/50
25/25 [==============================] - 4s - loss: 9.3539e-05 - acc: 1.0000 - val_loss: 8.3759 - val_acc: 0.1667
Out[134]:
<keras.callbacks.History at 0x7f595c3b5748>

With Dropout, the model typically refuses to overfit as easily.
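
Conceptually, Dropout zeroes a random fraction of activations during training and rescales the survivors, so the layer is the identity at test time; a minimal numpy sketch of this inverted-dropout idea (not the Keras internals):

In [ ]:
import numpy as np

def dropout_train(activations, rate=0.5, rng=np.random):
    # zero out a fraction `rate` of the units and rescale the rest,
    # so the expected activation stays the same and test time needs no change
    mask = rng.binomial(1, 1.0 - rate, size=activations.shape)
    return activations * mask / (1.0 - rate)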

Let us try it with a lot of data; refusing to overfit actually is a feature, not a bug.


In [168]:
# training on a smaller subset due to limited processing power; try with all data on a more powerful machine

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=3)
# We can use all our data for training, because we have a completely separate set for testing later
# X_train = X
# y_train = y

In [169]:
X_train.shape, y_train.shape


Out[169]:
((379, 64, 64, 3), (379, 6))

In [170]:
# note: this wraps the same layer objects (and thus any already trained weights)
# as the cell above; re-run the model definition cell for a truly fresh start
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

In [171]:
from datetime import datetime

In [174]:
!rm -rf ../tmp/tf_log
!rm -rf ../tmp/model-checkpoints

!mkdir ../tmp/model-checkpoints
!mkdir ../tmp/tf_log

In [175]:
print(datetime.utcnow().isoformat())
model.fit(X_train, y_train, epochs=150, batch_size=100, validation_split=0.3, 
#           callbacks=[tb_callback, early_stopping_callback])
          callbacks=[tb_callback])
# model.fit(X_train, y_train, epochs=50, batch_size=200, validation_split=0.3)
print(datetime.utcnow().isoformat())


2017-07-09T18:55:57.385610
Train on 265 samples, validate on 114 samples
Epoch 1/150
265/265 [==============================] - 12s - loss: 1.7882 - acc: 0.1962 - val_loss: 1.7867 - val_acc: 0.1667
Epoch 2/150
265/265 [==============================] - 12s - loss: 1.7766 - acc: 0.2000 - val_loss: 1.7782 - val_acc: 0.1667
Epoch 3/150
265/265 [==============================] - 11s - loss: 1.7648 - acc: 0.1962 - val_loss: 1.7773 - val_acc: 0.1667
Epoch 4/150
265/265 [==============================] - 12s - loss: 1.7783 - acc: 0.1887 - val_loss: 1.7739 - val_acc: 0.1667
Epoch 5/150
265/265 [==============================] - 11s - loss: 1.7912 - acc: 0.2038 - val_loss: 1.7802 - val_acc: 0.1667
Epoch 6/150
265/265 [==============================] - 11s - loss: 1.7601 - acc: 0.2264 - val_loss: 1.7737 - val_acc: 0.1667
Epoch 7/150
265/265 [==============================] - 12s - loss: 1.7523 - acc: 0.2302 - val_loss: 1.7778 - val_acc: 0.2544
Epoch 8/150
265/265 [==============================] - 12s - loss: 1.7565 - acc: 0.2038 - val_loss: 1.7619 - val_acc: 0.1667
Epoch 9/150
265/265 [==============================] - 12s - loss: 1.7262 - acc: 0.2453 - val_loss: 1.7584 - val_acc: 0.1667
Epoch 10/150
265/265 [==============================] - 13s - loss: 1.7316 - acc: 0.2679 - val_loss: 1.7732 - val_acc: 0.1667
Epoch 11/150
265/265 [==============================] - 12s - loss: 1.7568 - acc: 0.2491 - val_loss: 1.7485 - val_acc: 0.2281
Epoch 12/150
265/265 [==============================] - 10s - loss: 2.2043 - acc: 0.2302 - val_loss: 1.7682 - val_acc: 0.1667
Epoch 13/150
265/265 [==============================] - 13s - loss: 1.7490 - acc: 0.2151 - val_loss: 1.7676 - val_acc: 0.1667
Epoch 14/150
265/265 [==============================] - 12s - loss: 1.7388 - acc: 0.2302 - val_loss: 1.7652 - val_acc: 0.1667
Epoch 15/150
265/265 [==============================] - 10s - loss: 1.7329 - acc: 0.2302 - val_loss: 1.7694 - val_acc: 0.2105
Epoch 16/150
265/265 [==============================] - 10s - loss: 1.7269 - acc: 0.2566 - val_loss: 1.7618 - val_acc: 0.2105
Epoch 17/150
265/265 [==============================] - 10s - loss: 1.7099 - acc: 0.2792 - val_loss: 1.7478 - val_acc: 0.2193
Epoch 18/150
265/265 [==============================] - 11s - loss: 1.7778 - acc: 0.2830 - val_loss: 1.7906 - val_acc: 0.1491
Epoch 19/150
265/265 [==============================] - 11s - loss: 1.7647 - acc: 0.2264 - val_loss: 1.7701 - val_acc: 0.2281
Epoch 20/150
265/265 [==============================] - 10s - loss: 1.7114 - acc: 0.3283 - val_loss: 1.7461 - val_acc: 0.2281
Epoch 21/150
265/265 [==============================] - 10s - loss: 1.7163 - acc: 0.2906 - val_loss: 1.7588 - val_acc: 0.2018
Epoch 22/150
265/265 [==============================] - 11s - loss: 1.6855 - acc: 0.3321 - val_loss: 1.7404 - val_acc: 0.1930
Epoch 23/150
265/265 [==============================] - 11s - loss: 1.6908 - acc: 0.3132 - val_loss: 1.7915 - val_acc: 0.1667
Epoch 24/150
265/265 [==============================] - 12s - loss: 1.7051 - acc: 0.2717 - val_loss: 1.7420 - val_acc: 0.2281
Epoch 25/150
265/265 [==============================] - 13s - loss: 1.6814 - acc: 0.3321 - val_loss: 1.7792 - val_acc: 0.1579
Epoch 26/150
265/265 [==============================] - 15s - loss: 1.6705 - acc: 0.3094 - val_loss: 1.7392 - val_acc: 0.2193
Epoch 27/150
265/265 [==============================] - 14s - loss: 1.6941 - acc: 0.3170 - val_loss: 1.7425 - val_acc: 0.2193
Epoch 28/150
265/265 [==============================] - 11s - loss: 1.6343 - acc: 0.3321 - val_loss: 1.7765 - val_acc: 0.1579
Epoch 29/150
265/265 [==============================] - 10s - loss: 1.6630 - acc: 0.3170 - val_loss: 1.7415 - val_acc: 0.2105
Epoch 30/150
265/265 [==============================] - 10s - loss: 1.6218 - acc: 0.3585 - val_loss: 1.7843 - val_acc: 0.1667
Epoch 31/150
265/265 [==============================] - 10s - loss: 1.7374 - acc: 0.3019 - val_loss: 1.7601 - val_acc: 0.2193
Epoch 32/150
265/265 [==============================] - 12s - loss: 1.6570 - acc: 0.3057 - val_loss: 1.7737 - val_acc: 0.2018
Epoch 33/150
265/265 [==============================] - 12s - loss: 1.6782 - acc: 0.3170 - val_loss: 1.7316 - val_acc: 0.2018
Epoch 34/150
265/265 [==============================] - 12s - loss: 1.6407 - acc: 0.3283 - val_loss: 1.7374 - val_acc: 0.2105
Epoch 35/150
265/265 [==============================] - 12s - loss: 1.6199 - acc: 0.3547 - val_loss: 1.7241 - val_acc: 0.2281
Epoch 36/150
265/265 [==============================] - 13s - loss: 1.7253 - acc: 0.2717 - val_loss: 1.7682 - val_acc: 0.2018
Epoch 37/150
265/265 [==============================] - 12s - loss: 1.6893 - acc: 0.3321 - val_loss: 1.7424 - val_acc: 0.2018
Epoch 38/150
265/265 [==============================] - 12s - loss: 1.5736 - acc: 0.3736 - val_loss: 1.7542 - val_acc: 0.2281
Epoch 39/150
265/265 [==============================] - 12s - loss: 1.6257 - acc: 0.3811 - val_loss: 1.7730 - val_acc: 0.1754
Epoch 40/150
265/265 [==============================] - 13s - loss: 1.7675 - acc: 0.2943 - val_loss: 1.7351 - val_acc: 0.2018
Epoch 41/150
265/265 [==============================] - 13s - loss: 1.6155 - acc: 0.3623 - val_loss: 1.7270 - val_acc: 0.1930
Epoch 42/150
265/265 [==============================] - 13s - loss: 1.5673 - acc: 0.3849 - val_loss: 1.7384 - val_acc: 0.2193
Epoch 43/150
265/265 [==============================] - 13s - loss: 1.6466 - acc: 0.3208 - val_loss: 1.7349 - val_acc: 0.2368
Epoch 44/150
265/265 [==============================] - 13s - loss: 1.5788 - acc: 0.3698 - val_loss: 1.8432 - val_acc: 0.1754
Epoch 45/150
265/265 [==============================] - 13s - loss: 1.6429 - acc: 0.3472 - val_loss: 1.7338 - val_acc: 0.2281
Epoch 46/150
265/265 [==============================] - 12s - loss: 1.5803 - acc: 0.3472 - val_loss: 1.7721 - val_acc: 0.1842
Epoch 47/150
265/265 [==============================] - 13s - loss: 1.7524 - acc: 0.2981 - val_loss: 1.7411 - val_acc: 0.2105
Epoch 48/150
265/265 [==============================] - 13s - loss: 1.6080 - acc: 0.3660 - val_loss: 1.7054 - val_acc: 0.2105
Epoch 49/150
265/265 [==============================] - 12s - loss: 1.5142 - acc: 0.3925 - val_loss: 1.9883 - val_acc: 0.1754
Epoch 50/150
265/265 [==============================] - 13s - loss: 1.7603 - acc: 0.2755 - val_loss: 1.7341 - val_acc: 0.2456
Epoch 51/150
265/265 [==============================] - 13s - loss: 1.5718 - acc: 0.3396 - val_loss: 1.7218 - val_acc: 0.2105
Epoch 52/150
265/265 [==============================] - 13s - loss: 1.4773 - acc: 0.4226 - val_loss: 1.7130 - val_acc: 0.2368
Epoch 53/150
265/265 [==============================] - 13s - loss: 1.5072 - acc: 0.4189 - val_loss: 1.7876 - val_acc: 0.2105
Epoch 54/150
265/265 [==============================] - 13s - loss: 1.6063 - acc: 0.3396 - val_loss: 1.7131 - val_acc: 0.2456
Epoch 55/150
265/265 [==============================] - 13s - loss: 1.5222 - acc: 0.4189 - val_loss: 1.7618 - val_acc: 0.2193
Epoch 56/150
265/265 [==============================] - 13s - loss: 1.5058 - acc: 0.3925 - val_loss: 1.7098 - val_acc: 0.2544
Epoch 57/150
265/265 [==============================] - 13s - loss: 1.4449 - acc: 0.4264 - val_loss: 1.7470 - val_acc: 0.2281
Epoch 58/150
265/265 [==============================] - 13s - loss: 1.3879 - acc: 0.4453 - val_loss: 1.7388 - val_acc: 0.2456
Epoch 59/150
265/265 [==============================] - 13s - loss: 1.3564 - acc: 0.4340 - val_loss: 1.8760 - val_acc: 0.2281
Epoch 60/150
265/265 [==============================] - 13s - loss: 1.8034 - acc: 0.3245 - val_loss: 1.7253 - val_acc: 0.2982
Epoch 61/150
265/265 [==============================] - 13s - loss: 1.5537 - acc: 0.4000 - val_loss: 1.7537 - val_acc: 0.2544
Epoch 62/150
265/265 [==============================] - 13s - loss: 1.4233 - acc: 0.4792 - val_loss: 1.7306 - val_acc: 0.2193
Epoch 63/150
265/265 [==============================] - 13s - loss: 1.4341 - acc: 0.4113 - val_loss: 1.7946 - val_acc: 0.2018
Epoch 64/150
265/265 [==============================] - 13s - loss: 1.4998 - acc: 0.4038 - val_loss: 1.7574 - val_acc: 0.2193
Epoch 65/150
265/265 [==============================] - 13s - loss: 1.4879 - acc: 0.4000 - val_loss: 1.7248 - val_acc: 0.2456
Epoch 66/150
265/265 [==============================] - 13s - loss: 1.3303 - acc: 0.4792 - val_loss: 1.7529 - val_acc: 0.2193
Epoch 67/150
265/265 [==============================] - 12s - loss: 1.2544 - acc: 0.5358 - val_loss: 1.8405 - val_acc: 0.2368
Epoch 68/150
265/265 [==============================] - 12s - loss: 1.4990 - acc: 0.4566 - val_loss: 1.7664 - val_acc: 0.1667
Epoch 69/150
265/265 [==============================] - 13s - loss: 1.4863 - acc: 0.3774 - val_loss: 1.7418 - val_acc: 0.2719
Epoch 70/150
265/265 [==============================] - 14s - loss: 1.2603 - acc: 0.5774 - val_loss: 1.8702 - val_acc: 0.2368
Epoch 71/150
265/265 [==============================] - 12s - loss: 1.4112 - acc: 0.4151 - val_loss: 1.7263 - val_acc: 0.2281
Epoch 72/150
265/265 [==============================] - 13s - loss: 1.2209 - acc: 0.5434 - val_loss: 1.8223 - val_acc: 0.2544
Epoch 73/150
265/265 [==============================] - 13s - loss: 1.3403 - acc: 0.4642 - val_loss: 1.8386 - val_acc: 0.1667
Epoch 74/150
265/265 [==============================] - 15s - loss: 1.4869 - acc: 0.4226 - val_loss: 1.7535 - val_acc: 0.2281
Epoch 75/150
265/265 [==============================] - 12s - loss: 1.2858 - acc: 0.5358 - val_loss: 1.7588 - val_acc: 0.2193
Epoch 76/150
265/265 [==============================] - 12s - loss: 1.1552 - acc: 0.5585 - val_loss: 1.8094 - val_acc: 0.2281
Epoch 77/150
265/265 [==============================] - 13s - loss: 1.4065 - acc: 0.5019 - val_loss: 1.7970 - val_acc: 0.2105
Epoch 78/150
265/265 [==============================] - 13s - loss: 1.3876 - acc: 0.4377 - val_loss: 1.7348 - val_acc: 0.2544
Epoch 79/150
265/265 [==============================] - 13s - loss: 1.1112 - acc: 0.5849 - val_loss: 1.9090 - val_acc: 0.2193
Epoch 80/150
265/265 [==============================] - 13s - loss: 1.1337 - acc: 0.5774 - val_loss: 1.7978 - val_acc: 0.2368
Epoch 81/150
265/265 [==============================] - 14s - loss: 1.1908 - acc: 0.5321 - val_loss: 1.9882 - val_acc: 0.2632
Epoch 82/150
265/265 [==============================] - 13s - loss: 1.1845 - acc: 0.5736 - val_loss: 1.7858 - val_acc: 0.2281
Epoch 83/150
265/265 [==============================] - 13s - loss: 1.0139 - acc: 0.6491 - val_loss: 1.9518 - val_acc: 0.2368
Epoch 84/150
265/265 [==============================] - 12s - loss: 1.0917 - acc: 0.6038 - val_loss: 1.8508 - val_acc: 0.2368
Epoch 85/150
265/265 [==============================] - 12s - loss: 1.2251 - acc: 0.5396 - val_loss: 2.0026 - val_acc: 0.2018
Epoch 86/150
265/265 [==============================] - 12s - loss: 1.4602 - acc: 0.4642 - val_loss: 1.8015 - val_acc: 0.1930
Epoch 87/150
265/265 [==============================] - 13s - loss: 1.0909 - acc: 0.6000 - val_loss: 1.8449 - val_acc: 0.2018
Epoch 88/150
265/265 [==============================] - 12s - loss: 0.9150 - acc: 0.6453 - val_loss: 1.9511 - val_acc: 0.2456
Epoch 89/150
265/265 [==============================] - 13s - loss: 0.8951 - acc: 0.6415 - val_loss: 2.0065 - val_acc: 0.2281
Epoch 90/150
265/265 [==============================] - 12s - loss: 1.0651 - acc: 0.5585 - val_loss: 2.1733 - val_acc: 0.2368
Epoch 91/150
265/265 [==============================] - 13s - loss: 1.7502 - acc: 0.4604 - val_loss: 1.8217 - val_acc: 0.2456
Epoch 92/150
265/265 [==============================] - 12s - loss: 0.9444 - acc: 0.6491 - val_loss: 1.9800 - val_acc: 0.2368
Epoch 93/150
265/265 [==============================] - 13s - loss: 0.7963 - acc: 0.7283 - val_loss: 2.0517 - val_acc: 0.2807
Epoch 94/150
265/265 [==============================] - 12s - loss: 0.7728 - acc: 0.7358 - val_loss: 2.2464 - val_acc: 0.2719
Epoch 95/150
265/265 [==============================] - 12s - loss: 0.8713 - acc: 0.6528 - val_loss: 1.8951 - val_acc: 0.2105
Epoch 96/150
265/265 [==============================] - 12s - loss: 1.2695 - acc: 0.5132 - val_loss: 1.9186 - val_acc: 0.2632
Epoch 97/150
265/265 [==============================] - 14s - loss: 0.7737 - acc: 0.6792 - val_loss: 2.0841 - val_acc: 0.2807
Epoch 98/150
265/265 [==============================] - 12s - loss: 0.7918 - acc: 0.7094 - val_loss: 2.0471 - val_acc: 0.2632
Epoch 99/150
265/265 [==============================] - 13s - loss: 0.9094 - acc: 0.6264 - val_loss: 2.1331 - val_acc: 0.2281
Epoch 100/150
265/265 [==============================] - 13s - loss: 0.7291 - acc: 0.7245 - val_loss: 2.1124 - val_acc: 0.2544
Epoch 101/150
265/265 [==============================] - 13s - loss: 0.9415 - acc: 0.6377 - val_loss: 2.0355 - val_acc: 0.2281
Epoch 102/150
265/265 [==============================] - 12s - loss: 0.9364 - acc: 0.6717 - val_loss: 1.9817 - val_acc: 0.2807
Epoch 103/150
265/265 [==============================] - 14s - loss: 0.6854 - acc: 0.7547 - val_loss: 2.4798 - val_acc: 0.2456
Epoch 104/150
265/265 [==============================] - 14s - loss: 0.9047 - acc: 0.6642 - val_loss: 2.0617 - val_acc: 0.2632
Epoch 105/150
265/265 [==============================] - 13s - loss: 0.6411 - acc: 0.7660 - val_loss: 2.2611 - val_acc: 0.2807
Epoch 106/150
265/265 [==============================] - 12s - loss: 0.5768 - acc: 0.7736 - val_loss: 2.2289 - val_acc: 0.3246
Epoch 107/150
265/265 [==============================] - 12s - loss: 0.8872 - acc: 0.6906 - val_loss: 2.2364 - val_acc: 0.2895
Epoch 108/150
265/265 [==============================] - 12s - loss: 0.7579 - acc: 0.7170 - val_loss: 2.1814 - val_acc: 0.2982
Epoch 109/150
265/265 [==============================] - 14s - loss: 0.4799 - acc: 0.8151 - val_loss: 2.5137 - val_acc: 0.3070
Epoch 110/150
265/265 [==============================] - 14s - loss: 0.6026 - acc: 0.7736 - val_loss: 2.2195 - val_acc: 0.3246
Epoch 111/150
265/265 [==============================] - 14s - loss: 1.0305 - acc: 0.6415 - val_loss: 2.0634 - val_acc: 0.3158
Epoch 112/150
265/265 [==============================] - 12s - loss: 0.6771 - acc: 0.7472 - val_loss: 2.1780 - val_acc: 0.2632
Epoch 113/150
265/265 [==============================] - 12s - loss: 0.4737 - acc: 0.8453 - val_loss: 2.4624 - val_acc: 0.2632
Epoch 114/150
265/265 [==============================] - 13s - loss: 0.5604 - acc: 0.7887 - val_loss: 2.6176 - val_acc: 0.2632
Epoch 115/150
265/265 [==============================] - 12s - loss: 0.4429 - acc: 0.8453 - val_loss: 2.5701 - val_acc: 0.2456
Epoch 116/150
265/265 [==============================] - 13s - loss: 0.3997 - acc: 0.8453 - val_loss: 2.9215 - val_acc: 0.2544
Epoch 117/150
265/265 [==============================] - 12s - loss: 0.9979 - acc: 0.7019 - val_loss: 2.7721 - val_acc: 0.2018
Epoch 118/150
265/265 [==============================] - 12s - loss: 1.1131 - acc: 0.6377 - val_loss: 2.0536 - val_acc: 0.2193
Epoch 119/150
265/265 [==============================] - 12s - loss: 0.4554 - acc: 0.8528 - val_loss: 2.4243 - val_acc: 0.2105
Epoch 120/150
265/265 [==============================] - 12s - loss: 0.4188 - acc: 0.8604 - val_loss: 2.4362 - val_acc: 0.2719
Epoch 121/150
265/265 [==============================] - 12s - loss: 0.4711 - acc: 0.8415 - val_loss: 2.6143 - val_acc: 0.2281
Epoch 122/150
265/265 [==============================] - 12s - loss: 0.5616 - acc: 0.7925 - val_loss: 2.4081 - val_acc: 0.2368
Epoch 123/150
265/265 [==============================] - 12s - loss: 0.4244 - acc: 0.8679 - val_loss: 2.5050 - val_acc: 0.2456
Epoch 124/150
265/265 [==============================] - 12s - loss: 0.3023 - acc: 0.8981 - val_loss: 2.8472 - val_acc: 0.2456
Epoch 125/150
265/265 [==============================] - 12s - loss: 0.3957 - acc: 0.8642 - val_loss: 3.6014 - val_acc: 0.2632
Epoch 126/150
265/265 [==============================] - 13s - loss: 1.2517 - acc: 0.6151 - val_loss: 2.3458 - val_acc: 0.2544
Epoch 127/150
265/265 [==============================] - 12s - loss: 0.7153 - acc: 0.7358 - val_loss: 2.2187 - val_acc: 0.2281
Epoch 128/150
265/265 [==============================] - 13s - loss: 0.3372 - acc: 0.8906 - val_loss: 2.4134 - val_acc: 0.2281
Epoch 129/150
265/265 [==============================] - 13s - loss: 0.2881 - acc: 0.9019 - val_loss: 2.6480 - val_acc: 0.2632
Epoch 130/150
265/265 [==============================] - 12s - loss: 0.2763 - acc: 0.9057 - val_loss: 2.9795 - val_acc: 0.2456
Epoch 131/150
265/265 [==============================] - 12s - loss: 0.3952 - acc: 0.8679 - val_loss: 2.9460 - val_acc: 0.2368
Epoch 132/150
265/265 [==============================] - 14s - loss: 0.7404 - acc: 0.7396 - val_loss: 2.5271 - val_acc: 0.2544
Epoch 133/150
265/265 [==============================] - 14s - loss: 0.3928 - acc: 0.8302 - val_loss: 2.7515 - val_acc: 0.2544
Epoch 134/150
265/265 [==============================] - 15s - loss: 0.3483 - acc: 0.8830 - val_loss: 3.4418 - val_acc: 0.2544
Epoch 135/150
265/265 [==============================] - 13s - loss: 0.2272 - acc: 0.9283 - val_loss: 3.4934 - val_acc: 0.2456
Epoch 136/150
265/265 [==============================] - 13s - loss: 0.2964 - acc: 0.8981 - val_loss: 3.3314 - val_acc: 0.2544
Epoch 137/150
265/265 [==============================] - 12s - loss: 0.3000 - acc: 0.8830 - val_loss: 3.2715 - val_acc: 0.2368
Epoch 138/150
265/265 [==============================] - 12s - loss: 0.7886 - acc: 0.7698 - val_loss: 2.8287 - val_acc: 0.2281
Epoch 139/150
265/265 [==============================] - 12s - loss: 0.3318 - acc: 0.8868 - val_loss: 2.6391 - val_acc: 0.2982
Epoch 140/150
265/265 [==============================] - 13s - loss: 0.2547 - acc: 0.9019 - val_loss: 3.3947 - val_acc: 0.2544
Epoch 141/150
265/265 [==============================] - 13s - loss: 0.2331 - acc: 0.9170 - val_loss: 3.7133 - val_acc: 0.2719
Epoch 142/150
265/265 [==============================] - 13s - loss: 0.5736 - acc: 0.8075 - val_loss: 3.4479 - val_acc: 0.3158
Epoch 143/150
265/265 [==============================] - 13s - loss: 0.8120 - acc: 0.7736 - val_loss: 2.6344 - val_acc: 0.2895
Epoch 144/150
265/265 [==============================] - 14s - loss: 0.2107 - acc: 0.9208 - val_loss: 2.8510 - val_acc: 0.2719
Epoch 145/150
265/265 [==============================] - 12s - loss: 0.2228 - acc: 0.9208 - val_loss: 3.0919 - val_acc: 0.2807
Epoch 146/150
265/265 [==============================] - 14s - loss: 0.2729 - acc: 0.8906 - val_loss: 3.9666 - val_acc: 0.2456
Epoch 147/150
265/265 [==============================] - 15s - loss: 0.6143 - acc: 0.8000 - val_loss: 2.7515 - val_acc: 0.2719
Epoch 148/150
265/265 [==============================] - 14s - loss: 0.2024 - acc: 0.9321 - val_loss: 3.0110 - val_acc: 0.2982
Epoch 149/150
265/265 [==============================] - 13s - loss: 0.3395 - acc: 0.8830 - val_loss: 3.4493 - val_acc: 0.2368
Epoch 150/150
265/265 [==============================] - 13s - loss: 0.2645 - acc: 0.9094 - val_loss: 3.3100 - val_acc: 0.2807
2017-07-09T19:28:51.002737

The model really tries not to overfit, but in general we simply do not have enough training data here
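
The data set loaded above was already augmented offline; another common way to stretch limited training data is on-the-fly augmentation. A sketch using keras.preprocessing.image.ImageDataGenerator (the parameter values here are illustrative, not tuned):

In [ ]:
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rotation_range=15, width_shift_range=0.1,
                             height_shift_range=0.1, zoom_range=0.1)
# model.fit_generator(datagen.flow(X_train, y_train, batch_size=100),
#                     steps_per_epoch=len(X_train) // 100, epochs=150)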


In [165]:
train_loss, train_accuracy = model.evaluate(X_train, y_train, batch_size=32)
train_loss, train_accuracy


379/379 [==============================] - 5s     
Out[165]:
(1.1381789043270503, 0.7941952497946555)

In [166]:
test_loss, test_accuracy = model.evaluate(X_test, y_test, batch_size=32)
test_loss, test_accuracy


3411/3411 [==============================] - 47s    
Out[166]:
(3.5652635606353633, 0.29698035768384773)
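
With test accuracy this close to chance, a per-class breakdown is more informative than a single number; a sketch using sklearn's confusion_matrix (rows are true classes, columns are predictions):

In [ ]:
from sklearn.metrics import confusion_matrix

y_pred = model.predict(X_test)
confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1))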

In [ ]:
!mkdir models

In [ ]:
model.save('models/conv-vgg-augmented.hdf5')
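
The saved file contains the architecture, the weights, and the optimizer state, so the model can later be restored without re-running the definition cells:

In [ ]:
from keras.models import load_model
restored_model = load_model('models/conv-vgg-augmented.hdf5')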

In [ ]:
!ls -l models

In [ ]: