In [1]:
import warnings
warnings.filterwarnings('ignore')

In [2]:
%matplotlib inline
%pylab inline


Populating the interactive namespace from numpy and matplotlib

In [3]:
import matplotlib.pylab as plt
# https://docs.scipy.org/doc/numpy/reference/routines.math.html
import numpy as np

In [4]:
from datetime import tzinfo, timedelta, datetime

In [5]:
from distutils.version import StrictVersion

In [6]:
import sklearn

assert StrictVersion(sklearn.__version__) >= StrictVersion('0.18.1')

sklearn.__version__


Out[6]:
'0.18.1'

In [7]:
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)

assert StrictVersion(tf.__version__) >= StrictVersion('1.1.0')

tf.__version__


Out[7]:
'1.2.1'

In [8]:
import keras

assert StrictVersion(keras.__version__) >= StrictVersion('2.0.0')

keras.__version__


Using TensorFlow backend.
Out[8]:
'2.0.6'

In [9]:
# !curl -O https://raw.githubusercontent.com/DJCordhose/speed-limit-signs/master/data/speed-limit-signs.zip
# !curl -O https://raw.githubusercontent.com/DJCordhose/speed-limit-signs/master/data/augmented-signs.zip

In [10]:
# https://docs.python.org/3/library/zipfile.html
# from zipfile import ZipFile
# zip = ZipFile(r'speed-limit-signs.zip')
# zip.extractall('.')
# zip = ZipFile(r'augmented-signs.zip')
# zip.extractall('.')

In [11]:
# !ls -l speed-limit-signs

In [12]:
# !ls -l augmented-signs

In [13]:
import os
import skimage.data
import skimage.transform
from keras.utils.np_utils import to_categorical
import numpy as np

def load_data(data_dir, ext=".ppm"):
    num_categories = 6

    # Get all subdirectories of data_dir. Each represents a label.
    directories = [d for d in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, d))]
    # Loop through the label directories and collect the data in
    # two lists, labels and images.
    labels = []
    images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f)
                      for f in os.listdir(label_dir) if f.endswith(ext)]
        # For each label, load its images and add them to the images list,
        # and add the label number (i.e. the directory name) to the labels list.
        for f in file_names:
            images.append(skimage.data.imread(f))
            labels.append(int(d))
    # Resize all images to 64x64 and one-hot encode the labels.
    images64 = [skimage.transform.resize(image, (64, 64)) for image in images]
    y = np.array(labels)
    y = to_categorical(y, num_categories)
    X = np.array(images64)
    return X, y

In [14]:
# Load datasets.
ROOT_PATH = "./"

In [15]:
original_dir = os.path.join(ROOT_PATH, "speed-limit-signs")
original_images, original_labels = load_data(original_dir, ext=".ppm")

In [16]:
data_dir = os.path.join(ROOT_PATH, "augmented-signs")
X, y = load_data(data_dir, ext=".png")
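
In [ ]:
# Quick sanity check (a sketch using the arrays loaded above): X should be
# (n_samples, 64, 64, 3), y the one-hot labels; summing y along axis 0
# gives the number of images per class.
print(X.shape, y.shape)
print(y.sum(axis=0))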

In [17]:
from sklearn.model_selection import train_test_split

In [18]:
checkpoint_callback = keras.callbacks.ModelCheckpoint('../tmp/model-checkpoints/weights.epoch-{epoch:02d}-val_loss-{val_loss:.2f}.hdf5')
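
In [ ]:
# Alternative sketch: instead of writing one checkpoint file per epoch,
# save_best_only=True keeps a single file that is overwritten whenever
# val_loss improves.
best_checkpoint_callback = keras.callbacks.ModelCheckpoint(
    '../tmp/model-checkpoints/weights.best.hdf5',
    monitor='val_loss', save_best_only=True)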

In [25]:
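# stop training once val_loss has not improved for 300 consecutive epochs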
early_stopping_callback = keras.callbacks.EarlyStopping(monitor='val_loss', patience=300, verbose=1)

In [21]:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tensorboard/README.md
# https://keras.io/callbacks/#tensorboard
# http://stackoverflow.com/questions/42112260/how-do-i-use-the-tensorboard-callback-of-keras
tb_callback = keras.callbacks.TensorBoard(log_dir='../tmp/tf_log')
#                                          histogram_freq=1, write_graph=True, write_images=True)
# tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
# To start tensorboard
# tensorboard --logdir=/mnt/c/Users/olive/Development/ml/tf_log
# open http://localhost:6006

In [22]:
# we want to distribute the different classes equally over train and test; stratify takes care of this
# https://github.com/amueller/scipy-2017-sklearn/blob/master/notebooks/04.Training_and_Testing_Data.ipynb
# http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=3, stratify=y)
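
In [ ]:
# Verify the stratified split (a sketch): the per-class proportions of the
# one-hot labels should be nearly identical in train and test.
print(y_train.mean(axis=0))
print(y_test.mean(axis=0))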

In [23]:
X_train.shape, y_train.shape


Out[23]:
((3032, 64, 64, 3), (3032, 6))

In [24]:
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Convolution2D, MaxPooling2D

# drop_out = 0.9
# drop_out = 0.75
drop_out = 0.5
# drop_out = 0.25
# drop_out = 0.0

# input tensor for a 3-channel 64x64 image
inputs = Input(shape=(64, 64, 3))

# one block of convolutional layers
x = Convolution2D(64, (3, 3), activation='relu')(inputs)
# x = Dropout(drop_out)(x)
x = Convolution2D(64, (3, 3), activation='relu')(x)
# x = Dropout(drop_out)(x)
x = Convolution2D(64, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(drop_out)(x)

# one more block
x = Convolution2D(128, (3, 3), activation='relu')(x)
# x = Dropout(drop_out)(x)
x = Convolution2D(128, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(drop_out)(x)

# one more block
x = Convolution2D(256, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(drop_out)(x)

x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(drop_out)(x)

# softmax activation, 6 categories
predictions = Dense(6, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
model.summary()
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 64, 64, 3)         0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 62, 62, 64)        1792      
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 60, 60, 64)        36928     
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 58, 58, 64)        36928     
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 29, 29, 64)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 29, 29, 64)        0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 27, 27, 128)       73856     
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 25, 25, 128)       147584    
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 12, 12, 128)       0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 12, 12, 128)       0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 10, 10, 256)       295168    
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 5, 5, 256)         0         
_________________________________________________________________
dropout_3 (Dropout)          (None, 5, 5, 256)         0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 6400)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 256)               1638656   
_________________________________________________________________
dropout_4 (Dropout)          (None, 256)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 6)                 1542      
=================================================================
Total params: 2,232,454
Trainable params: 2,232,454
Non-trainable params: 0
_________________________________________________________________
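
In [ ]:
# Worked check of the parameter counts above: for a valid 3x3 convolution,
# params = kernel_height * kernel_width * in_channels * filters + filters
print(3 * 3 * 3 * 64 + 64)    # conv2d_1: 1792
print(3 * 3 * 64 * 64 + 64)   # conv2d_2: 36928
print(6400 * 256 + 256)       # dense_1:  1638656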

In [26]:
!rm -rf ../tmp/tf_log
!rm -rf ../tmp/model-checkpoints

!mkdir -p ../tmp/model-checkpoints
!mkdir -p ../tmp/tf_log

In [ ]:
# Running on a GPU, batch size can be critical depending on the GPU memory available;
# larger is desirable, but we might end up using as little as 50
print(datetime.utcnow().isoformat())
# BE CAREFUL: with validation_split, the validation data is always the last slice of the data and is not shuffled
# https://keras.io/getting-started/faq/#how-is-the-validation-split-computed
model.fit(X_train, y_train, epochs=2000, batch_size=500, validation_split=0.3, 
          callbacks=[tb_callback, early_stopping_callback])
#           callbacks=[tb_callback])
# model.fit(X_train, y_train, epochs=50, batch_size=200, validation_split=0.3)
print(datetime.utcnow().isoformat())


2017-07-31T06:34:45.622357
Train on 2122 samples, validate on 910 samples
Epoch 1/2000
2122/2122 [==============================] - 15s - loss: 3.5236 - acc: 0.1946 - val_loss: 1.7893 - val_acc: 0.1912
Epoch 2/2000
2122/2122 [==============================] - 4s - loss: 1.7781 - acc: 0.2055 - val_loss: 1.7771 - val_acc: 0.2000
Epoch 3/2000
2122/2122 [==============================] - 4s - loss: 1.7573 - acc: 0.1937 - val_loss: 1.7718 - val_acc: 0.2088
Epoch 4/2000
2122/2122 [==============================] - 4s - loss: 1.7570 - acc: 0.2050 - val_loss: 1.7639 - val_acc: 0.2000
Epoch 5/2000
2122/2122 [==============================] - 4s - loss: 1.7546 - acc: 0.2125 - val_loss: 1.7478 - val_acc: 0.2000
Epoch 6/2000
2122/2122 [==============================] - 5s - loss: 1.7651 - acc: 0.2276 - val_loss: 1.7855 - val_acc: 0.1714
Epoch 7/2000
2122/2122 [==============================] - 5s - loss: 1.7615 - acc: 0.2323 - val_loss: 1.7489 - val_acc: 0.2363
Epoch 8/2000
2122/2122 [==============================] - 5s - loss: 2.0125 - acc: 0.2135 - val_loss: 1.7860 - val_acc: 0.1714
Epoch 9/2000
2122/2122 [==============================] - 5s - loss: 1.7723 - acc: 0.2012 - val_loss: 1.7829 - val_acc: 0.1692
Epoch 10/2000
2122/2122 [==============================] - 5s - loss: 1.7508 - acc: 0.2229 - val_loss: 1.7294 - val_acc: 0.2527
Epoch 11/2000
2122/2122 [==============================] - 5s - loss: 1.7576 - acc: 0.2262 - val_loss: 1.7624 - val_acc: 0.2110
Epoch 12/2000
2122/2122 [==============================] - 5s - loss: 1.7361 - acc: 0.2620 - val_loss: 2.3431 - val_acc: 0.1714
Epoch 13/2000
2122/2122 [==============================] - 5s - loss: 1.9842 - acc: 0.2300 - val_loss: 1.7432 - val_acc: 0.2736
Epoch 14/2000
2122/2122 [==============================] - 4s - loss: 1.7290 - acc: 0.2380 - val_loss: 1.7254 - val_acc: 0.3198
Epoch 15/2000
2122/2122 [==============================] - 4s - loss: 1.7129 - acc: 0.2733 - val_loss: 1.7033 - val_acc: 0.2714
Epoch 16/2000
2122/2122 [==============================] - 4s - loss: 1.7128 - acc: 0.2620 - val_loss: 1.8091 - val_acc: 0.1780
Epoch 17/2000
2122/2122 [==============================] - 4s - loss: 1.7303 - acc: 0.2394 - val_loss: 1.6931 - val_acc: 0.3066
Epoch 18/2000
2122/2122 [==============================] - 4s - loss: 1.6726 - acc: 0.2870 - val_loss: 2.0014 - val_acc: 0.1714
Epoch 19/2000
2122/2122 [==============================] - 4s - loss: 1.8323 - acc: 0.2394 - val_loss: 1.7147 - val_acc: 0.2527
Epoch 20/2000
2122/2122 [==============================] - 4s - loss: 1.7298 - acc: 0.2625 - val_loss: 1.7686 - val_acc: 0.1901
Epoch 21/2000
2122/2122 [==============================] - 4s - loss: 1.7065 - acc: 0.2710 - val_loss: 1.7703 - val_acc: 0.1890
Epoch 22/2000
2122/2122 [==============================] - 4s - loss: 1.6928 - acc: 0.2696 - val_loss: 1.6595 - val_acc: 0.3011
Epoch 23/2000
2122/2122 [==============================] - 4s - loss: 1.9332 - acc: 0.2182 - val_loss: 1.7481 - val_acc: 0.2286
Epoch 24/2000
2122/2122 [==============================] - 4s - loss: 1.7491 - acc: 0.2846 - val_loss: 1.7383 - val_acc: 0.2912
Epoch 25/2000
2122/2122 [==============================] - 4s - loss: 1.7011 - acc: 0.2828 - val_loss: 1.6962 - val_acc: 0.2802
Epoch 26/2000
2122/2122 [==============================] - 4s - loss: 1.6887 - acc: 0.2950 - val_loss: 1.6725 - val_acc: 0.3066
Epoch 27/2000
2122/2122 [==============================] - 4s - loss: 1.6733 - acc: 0.2941 - val_loss: 1.7003 - val_acc: 0.3319
Epoch 28/2000
2122/2122 [==============================] - 4s - loss: 1.6477 - acc: 0.3063 - val_loss: 1.9137 - val_acc: 0.1802
Epoch 29/2000
2122/2122 [==============================] - 4s - loss: 1.7409 - acc: 0.2502 - val_loss: 1.7215 - val_acc: 0.2692
Epoch 30/2000
2122/2122 [==============================] - 4s - loss: 1.6993 - acc: 0.2842 - val_loss: 1.6692 - val_acc: 0.3055
Epoch 31/2000
2122/2122 [==============================] - 4s - loss: 1.6843 - acc: 0.2851 - val_loss: 1.7196 - val_acc: 0.3121
Epoch 32/2000
2122/2122 [==============================] - 4s - loss: 1.6696 - acc: 0.3106 - val_loss: 1.6783 - val_acc: 0.2824
Epoch 33/2000
2122/2122 [==============================] - 5s - loss: 1.6497 - acc: 0.3115 - val_loss: 1.6232 - val_acc: 0.3231
Epoch 34/2000
2122/2122 [==============================] - 5s - loss: 1.7887 - acc: 0.2710 - val_loss: 1.7428 - val_acc: 0.2780
Epoch 35/2000
2122/2122 [==============================] - 5s - loss: 1.6973 - acc: 0.2696 - val_loss: 1.6465 - val_acc: 0.3330
Epoch 36/2000
2122/2122 [==============================] - 4s - loss: 1.7180 - acc: 0.2592 - val_loss: 1.7009 - val_acc: 0.3099
Epoch 37/2000
2122/2122 [==============================] - 5s - loss: 1.6376 - acc: 0.3360 - val_loss: 1.6340 - val_acc: 0.3121
Epoch 38/2000
2122/2122 [==============================] - 5s - loss: 1.7266 - acc: 0.2804 - val_loss: 1.7071 - val_acc: 0.3385
Epoch 39/2000
2122/2122 [==============================] - 5s - loss: 1.6530 - acc: 0.3252 - val_loss: 1.6465 - val_acc: 0.2857
Epoch 40/2000
2122/2122 [==============================] - 4s - loss: 1.6353 - acc: 0.3106 - val_loss: 1.7685 - val_acc: 0.2341
Epoch 41/2000
2122/2122 [==============================] - 4s - loss: 1.7254 - acc: 0.2663 - val_loss: 1.6342 - val_acc: 0.3319
Epoch 42/2000
2122/2122 [==============================] - 5s - loss: 1.6187 - acc: 0.3351 - val_loss: 1.6806 - val_acc: 0.2813
Epoch 43/2000
2122/2122 [==============================] - 5s - loss: 1.6250 - acc: 0.3299 - val_loss: 1.9030 - val_acc: 0.2615
Epoch 44/2000
2122/2122 [==============================] - 4s - loss: 1.9126 - acc: 0.2502 - val_loss: 1.7060 - val_acc: 0.2780
Epoch 45/2000
2122/2122 [==============================] - 4s - loss: 1.6495 - acc: 0.2978 - val_loss: 1.6302 - val_acc: 0.3275
Epoch 46/2000
2122/2122 [==============================] - 4s - loss: 1.6302 - acc: 0.3318 - val_loss: 1.5875 - val_acc: 0.3374
Epoch 47/2000
2122/2122 [==============================] - 4s - loss: 1.6157 - acc: 0.3398 - val_loss: 1.6458 - val_acc: 0.3187
Epoch 48/2000
2122/2122 [==============================] - 4s - loss: 1.5689 - acc: 0.3638 - val_loss: 1.6094 - val_acc: 0.3363
Epoch 49/2000
2122/2122 [==============================] - 5s - loss: 1.5526 - acc: 0.3666 - val_loss: 1.7117 - val_acc: 0.2473
Epoch 50/2000
2122/2122 [==============================] - 4s - loss: 1.5996 - acc: 0.3374 - val_loss: 1.5387 - val_acc: 0.3527
Epoch 51/2000
2122/2122 [==============================] - 4s - loss: 1.6019 - acc: 0.3497 - val_loss: 1.5387 - val_acc: 0.3560
Epoch 52/2000
2122/2122 [==============================] - 5s - loss: 1.5119 - acc: 0.3817 - val_loss: 1.5233 - val_acc: 0.3549
Epoch 53/2000
2122/2122 [==============================] - 4s - loss: 1.7222 - acc: 0.3162 - val_loss: 1.5938 - val_acc: 0.3429
Epoch 54/2000
2122/2122 [==============================] - 4s - loss: 1.5259 - acc: 0.3864 - val_loss: 1.7432 - val_acc: 0.2637
Epoch 55/2000
2122/2122 [==============================] - 4s - loss: 1.6359 - acc: 0.3398 - val_loss: 1.5687 - val_acc: 0.3495
Epoch 56/2000
2122/2122 [==============================] - 4s - loss: 1.5327 - acc: 0.3737 - val_loss: 1.6653 - val_acc: 0.3132
Epoch 57/2000
2122/2122 [==============================] - 4s - loss: 1.5414 - acc: 0.3756 - val_loss: 1.5211 - val_acc: 0.3802
Epoch 58/2000
2122/2122 [==============================] - 4s - loss: 1.8755 - acc: 0.3798 - val_loss: 1.6584 - val_acc: 0.3220
Epoch 59/2000
2122/2122 [==============================] - 4s - loss: 1.5803 - acc: 0.3709 - val_loss: 1.5245 - val_acc: 0.3549
Epoch 60/2000
2122/2122 [==============================] - 4s - loss: 1.4976 - acc: 0.4034 - val_loss: 1.5222 - val_acc: 0.3604
Epoch 61/2000
2122/2122 [==============================] - 4s - loss: 1.4724 - acc: 0.4128 - val_loss: 1.6951 - val_acc: 0.3253
Epoch 62/2000
2122/2122 [==============================] - 4s - loss: 1.5541 - acc: 0.3784 - val_loss: 1.5090 - val_acc: 0.3769
Epoch 63/2000
2122/2122 [==============================] - 4s - loss: 1.5508 - acc: 0.4062 - val_loss: 1.5235 - val_acc: 0.3901
Epoch 64/2000
2122/2122 [==============================] - 5s - loss: 1.3823 - acc: 0.4581 - val_loss: 1.5549 - val_acc: 0.3407
Epoch 65/2000
2122/2122 [==============================] - 4s - loss: 1.5157 - acc: 0.3973 - val_loss: 1.4519 - val_acc: 0.3890
Epoch 66/2000
2122/2122 [==============================] - 5s - loss: 1.4651 - acc: 0.4298 - val_loss: 1.6098 - val_acc: 0.3220
Epoch 67/2000
2122/2122 [==============================] - 4s - loss: 1.4436 - acc: 0.4265 - val_loss: 1.4907 - val_acc: 0.3791
Epoch 68/2000
2122/2122 [==============================] - 4s - loss: 1.3317 - acc: 0.4746 - val_loss: 1.5226 - val_acc: 0.4000
Epoch 69/2000
2122/2122 [==============================] - 5s - loss: 1.3902 - acc: 0.4703 - val_loss: 1.6286 - val_acc: 0.3022
Epoch 70/2000
2122/2122 [==============================] - 4s - loss: 1.4841 - acc: 0.3973 - val_loss: 1.4606 - val_acc: 0.4165
Epoch 71/2000
2122/2122 [==============================] - 4s - loss: 1.3190 - acc: 0.5009 - val_loss: 1.8810 - val_acc: 0.3516
Epoch 72/2000
2122/2122 [==============================] - 4s - loss: 1.6465 - acc: 0.3916 - val_loss: 1.4566 - val_acc: 0.4088
Epoch 73/2000
2122/2122 [==============================] - 5s - loss: 1.3499 - acc: 0.4717 - val_loss: 1.4791 - val_acc: 0.4055
Epoch 74/2000
2122/2122 [==============================] - 5s - loss: 1.3198 - acc: 0.4779 - val_loss: 1.3925 - val_acc: 0.4505
Epoch 75/2000
2122/2122 [==============================] - 5s - loss: 1.3181 - acc: 0.4755 - val_loss: 1.4068 - val_acc: 0.4440
Epoch 76/2000
2122/2122 [==============================] - 5s - loss: 1.2746 - acc: 0.5080 - val_loss: 1.4828 - val_acc: 0.3846
Epoch 77/2000
2122/2122 [==============================] - 5s - loss: 1.3251 - acc: 0.4746 - val_loss: 1.3807 - val_acc: 0.4637
Epoch 78/2000
2122/2122 [==============================] - 4s - loss: 1.2819 - acc: 0.5221 - val_loss: 1.5498 - val_acc: 0.4198
Epoch 79/2000
2122/2122 [==============================] - 4s - loss: 1.2505 - acc: 0.5221 - val_loss: 1.3125 - val_acc: 0.4659
Epoch 80/2000
2122/2122 [==============================] - 5s - loss: 1.1647 - acc: 0.5401 - val_loss: 1.4388 - val_acc: 0.4330
Epoch 81/2000
2122/2122 [==============================] - 5s - loss: 1.3417 - acc: 0.4783 - val_loss: 1.3892 - val_acc: 0.4560
Epoch 82/2000
2122/2122 [==============================] - 5s - loss: 1.3286 - acc: 0.4958 - val_loss: 1.2698 - val_acc: 0.5011
Epoch 83/2000
2122/2122 [==============================] - 5s - loss: 1.1367 - acc: 0.5467 - val_loss: 1.2979 - val_acc: 0.5000
Epoch 84/2000
2122/2122 [==============================] - 5s - loss: 1.1695 - acc: 0.5679 - val_loss: 1.3319 - val_acc: 0.4835
Epoch 85/2000
2122/2122 [==============================] - 5s - loss: 1.0244 - acc: 0.6103 - val_loss: 1.4284 - val_acc: 0.4374
Epoch 86/2000
2122/2122 [==============================] - 5s - loss: 1.2121 - acc: 0.5363 - val_loss: 1.2681 - val_acc: 0.5121
Epoch 87/2000
2122/2122 [==============================] - 5s - loss: 0.9888 - acc: 0.6225 - val_loss: 1.3423 - val_acc: 0.4593
Epoch 88/2000
2122/2122 [==============================] - 5s - loss: 1.0669 - acc: 0.5914 - val_loss: 1.1569 - val_acc: 0.5484
Epoch 89/2000
2122/2122 [==============================] - 5s - loss: 0.9675 - acc: 0.6268 - val_loss: 1.2204 - val_acc: 0.5505
Epoch 90/2000
2122/2122 [==============================] - 5s - loss: 1.0207 - acc: 0.6023 - val_loss: 1.2899 - val_acc: 0.4945
Epoch 91/2000
 500/2122 [======>.......................] - ETA: 3s - loss: 1.0049 - acc: 0.5860
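
In [ ]:
# Alternative sketch (not run here): because validation_split always takes the
# last, unshuffled samples, one could instead carve out an explicit stratified
# validation set and pass it via validation_data.
# X_tr, X_val, y_tr, y_val = train_test_split(
#     X_train, y_train, test_size=0.3, random_state=3, stratify=y_train)
# model.fit(X_tr, y_tr, epochs=2000, batch_size=500,
#           validation_data=(X_val, y_val),
#           callbacks=[tb_callback, early_stopping_callback])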

In [27]:
train_loss, train_accuracy = model.evaluate(X_train, y_train, batch_size=500)
train_loss, train_accuracy


3032/3032 [==============================] - 2s     
Out[27]:
(0.26316697995036648, 0.95316629752634696)

In [28]:
test_loss, test_accuracy = model.evaluate(X_test, y_test, batch_size=500)
test_loss, test_accuracy


758/758 [==============================] - 1s     
Out[28]:
(1.0037021731323805, 0.81530343589807874)

In [29]:
original_loss, original_accuracy = model.evaluate(original_images, original_labels, batch_size=500)
original_loss, original_accuracy


379/379 [==============================] - 1s
Out[29]:
(1.0506060123443604, 0.81530344486236572)
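
In [ ]:
# Optional sketch: a confusion matrix over the original images shows which of
# the 6 classes the model mixes up (uses scikit-learn, whose version is
# asserted above).
from sklearn.metrics import confusion_matrix
y_pred = model.predict(original_images).argmax(axis=1)
y_true = original_labels.argmax(axis=1)
print(confusion_matrix(y_true, y_pred))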

In [30]:
!mkdir models


mkdir: cannot create directory ‘models’: File exists

In [31]:
model.save('models/conv-vgg-augmented.hdf5')
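
In [ ]:
# Round-trip sketch: reload the saved model and check that it reproduces the
# test accuracy from above.
from keras.models import load_model
reloaded_model = load_model('models/conv-vgg-augmented.hdf5')
print(reloaded_model.evaluate(X_test, y_test, batch_size=500))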

In [32]:
!ls -lh models


total 18M
-rw-rw-r-- 1 ubuntu ubuntu 18M Jul 21 19:56 conv-vgg-augmented.hdf5

In [33]:
!curl --upload-file ./models/conv-vgg-augmented.hdf5 https://transfer.sh/conv-vgg-augmented.hdf5


https://transfer.sh/K5RH8/conv-vgg-augmented.hdf5

In [ ]: