Data augmentation example

  • Use a little sample of MNIST to generate a model with good performance.

In [1]:
from __future__ import print_function

#Basic libraries
import numpy as np
import tensorflow as tf
print('Tensorflow version: ', tf.__version__)
import time  # NOTE(review): `time` appears unused in this notebook

#Show images
import matplotlib.pyplot as plt
%matplotlib inline
# plt configuration
plt.rcParams['figure.figsize'] = (10, 10)        # size of images
plt.rcParams['image.interpolation'] = 'nearest'  # show exact image
plt.rcParams['image.cmap'] = 'gray'  # use grayscale 

# Select GPU
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"  # order GPUs by PCI bus id so index 0 is stable
os.environ["CUDA_VISIBLE_DEVICES"]="0"        # restrict TensorFlow to the first GPU


Tensorflow version:  1.4.0

In [2]:
# Import mnist dataset and rescale between [0,1]
# NOTE(review): tensorflow.contrib.keras is the TF 1.x bundled Keras (removed
# in TF 2.x); the version printed above is 1.4.0, so this import is valid here.
from tensorflow.contrib.keras import datasets

(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data(path='mnist.npz')

# Cast to float32 and scale pixel intensities from [0, 255] to [0, 1]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.
X_test /= 255.

print('X train shape: ',X_train.shape)
print('y train shape: ',y_train.shape)
print('X test shape: ', X_test.shape)
print('y test shape: ', y_test.shape)


X train shape:  (60000, 28, 28)
y train shape:  (60000,)
X test shape:  (10000, 28, 28)
y test shape:  (10000,)

In [3]:
# Add an explicit single-channel axis: Conv2D expects NHWC input,
# i.e. (batch, height, width, channels).
X_train = X_train.reshape((-1, 28, 28, 1))
X_test = X_test.reshape((-1, 28, 28, 1))
print('X train shape: ',X_train.shape)
print('X test shape: ', X_test.shape)


X train shape:  (60000, 28, 28, 1)
X test shape:  (10000, 28, 28, 1)

In [4]:
#Select a random subsample of the train data for each class
def create_sample(x, y, sample_size=100):
    """Return a class-balanced random subsample of (x, y).

    Draws `sample_size` examples from each class present in `y`, then
    shuffles so the classes are interleaved.

    Args:
        x: array of examples, indexable along axis 0.
        y: 1-D array of class labels aligned with `x`.
        sample_size: number of examples to draw per class (default 100).

    Returns:
        Tuple (x_sample, y_sample), each of length sample_size * n_classes.
    """
    # BUG FIX: the original read the global `y_train` here instead of the
    # `y` argument, so the function only worked on the training labels.
    class_list = np.unique(y)

    sample_ix = []
    for label in class_list:
        class_ix = np.where(y == label)[0]
        # Sample without replacement when the class is large enough;
        # fall back to replacement otherwise (the original always sampled
        # with replacement, which could duplicate rows within a class).
        replace = len(class_ix) < sample_size
        sample_ix += list(np.random.choice(class_ix, sample_size, replace=replace))

    # Shuffle the gathered indices so classes are not grouped together
    sample_ix = np.random.permutation(sample_ix)

    return x[sample_ix], y[sample_ix]

#Create new sample datasets
# 100 examples per digit class -> 1000 training images in total
X_train_sample, y_train_sample = create_sample(X_train, y_train, sample_size=100)
print('X train sample shape: ',X_train_sample.shape)
print('y train sample shape: ',y_train_sample.shape)


X train sample shape:  (1000, 28, 28, 1)
y train sample shape:  (1000,)

In [5]:
#Examine the data: show the first 32 sampled digits with their labels
fig = plt.figure()
# The original used `for n, i in enumerate(range(32))`, where n == i always;
# a single loop variable is equivalent and clearer.
for i in range(32):
    a = fig.add_subplot(4, 8, i + 1)
    a.set_title(str(y_train_sample[i]))
    plt.imshow(np.reshape(X_train_sample[i], (28, 28)))
# Lay out once after all subplots are added (the original re-ran this on
# every iteration; the final figure is the same).
fig.tight_layout()



In [6]:
#Define the architecture
from tensorflow.contrib.keras import models, layers, optimizers, callbacks, preprocessing


print('Creating Lenet architecture.')

#Inputs: 28x28 grayscale images, variable batch size
images = layers.Input(batch_shape=(None, 28, 28, 1), dtype='float32', name='Images')

#First convolutional layer: 20 5x5 filters -> 24x24x20, pooled to 12x12x20
# NOTE(review): no activation is specified on the Conv2D layers (linear by
# default) -- unusual for LeNet; confirm this is intended.
conv1 = layers.Conv2D(20, (5,5))(images)
pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

#Second convolutional layer: 20 5x5 filters -> 8x8x20, pooled to 4x4x20
conv2 = layers.Conv2D(20, (5,5))(pool1)
pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

#Flatten convolution output to a 320-dim vector
flat = layers.Flatten(name='Flat_image')(pool2)

# First dense layer
dense1 = layers.Dense(500, activation='relu', name='Dense_1')(flat)

# Second dense layer: softmax over the 10 digit classes
output = layers.Dense(10, activation='softmax', name='Dense_output')(dense1)


Creating Lenet architecture.

In [7]:
# Configure input and output data for the Model
model_conv = models.Model(inputs=images, outputs=output)
model_conv.summary()


# Select optimizer and compile model
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
# NOTE(review): momentum=0.99 with lr=0.01 is aggressive -- the recorded
# training log below diverges after ~epoch 25 (loss explodes to ~14.5);
# consider a lower momentum for this small-sample run.
sgd_optimizer = optimizers.SGD(lr=0.01, momentum=0.99, decay=0.005, nesterov=True)
model_conv.compile(loss='sparse_categorical_crossentropy', 
                   optimizer=sgd_optimizer, metrics=['accuracy'])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
Images (InputLayer)          (None, 28, 28, 1)         0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 24, 24, 20)        520       
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 12, 12, 20)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 8, 8, 20)          10020     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 4, 4, 20)          0         
_________________________________________________________________
Flat_image (Flatten)         (None, 320)               0         
_________________________________________________________________
Dense_1 (Dense)              (None, 500)               160500    
_________________________________________________________________
Dense_output (Dense)         (None, 10)                5010      
=================================================================
Total params: 176,050
Trainable params: 176,050
Non-trainable params: 0
_________________________________________________________________

In [8]:
#Fit the model on the 1000-image sample, validating on the full test set
batch_size = 64
epochs = 30
# NOTE(review): the recorded output below says "Epoch x/40" -- it is stale
# output from an earlier run with epochs=40; re-run the cell to refresh.

# Tensorflow callback: write training curves for TensorBoard
tb_callback_ln = callbacks.TensorBoard(log_dir='/tmp/tensorboard/mnist/conv_aug1')

history_conv = model_conv.fit(X_train_sample, y_train_sample, batch_size=batch_size, epochs=epochs,
                              validation_data=(X_test, y_test), callbacks=[tb_callback_ln])


Train on 1000 samples, validate on 10000 samples
Epoch 1/40
1000/1000 [==============================] - 1s - loss: 2.2218 - acc: 0.2500 - val_loss: 1.9811 - val_acc: 0.4975
Epoch 2/40
1000/1000 [==============================] - 0s - loss: 1.2735 - acc: 0.6930 - val_loss: 0.6495 - val_acc: 0.7985
Epoch 3/40
1000/1000 [==============================] - 0s - loss: 0.8050 - acc: 0.8350 - val_loss: 0.7325 - val_acc: 0.8771
Epoch 4/40
1000/1000 [==============================] - 0s - loss: 0.5482 - acc: 0.8910 - val_loss: 0.6386 - val_acc: 0.8674
Epoch 5/40
1000/1000 [==============================] - 0s - loss: 0.4383 - acc: 0.8960 - val_loss: 0.7281 - val_acc: 0.8767
Epoch 6/40
1000/1000 [==============================] - 0s - loss: 0.2988 - acc: 0.9440 - val_loss: 0.7930 - val_acc: 0.8965
Epoch 7/40
1000/1000 [==============================] - 0s - loss: 0.2384 - acc: 0.9580 - val_loss: 0.9983 - val_acc: 0.8808
Epoch 8/40
1000/1000 [==============================] - 0s - loss: 0.2718 - acc: 0.9580 - val_loss: 1.0468 - val_acc: 0.8971
Epoch 9/40
1000/1000 [==============================] - 0s - loss: 0.2354 - acc: 0.9620 - val_loss: 0.9861 - val_acc: 0.9027
Epoch 10/40
1000/1000 [==============================] - 0s - loss: 0.3006 - acc: 0.9590 - val_loss: 0.9552 - val_acc: 0.9146
Epoch 11/40
1000/1000 [==============================] - 0s - loss: 0.2704 - acc: 0.9680 - val_loss: 1.2012 - val_acc: 0.8962
Epoch 12/40
1000/1000 [==============================] - 0s - loss: 0.2465 - acc: 0.9730 - val_loss: 1.2834 - val_acc: 0.8952
Epoch 13/40
1000/1000 [==============================] - 0s - loss: 0.2273 - acc: 0.9760 - val_loss: 1.1994 - val_acc: 0.8996
Epoch 14/40
1000/1000 [==============================] - 0s - loss: 0.2959 - acc: 0.9700 - val_loss: 1.0845 - val_acc: 0.9181
Epoch 15/40
1000/1000 [==============================] - 0s - loss: 0.3632 - acc: 0.9700 - val_loss: 1.2727 - val_acc: 0.9066
Epoch 16/40
1000/1000 [==============================] - 0s - loss: 0.3744 - acc: 0.9640 - val_loss: 1.1142 - val_acc: 0.9184
Epoch 17/40
1000/1000 [==============================] - 0s - loss: 0.5982 - acc: 0.9550 - val_loss: 1.3266 - val_acc: 0.9067
Epoch 18/40
1000/1000 [==============================] - 0s - loss: 0.5034 - acc: 0.9630 - val_loss: 1.1935 - val_acc: 0.9160
Epoch 19/40
1000/1000 [==============================] - 0s - loss: 0.6762 - acc: 0.9510 - val_loss: 1.2781 - val_acc: 0.9121
Epoch 20/40
1000/1000 [==============================] - 0s - loss: 0.7121 - acc: 0.9470 - val_loss: 1.4577 - val_acc: 0.9014
Epoch 21/40
1000/1000 [==============================] - 0s - loss: 1.2045 - acc: 0.9180 - val_loss: 1.7380 - val_acc: 0.8859
Epoch 22/40
1000/1000 [==============================] - 0s - loss: 1.1538 - acc: 0.9240 - val_loss: 2.2071 - val_acc: 0.8557
Epoch 23/40
1000/1000 [==============================] - 0s - loss: 1.6352 - acc: 0.8920 - val_loss: 2.5208 - val_acc: 0.8388
Epoch 24/40
1000/1000 [==============================] - 0s - loss: 1.6463 - acc: 0.8910 - val_loss: 2.1636 - val_acc: 0.8594
Epoch 25/40
1000/1000 [==============================] - 0s - loss: 2.1674 - acc: 0.8610 - val_loss: 2.8288 - val_acc: 0.8203
Epoch 26/40
1000/1000 [==============================] - 0s - loss: 2.2332 - acc: 0.8600 - val_loss: 2.5309 - val_acc: 0.8410
Epoch 27/40
1000/1000 [==============================] - 0s - loss: 3.3205 - acc: 0.7920 - val_loss: 7.6416 - val_acc: 0.5240
Epoch 28/40
1000/1000 [==============================] - 0s - loss: 9.8482 - acc: 0.3890 - val_loss: 10.8006 - val_acc: 0.3295
Epoch 29/40
1000/1000 [==============================] - 0s - loss: 12.7997 - acc: 0.2050 - val_loss: 13.9757 - val_acc: 0.1327
Epoch 30/40
1000/1000 [==============================] - 0s - loss: 14.4579 - acc: 0.1030 - val_loss: 14.5385 - val_acc: 0.0980
Epoch 31/40
1000/1000 [==============================] - 0s - loss: 14.5063 - acc: 0.1000 - val_loss: 14.5385 - val_acc: 0.0980
Epoch 32/40
1000/1000 [==============================] - 0s - loss: 14.5063 - acc: 0.1000 - val_loss: 14.5385 - val_acc: 0.0980
Epoch 33/40
1000/1000 [==============================] - 0s - loss: 14.5063 - acc: 0.1000 - val_loss: 14.5385 - val_acc: 0.0980
Epoch 34/40
1000/1000 [==============================] - 0s - loss: 14.5063 - acc: 0.1000 - val_loss: 14.5385 - val_acc: 0.0980
Epoch 35/40
1000/1000 [==============================] - 0s - loss: 14.5063 - acc: 0.1000 - val_loss: 14.5385 - val_acc: 0.0980
Epoch 36/40
1000/1000 [==============================] - 0s - loss: 14.5063 - acc: 0.1000 - val_loss: 14.5385 - val_acc: 0.0980
Epoch 37/40
1000/1000 [==============================] - 0s - loss: 14.5063 - acc: 0.1000 - val_loss: 14.5385 - val_acc: 0.0980
Epoch 38/40
1000/1000 [==============================] - 0s - loss: 14.5063 - acc: 0.1000 - val_loss: 14.5385 - val_acc: 0.0980
Epoch 39/40
1000/1000 [==============================] - 0s - loss: 14.5063 - acc: 0.1000 - val_loss: 14.5385 - val_acc: 0.0980
Epoch 40/40
1000/1000 [==============================] - 0s - loss: 14.5063 - acc: 0.1000 - val_loss: 14.5385 - val_acc: 0.0980

In [9]:
# Training vs. test accuracy for the no-augmentation run
for key, label in [('acc', 'acc'), ('val_acc', 'val acc')]:
    plt.plot(history_conv.history[key], label=label)
plt.legend(loc='lower right')
plt.show()



In [ ]:

Data augmentation model


In [10]:
print('Second model. The same Lenet architecture')

# NOTE(review): this cell duplicates the architecture from In[6] verbatim and
# rebinds the module-level names `images`/`output`; consider factoring the
# model construction into a function to avoid the triple duplication below.
#Inputs
images = layers.Input(batch_shape=(None, 28, 28, 1), dtype='float32', name='Images')

#First convolutional layer
conv1 = layers.Conv2D(20, (5,5))(images)
pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

#Second convolutional layer
conv2 = layers.Conv2D(20, (5,5))(pool1)
pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

#Flatten convolution output
flat = layers.Flatten(name='Flat_image')(pool2)

# First dense layer
dense1 = layers.Dense(500, activation='relu', name='Dense_1')(flat)

# Second dense layer: softmax over the 10 digit classes
output = layers.Dense(10, activation='softmax', name='Dense_output')(dense1)


Second model. The same Lenet architecture

In [11]:
# Define inputs - outputs
model_aug = models.Model(inputs=images, outputs=output)
model_aug.summary()


# Select optimizer and compile model
# Same optimizer settings as the first model, for a like-for-like comparison
sgd_optimizer = optimizers.SGD(lr=0.01, momentum=0.99, decay=0.005, nesterov=True)
model_aug.compile(loss='sparse_categorical_crossentropy', 
                   optimizer=sgd_optimizer, metrics=['accuracy'])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
Images (InputLayer)          (None, 28, 28, 1)         0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 24, 24, 20)        520       
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 12, 12, 20)        0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 8, 8, 20)          10020     
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 4, 4, 20)          0         
_________________________________________________________________
Flat_image (Flatten)         (None, 320)               0         
_________________________________________________________________
Dense_1 (Dense)              (None, 500)               160500    
_________________________________________________________________
Dense_output (Dense)         (None, 10)                5010      
=================================================================
Total params: 176,050
Trainable params: 176,050
Non-trainable params: 0
_________________________________________________________________

In [12]:
# Augmentation for training: random shifts, shear and zoom.
# Rotation is deliberately disabled here (commented out); it is enabled in
# the second generator further below.
train_datagen = preprocessing.image.ImageDataGenerator(
    #rotation_range=0.25,
    width_shift_range=0.2,
    height_shift_range=0.12,
    shear_range=0.3,
    zoom_range=[0.9,1.3])

In [13]:
#Visualize the augmented images (typo "ugmented" fixed)
fig = plt.figure()
# The original used `for n, i in enumerate(range(32))`, where n == i always;
# a single loop variable is equivalent and clearer.
for i in range(32):
    a = fig.add_subplot(4, 8, i + 1)
    a.set_title(str(y_train_sample[i]))
    plt.imshow(np.reshape(train_datagen.random_transform(X_train_sample[i]), (28, 28)))
# Lay out once after all subplots are added (the original re-ran this on
# every iteration; the final figure is the same).
fig.tight_layout()



In [14]:
batch_size = 64
epochs = 30
tb_callback_ln = callbacks.TensorBoard(log_dir='/tmp/tensorboard/mnist/conv_aug2')
# NOTE(review): steps_per_epoch is based on the full train set (60000), not the
# 1000-image sample fed to the generator -- presumably intentional (~60 augmented
# passes over the sample per "epoch"), but confirm.
history_aug = model_aug.fit_generator(train_datagen.flow(X_train_sample, y_train_sample, batch_size=batch_size),
                                      steps_per_epoch=len(X_train) // batch_size,
                                      epochs=epochs,
                                      validation_data=(X_test, y_test),
                                      callbacks=[tb_callback_ln])


Epoch 1/30
937/937 [==============================] - 12s - loss: 0.4670 - acc: 0.8517 - val_loss: 0.1154 - val_acc: 0.9697
Epoch 2/30
937/937 [==============================] - 12s - loss: 0.0938 - acc: 0.9709 - val_loss: 0.0832 - val_acc: 0.9773
Epoch 3/30
937/937 [==============================] - 12s - loss: 0.0503 - acc: 0.9838 - val_loss: 0.0918 - val_acc: 0.9767
Epoch 4/30
937/937 [==============================] - 12s - loss: 0.0319 - acc: 0.9899 - val_loss: 0.0848 - val_acc: 0.9778
Epoch 5/30
937/937 [==============================] - 12s - loss: 0.0255 - acc: 0.9920 - val_loss: 0.0909 - val_acc: 0.9772
Epoch 6/30
937/937 [==============================] - 12s - loss: 0.0227 - acc: 0.9927 - val_loss: 0.0912 - val_acc: 0.9776
Epoch 7/30
937/937 [==============================] - 12s - loss: 0.0191 - acc: 0.9940 - val_loss: 0.0910 - val_acc: 0.9769
Epoch 8/30
937/937 [==============================] - 12s - loss: 0.0166 - acc: 0.9948 - val_loss: 0.0856 - val_acc: 0.9786
Epoch 9/30
937/937 [==============================] - 12s - loss: 0.0151 - acc: 0.9950 - val_loss: 0.0756 - val_acc: 0.9801
Epoch 10/30
937/937 [==============================] - 11s - loss: 0.0151 - acc: 0.9953 - val_loss: 0.0808 - val_acc: 0.9786
Epoch 11/30
937/937 [==============================] - 12s - loss: 0.0138 - acc: 0.9957 - val_loss: 0.0818 - val_acc: 0.9788
Epoch 12/30
937/937 [==============================] - 12s - loss: 0.0136 - acc: 0.9959 - val_loss: 0.0930 - val_acc: 0.9777
Epoch 13/30
937/937 [==============================] - 12s - loss: 0.0135 - acc: 0.9956 - val_loss: 0.0840 - val_acc: 0.9783
Epoch 14/30
937/937 [==============================] - 12s - loss: 0.0114 - acc: 0.9966 - val_loss: 0.0795 - val_acc: 0.9796
Epoch 15/30
937/937 [==============================] - 12s - loss: 0.0116 - acc: 0.9968 - val_loss: 0.0785 - val_acc: 0.9798
Epoch 16/30
937/937 [==============================] - 12s - loss: 0.0122 - acc: 0.9962 - val_loss: 0.0831 - val_acc: 0.9790
Epoch 17/30
937/937 [==============================] - 12s - loss: 0.0103 - acc: 0.9969 - val_loss: 0.0825 - val_acc: 0.9789
Epoch 18/30
937/937 [==============================] - 12s - loss: 0.0093 - acc: 0.9972 - val_loss: 0.0837 - val_acc: 0.9792
Epoch 19/30
937/937 [==============================] - 12s - loss: 0.0114 - acc: 0.9966 - val_loss: 0.0888 - val_acc: 0.9792
Epoch 20/30
937/937 [==============================] - 12s - loss: 0.0109 - acc: 0.9969 - val_loss: 0.0798 - val_acc: 0.9798
Epoch 21/30
937/937 [==============================] - 12s - loss: 0.0089 - acc: 0.9973 - val_loss: 0.0828 - val_acc: 0.9799
Epoch 22/30
937/937 [==============================] - 12s - loss: 0.0100 - acc: 0.9972 - val_loss: 0.0846 - val_acc: 0.9799
Epoch 23/30
937/937 [==============================] - 11s - loss: 0.0096 - acc: 0.9971 - val_loss: 0.0875 - val_acc: 0.9793
Epoch 24/30
937/937 [==============================] - 12s - loss: 0.0087 - acc: 0.9975 - val_loss: 0.0889 - val_acc: 0.9788
Epoch 25/30
937/937 [==============================] - 12s - loss: 0.0100 - acc: 0.9969 - val_loss: 0.0806 - val_acc: 0.9807
Epoch 26/30
937/937 [==============================] - 12s - loss: 0.0088 - acc: 0.9973 - val_loss: 0.0853 - val_acc: 0.9795
Epoch 27/30
937/937 [==============================] - 12s - loss: 0.0090 - acc: 0.9973 - val_loss: 0.0809 - val_acc: 0.9798
Epoch 28/30
937/937 [==============================] - 12s - loss: 0.0088 - acc: 0.9974 - val_loss: 0.0853 - val_acc: 0.9793
Epoch 29/30
937/937 [==============================] - 11s - loss: 0.0094 - acc: 0.9970 - val_loss: 0.0833 - val_acc: 0.9796
Epoch 30/30
937/937 [==============================] - 12s - loss: 0.0083 - acc: 0.9976 - val_loss: 0.0811 - val_acc: 0.9802

In [15]:
# Training vs. test accuracy for the augmented-data run
for key, label in [('acc', 'acc'), ('val_acc', 'val acc')]:
    plt.plot(history_aug.history[key], label=label)
plt.legend(loc='lower right')
plt.show()



In [ ]:

Improve data augmentation

- Create a generator function adding new transformations

In [16]:
print('Model 3. The same Lenet architecture')

# NOTE(review): third verbatim copy of the architecture from In[6]; a shared
# model-builder function would remove this duplication.
#Inputs
images = layers.Input(batch_shape=(None, 28, 28, 1), dtype='float32', name='Images')

#First convolutional layer
conv1 = layers.Conv2D(20, (5,5))(images)
pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

#Second convolutional layer
conv2 = layers.Conv2D(20, (5,5))(pool1)
pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

#Flatten convolution output
flat = layers.Flatten(name='Flat_image')(pool2)

# First dense layer
dense1 = layers.Dense(500, activation='relu', name='Dense_1')(flat)

# Second dense layer: softmax over the 10 digit classes
output = layers.Dense(10, activation='softmax', name='Dense_output')(dense1)


Model 3. The same Lenet architecture

In [17]:
# Model Architecture defined
model_aug2 = models.Model(inputs=images, outputs=output)
model_aug2.summary()


# Select optimizer and compile model
# Same optimizer settings as the previous two models for comparability
sgd_optimizer = optimizers.SGD(lr=0.01, momentum=0.99, decay=0.005, nesterov=True)
model_aug2.compile(loss='sparse_categorical_crossentropy', 
                   optimizer=sgd_optimizer, metrics=['accuracy'])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
Images (InputLayer)          (None, 28, 28, 1)         0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 24, 24, 20)        520       
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 12, 12, 20)        0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 8, 8, 20)          10020     
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 4, 4, 20)          0         
_________________________________________________________________
Flat_image (Flatten)         (None, 320)               0         
_________________________________________________________________
Dense_1 (Dense)              (None, 500)               160500    
_________________________________________________________________
Dense_output (Dense)         (None, 10)                5010      
=================================================================
Total params: 176,050
Trainable params: 176,050
Non-trainable params: 0
_________________________________________________________________

In [18]:
# Augmentation for training: as before, but with rotation enabled.
train_datagen = preprocessing.image.ImageDataGenerator(
    # NOTE(review): rotation_range is in *degrees*; 0.25 rotates by at most a
    # quarter of a degree, which is nearly a no-op -- was 25 intended?
    rotation_range=0.25,
    width_shift_range=0.2,
    height_shift_range=0.12,
    shear_range=0.3,
    zoom_range=[0.9,1.3])

In [19]:
def img_augmented(img):
    """Randomly erode or dilate one image to vary digit stroke thickness.

    NOTE(review): the recorded output below shows this cell fails with
    ImportError ('cv2' missing) -- opencv-python must be installed to run it.
    """
    import random
    import cv2
    #Erode - dilate
    # NOTE(review): the elif draws a *new* random number, so dilation fires
    # with probability 0.3 * 0.7 = 0.21, not 0.3 -- confirm this is intended
    # rather than a mutually exclusive split of a single draw.
    if random.random() < 0.7:
        # presumably a 2x2 structuring element was intended; np.ones(2, ...)
        # is 1-D (shape (2,)) -- verify cv2 handles it as expected
        img = cv2.erode(img, np.ones(2, np.uint8), iterations=1)
    elif random.random() < 0.7:
        img = cv2.dilate(img, np.ones(2, np.uint8), iterations=1)
    return img


def data_generator(x, y, batch_size=32):
    """Infinite batch generator: Keras ImageDataGenerator transforms plus the
    extra erode/dilate augmentation from img_augmented.

    Relies on the module-level `train_datagen` defined in the previous cell.
    Yields (x_batch, y_batch) forever, as fit_generator requires.
    """
    for x_batch, y_batch in train_datagen.flow(x, y, batch_size=batch_size):
        x_batch_transform=[]
        for img in x_batch:
            # re-add the channel axis dropped/altered by the cv2 ops
            x_batch_transform += [np.reshape(img_augmented(img),(28,28,1))]
        yield np.array(x_batch_transform), y_batch

In [20]:
#Visualize data augmentation
# NOTE(review): the recorded output shows this cell failing with
# "ImportError: No module named 'cv2'" -- install opencv-python
# (e.g. `pip install opencv-python`) before re-running this and the
# following training cell.
dg = data_generator(X_train_sample, y_train_sample, batch_size=32)
x_sample, y_sample = next(dg)


#Examine the data
fig = plt.figure()
for n, i in enumerate(range(32)): 
    a = fig.add_subplot(4,8,n+1)
    a.set_title(str(y_sample[i]))
    fig.tight_layout()
    plt.imshow(np.reshape(x_sample[i],(28,28)))


---------------------------------------------------------------------------
ImportError                               Traceback (most recent call last)
<ipython-input-20-45a1630677bd> in <module>()
      1 #Visualize data augmentation
      2 dg = data_generator(X_train_sample, y_train_sample, batch_size=32)
----> 3 x_sample, y_sample = next(dg)
      4 
      5 

<ipython-input-19-29e6f04a3eb3> in data_generator(x, y, batch_size)
     14         x_batch_transform=[]
     15         for img in x_batch:
---> 16             x_batch_transform += [np.reshape(img_augmented(img),(28,28,1))]
     17         yield np.array(x_batch_transform), y_batch

<ipython-input-19-29e6f04a3eb3> in img_augmented(img)
      1 def img_augmented(img):
      2     import random
----> 3     import cv2
      4     #Erode - dilate
      5     if random.random() < 0.7:

ImportError: No module named 'cv2'

In [ ]:
batch_size = 64
epochs = 30

# BUG FIX: the log dir was '/tmp/tensorboard/mnist/conv_aug2' -- the same
# directory already used by the previous run (cell In[14]) -- so TensorBoard
# would mix both runs' curves. Use a distinct directory for this run.
tb_callback_ln = callbacks.TensorBoard(log_dir='/tmp/tensorboard/mnist/conv_aug3')

# NOTE(review): steps_per_epoch is based on the full train set (60000), not the
# 1000-image sample fed to the generator -- presumably intentional, but confirm.
history_aug2 = model_aug2.fit_generator(data_generator(X_train_sample, y_train_sample, batch_size=batch_size),
                                        steps_per_epoch=len(X_train) // batch_size,
                                        epochs=epochs,
                                        validation_data=(X_test, y_test),
                                        callbacks=[tb_callback_ln])

In [ ]:
# Compare the test-accuracy curves of the two augmentation strategies
for hist, label in [(history_aug, 'val acc aug1'), (history_aug2, 'val acc aug2')]:
    plt.plot(hist.history['val_acc'], label=label)
plt.legend(loc='lower right')
plt.show()

In [ ]: