Data augmentation example

  • Use a small sample of MNIST (100 images per class) plus data augmentation to train a model with good performance.

In [1]:
from __future__ import print_function

#Basic libraries
import numpy as np
import tensorflow as tf
print('Tensorflow version: ', tf.__version__)
import time

#Show images
import matplotlib.pyplot as plt
%matplotlib inline
# plt configuration
plt.rcParams['figure.figsize'] = (10, 10)        # size of images
plt.rcParams['image.interpolation'] = 'nearest'  # show exact image
plt.rcParams['image.cmap'] = 'gray'  # use grayscale 

# Select GPU
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"


Tensorflow version:  1.4.0

In [2]:
# Import the MNIST dataset and rescale pixel values to [0, 1]
from tensorflow.contrib.keras import datasets

(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data(path='mnist.npz')

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.
X_test /= 255.

print('X train shape: ',X_train.shape)
print('y train shape: ',y_train.shape)
print('X test shape: ', X_test.shape)
print('y test shape: ', y_test.shape)


X train shape:  (60000, 28, 28)
y train shape:  (60000,)
X test shape:  (10000, 28, 28)
y test shape:  (10000,)

In [3]:
# Add a channel dimension: Conv2D layers expect NHWC input, i.e. (samples, height, width, channels)
X_train = np.reshape(X_train, (len(X_train),28,28,1))
X_test  = np.reshape(X_test,  (len(X_test) ,28,28,1))
print('X train shape: ',X_train.shape)
print('X test shape: ', X_test.shape)


X train shape:  (60000, 28, 28, 1)
X test shape:  (10000, 28, 28, 1)

In [4]:
# Select a random, class-balanced subsample of the train data
def create_sample(x, y, sample_size=100):

    class_list = np.unique(y)  # use the labels passed in, not the global y_train

    sample_ix = []
    for i in class_list:
        class_ix = np.where(y == i)[0]
        # Sample without replacement to avoid duplicated images
        class_ix_sample = np.random.choice(class_ix, sample_size, replace=False)
        sample_ix += list(class_ix_sample)

    # Shuffle the sample
    sample_ix = np.random.permutation(sample_ix)

    return x[sample_ix], y[sample_ix]

#Create new sample datasets
X_train_sample, y_train_sample = create_sample(X_train, y_train, sample_size=100)
print('X train sample shape: ',X_train_sample.shape)
print('y train sample shape: ',y_train_sample.shape)


X train sample shape:  (1000, 28, 28, 1)
y train sample shape:  (1000,)
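
A quick sanity check, not part of the original run: `np.unique` with `return_counts=True` should report exactly 100 images per digit, confirming the sample is class-balanced.

In [ ]:
# Verify that the subsample contains 100 images of each class
classes, counts = np.unique(y_train_sample, return_counts=True)
print(dict(zip(classes, counts)))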

In [5]:
# Examine the data
fig = plt.figure()
for i in range(32):
    a = fig.add_subplot(4, 8, i + 1)
    a.set_title(str(y_train_sample[i]))
    plt.imshow(np.reshape(X_train_sample[i], (28, 28)))
fig.tight_layout()



In [6]:
#Define the architecture
from tensorflow.contrib.keras import models, layers, optimizers, callbacks, preprocessing


print('Creating LeNet architecture.')

#Inputs
images = layers.Input(batch_shape=(None, 28, 28, 1), dtype='float32', name='Images')

#First convolutional layer
conv1 = layers.Conv2D(20, (5,5))(images)
pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

#Second convolutional layer
conv2 = layers.Conv2D(20, (5,5))(pool1)
pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

#Flatten convolution output
flat = layers.Flatten(name='Flat_image')(pool2)

# First dense layer
dense1 = layers.Dense(500, activation='relu', name='Dense_1')(flat)

# Second dense layer
output = layers.Dense(10, activation='softmax', name='Dense_output')(dense1)


Creating LeNet architecture.

In [7]:
# Configure input and output data for the Model
model_conv = models.Model(inputs=images, outputs=output)
model_conv.summary()


# Select optimizer and compile model
sgd_optimizer = optimizers.SGD(lr=0.01, momentum=0.99, decay=0.005, nesterov=True)
model_conv.compile(loss='sparse_categorical_crossentropy', 
                   optimizer=sgd_optimizer, metrics=['accuracy'])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
Images (InputLayer)          (None, 28, 28, 1)         0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 24, 24, 20)        520       
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 12, 12, 20)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 8, 8, 20)          10020     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 4, 4, 20)          0         
_________________________________________________________________
Flat_image (Flatten)         (None, 320)               0         
_________________________________________________________________
Dense_1 (Dense)              (None, 500)               160500    
_________________________________________________________________
Dense_output (Dense)         (None, 10)                5010      
=================================================================
Total params: 176,050
Trainable params: 176,050
Non-trainable params: 0
_________________________________________________________________

In [8]:
#Fit the model
batch_size = 64
epochs = 30

# TensorBoard callback
tb_callback_ln = callbacks.TensorBoard(log_dir='/tmp/tensorboard/mnist/conv_aug1')

history_conv = model_conv.fit(X_train_sample, y_train_sample, batch_size=batch_size, epochs=epochs,
                              validation_data=(X_test, y_test), callbacks=[tb_callback_ln])


Train on 1000 samples, validate on 10000 samples
Epoch 1/30
1000/1000 [==============================] - 1s - loss: 2.2182 - acc: 0.3200 - val_loss: 1.9625 - val_acc: 0.6616
Epoch 2/30
1000/1000 [==============================] - 0s - loss: 1.2014 - acc: 0.7290 - val_loss: 0.6855 - val_acc: 0.7749
Epoch 3/30
1000/1000 [==============================] - 0s - loss: 0.6383 - acc: 0.8420 - val_loss: 0.8387 - val_acc: 0.8664
Epoch 4/30
1000/1000 [==============================] - 0s - loss: 0.6607 - acc: 0.8750 - val_loss: 0.7245 - val_acc: 0.8679
Epoch 5/30
1000/1000 [==============================] - 0s - loss: 0.3535 - acc: 0.9160 - val_loss: 0.7090 - val_acc: 0.8940
Epoch 6/30
1000/1000 [==============================] - 0s - loss: 0.2502 - acc: 0.9630 - val_loss: 0.7528 - val_acc: 0.9038
Epoch 7/30
1000/1000 [==============================] - 0s - loss: 0.2081 - acc: 0.9630 - val_loss: 0.8520 - val_acc: 0.9031
Epoch 8/30
1000/1000 [==============================] - 0s - loss: 0.1445 - acc: 0.9750 - val_loss: 0.9393 - val_acc: 0.9021
Epoch 9/30
1000/1000 [==============================] - 0s - loss: 0.2080 - acc: 0.9700 - val_loss: 1.0464 - val_acc: 0.9046
Epoch 10/30
1000/1000 [==============================] - 0s - loss: 0.1390 - acc: 0.9790 - val_loss: 1.0701 - val_acc: 0.9126
Epoch 11/30
1000/1000 [==============================] - 0s - loss: 0.1869 - acc: 0.9800 - val_loss: 1.1849 - val_acc: 0.9082
Epoch 12/30
1000/1000 [==============================] - 0s - loss: 0.6726 - acc: 0.9450 - val_loss: 1.7539 - val_acc: 0.8709
Epoch 13/30
1000/1000 [==============================] - 0s - loss: 0.5248 - acc: 0.9540 - val_loss: 1.6876 - val_acc: 0.8773
Epoch 14/30
1000/1000 [==============================] - 0s - loss: 0.7724 - acc: 0.9420 - val_loss: 1.7247 - val_acc: 0.8819
Epoch 15/30
1000/1000 [==============================] - 0s - loss: 1.0070 - acc: 0.9300 - val_loss: 2.6703 - val_acc: 0.8222
Epoch 16/30
1000/1000 [==============================] - 0s - loss: 1.1011 - acc: 0.9270 - val_loss: 2.0280 - val_acc: 0.8667
Epoch 17/30
1000/1000 [==============================] - 0s - loss: 1.7937 - acc: 0.8800 - val_loss: 1.9223 - val_acc: 0.8749
Epoch 18/30
1000/1000 [==============================] - 0s - loss: 2.4079 - acc: 0.8450 - val_loss: 2.5498 - val_acc: 0.8381
Epoch 19/30
1000/1000 [==============================] - 0s - loss: 3.0997 - acc: 0.8040 - val_loss: 5.5598 - val_acc: 0.6505
Epoch 20/30
1000/1000 [==============================] - 0s - loss: 4.3792 - acc: 0.7240 - val_loss: 4.9476 - val_acc: 0.6914
Epoch 21/30
1000/1000 [==============================] - 0s - loss: 6.5267 - acc: 0.5930 - val_loss: 6.3958 - val_acc: 0.6019
Epoch 22/30
1000/1000 [==============================] - 0s - loss: 5.9234 - acc: 0.6320 - val_loss: 6.5067 - val_acc: 0.5958
Epoch 23/30
1000/1000 [==============================] - 0s - loss: 6.7535 - acc: 0.5810 - val_loss: 7.5655 - val_acc: 0.5306
Epoch 24/30
1000/1000 [==============================] - 0s - loss: 7.7850 - acc: 0.5170 - val_loss: 8.6161 - val_acc: 0.4653
Epoch 25/30
1000/1000 [==============================] - 0s - loss: 9.5580 - acc: 0.4070 - val_loss: 10.8987 - val_acc: 0.3238
Epoch 26/30
1000/1000 [==============================] - 0s - loss: 11.3955 - acc: 0.2930 - val_loss: 12.3173 - val_acc: 0.2357
Epoch 27/30
1000/1000 [==============================] - 0s - loss: 12.5560 - acc: 0.2210 - val_loss: 13.1423 - val_acc: 0.1845
Epoch 28/30
1000/1000 [==============================] - 0s - loss: 13.0879 - acc: 0.1880 - val_loss: 13.6369 - val_acc: 0.1539
Epoch 29/30
1000/1000 [==============================] - 0s - loss: 13.6843 - acc: 0.1510 - val_loss: 13.9309 - val_acc: 0.1357
Epoch 30/30
1000/1000 [==============================] - 0s - loss: 14.0066 - acc: 0.1310 - val_loss: 14.1130 - val_acc: 0.1244

In [9]:
plt.plot(history_conv.history['acc'], label='acc')
plt.plot(history_conv.history['val_acc'], label='val acc')
plt.legend(loc='lower right')
plt.show()
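
Training on only 1,000 images quickly overfits and then diverges: validation accuracy peaks around 91% near epoch 10 and collapses afterwards, likely aggravated by the aggressive momentum of 0.99. A minimal mitigation sketch, not part of the original run, using the `callbacks` module already imported: early stopping halts training once validation accuracy stops improving.

In [ ]:
# Sketch: stop when val_acc has not improved for 5 epochs
early_stop = callbacks.EarlyStopping(monitor='val_acc', patience=5)
# model_conv.fit(X_train_sample, y_train_sample, batch_size=batch_size,
#                epochs=epochs, validation_data=(X_test, y_test),
#                callbacks=[tb_callback_ln, early_stop])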



Data augmentation model


In [10]:
print('Second model. The same LeNet architecture')

#Inputs
images = layers.Input(batch_shape=(None, 28, 28, 1), dtype='float32', name='Images')

#First convolutional layer
conv1 = layers.Conv2D(20, (5,5))(images)
pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

#Second convolutional layer
conv2 = layers.Conv2D(20, (5,5))(pool1)
pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

#Flatten convolution output
flat = layers.Flatten(name='Flat_image')(pool2)

# First dense layer
dense1 = layers.Dense(500, activation='relu', name='Dense_1')(flat)

# Second dense layer
output = layers.Dense(10, activation='softmax', name='Dense_output')(dense1)


Second model. The same LeNet architecture

In [11]:
# Define inputs - outputs
model_aug = models.Model(inputs=images, outputs=output)
model_aug.summary()


# Select optimizer and compile model
sgd_optimizer = optimizers.SGD(lr=0.01, momentum=0.99, decay=0.005, nesterov=True)
model_aug.compile(loss='sparse_categorical_crossentropy', 
                   optimizer=sgd_optimizer, metrics=['accuracy'])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
Images (InputLayer)          (None, 28, 28, 1)         0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 24, 24, 20)        520       
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 12, 12, 20)        0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 8, 8, 20)          10020     
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 4, 4, 20)          0         
_________________________________________________________________
Flat_image (Flatten)         (None, 320)               0         
_________________________________________________________________
Dense_1 (Dense)              (None, 500)               160500    
_________________________________________________________________
Dense_output (Dense)         (None, 10)                5010      
=================================================================
Total params: 176,050
Trainable params: 176,050
Non-trainable params: 0
_________________________________________________________________

In [12]:
# Augmentation for training
train_datagen = preprocessing.image.ImageDataGenerator(
    #rotation_range=0.25,
    width_shift_range=0.2,    # horizontal shift, as a fraction of the width
    height_shift_range=0.12,  # vertical shift, as a fraction of the height
    shear_range=0.3,          # shear intensity
    zoom_range=[0.9,1.3])     # random zoom between 90% and 130%

In [13]:
# Visualize the augmented images
fig = plt.figure()
for i in range(32):
    a = fig.add_subplot(4, 8, i + 1)
    a.set_title(str(y_train_sample[i]))
    plt.imshow(np.reshape(train_datagen.random_transform(X_train_sample[i]), (28, 28)))
fig.tight_layout()



In [14]:
batch_size = 64
epochs = 30
tb_callback_ln = callbacks.TensorBoard(log_dir='/tmp/tensorboard/mnist/conv_aug2')

# steps_per_epoch uses the full training-set size (60000 // 64 = 937 steps),
# so each epoch draws ~60,000 augmented variants of the 1,000 sample images.
history_aug = model_aug.fit_generator(train_datagen.flow(X_train_sample, y_train_sample, batch_size=batch_size),
                                      steps_per_epoch=len(X_train) // batch_size,
                                      epochs=epochs,
                                      validation_data=(X_test, y_test),
                                      callbacks=[tb_callback_ln])


Epoch 1/30
937/937 [==============================] - 11s - loss: 0.4568 - acc: 0.8559 - val_loss: 0.1398 - val_acc: 0.9671
Epoch 2/30
937/937 [==============================] - 11s - loss: 0.0948 - acc: 0.9712 - val_loss: 0.1246 - val_acc: 0.9732
Epoch 3/30
937/937 [==============================] - 11s - loss: 0.0514 - acc: 0.9846 - val_loss: 0.1164 - val_acc: 0.9725
Epoch 4/30
937/937 [==============================] - 11s - loss: 0.0301 - acc: 0.9906 - val_loss: 0.1139 - val_acc: 0.9745
Epoch 5/30
937/937 [==============================] - 11s - loss: 0.0224 - acc: 0.9936 - val_loss: 0.1075 - val_acc: 0.9751
Epoch 6/30
937/937 [==============================] - 11s - loss: 0.0207 - acc: 0.9937 - val_loss: 0.1091 - val_acc: 0.9764
Epoch 7/30
937/937 [==============================] - 12s - loss: 0.0172 - acc: 0.9952 - val_loss: 0.1035 - val_acc: 0.9768
Epoch 8/30
937/937 [==============================] - 11s - loss: 0.0159 - acc: 0.9953 - val_loss: 0.1038 - val_acc: 0.9774
Epoch 9/30
937/937 [==============================] - 11s - loss: 0.0134 - acc: 0.9957 - val_loss: 0.1025 - val_acc: 0.9772
Epoch 10/30
937/937 [==============================] - 11s - loss: 0.0124 - acc: 0.9962 - val_loss: 0.0956 - val_acc: 0.9784
Epoch 11/30
937/937 [==============================] - 11s - loss: 0.0129 - acc: 0.9962 - val_loss: 0.1089 - val_acc: 0.9752
Epoch 12/30
937/937 [==============================] - 12s - loss: 0.0120 - acc: 0.9962 - val_loss: 0.1000 - val_acc: 0.9782
Epoch 13/30
937/937 [==============================] - 11s - loss: 0.0107 - acc: 0.9967 - val_loss: 0.1035 - val_acc: 0.9770
Epoch 14/30
937/937 [==============================] - 12s - loss: 0.0121 - acc: 0.9966 - val_loss: 0.0984 - val_acc: 0.9774
Epoch 15/30
937/937 [==============================] - 11s - loss: 0.0100 - acc: 0.9970 - val_loss: 0.0982 - val_acc: 0.9789
Epoch 16/30
937/937 [==============================] - 11s - loss: 0.0111 - acc: 0.9968 - val_loss: 0.0986 - val_acc: 0.9782
Epoch 17/30
937/937 [==============================] - 11s - loss: 0.0103 - acc: 0.9969 - val_loss: 0.1076 - val_acc: 0.9769
Epoch 18/30
937/937 [==============================] - 12s - loss: 0.0100 - acc: 0.9971 - val_loss: 0.0983 - val_acc: 0.9778
Epoch 19/30
937/937 [==============================] - 11s - loss: 0.0101 - acc: 0.9970 - val_loss: 0.1008 - val_acc: 0.9780
Epoch 20/30
937/937 [==============================] - 11s - loss: 0.0100 - acc: 0.9966 - val_loss: 0.1043 - val_acc: 0.9767
Epoch 21/30
937/937 [==============================] - 12s - loss: 0.0084 - acc: 0.9973 - val_loss: 0.0996 - val_acc: 0.9782
Epoch 22/30
937/937 [==============================] - 11s - loss: 0.0084 - acc: 0.9976 - val_loss: 0.1007 - val_acc: 0.9774
Epoch 23/30
937/937 [==============================] - 11s - loss: 0.0092 - acc: 0.9973 - val_loss: 0.1005 - val_acc: 0.9775
Epoch 24/30
937/937 [==============================] - 11s - loss: 0.0080 - acc: 0.9974 - val_loss: 0.1061 - val_acc: 0.9773
Epoch 25/30
937/937 [==============================] - 11s - loss: 0.0090 - acc: 0.9974 - val_loss: 0.1016 - val_acc: 0.9778
Epoch 26/30
937/937 [==============================] - 11s - loss: 0.0081 - acc: 0.9976 - val_loss: 0.1001 - val_acc: 0.9775
Epoch 27/30
937/937 [==============================] - 11s - loss: 0.0071 - acc: 0.9981 - val_loss: 0.1019 - val_acc: 0.9782
Epoch 28/30
937/937 [==============================] - 11s - loss: 0.0075 - acc: 0.9978 - val_loss: 0.0998 - val_acc: 0.9779
Epoch 29/30
937/937 [==============================] - 12s - loss: 0.0085 - acc: 0.9976 - val_loss: 0.1007 - val_acc: 0.9774
Epoch 30/30
937/937 [==============================] - 11s - loss: 0.0072 - acc: 0.9978 - val_loss: 0.1040 - val_acc: 0.9773

In [15]:
# Plot history
plt.plot(history_aug.history['acc'], label='acc')
plt.plot(history_aug.history['val_acc'], label='val acc')
plt.legend(loc='lower right')
plt.show()
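
With augmentation, the same architecture reaches about 97.9% validation accuracy and stays there, versus a peak of roughly 91% (followed by divergence) without it. A quick comparison, not part of the original run, using the recorded histories:

In [ ]:
# Best validation accuracy of each run
print('Best val acc, no augmentation: %.4f' % max(history_conv.history['val_acc']))
print('Best val acc, augmentation:    %.4f' % max(history_aug.history['val_acc']))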



Improve data augmentation

- Create a custom generator that chains extra transformations (random erosion/dilation with OpenCV) after the Keras generator

In [16]:
print('Model 3. The same LeNet architecture')

#Inputs
images = layers.Input(batch_shape=(None, 28, 28, 1), dtype='float32', name='Images')

#First convolutional layer
conv1 = layers.Conv2D(20, (5,5))(images)
pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

#Second convolutional layer
conv2 = layers.Conv2D(20, (5,5))(pool1)
pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

#Flatten convolution output
flat = layers.Flatten(name='Flat_image')(pool2)

# First dense layer
dense1 = layers.Dense(500, activation='relu', name='Dense_1')(flat)

# Second dense layer
output = layers.Dense(10, activation='softmax', name='Dense_output')(dense1)


Model 3. The same LeNet architecture

In [17]:
# Define the model from inputs and outputs
model_aug2 = models.Model(inputs=images, outputs=output)
model_aug2.summary()


# Select optimizer and compile model
sgd_optimizer = optimizers.SGD(lr=0.01, momentum=0.99, decay=0.005, nesterov=True)
model_aug2.compile(loss='sparse_categorical_crossentropy', 
                   optimizer=sgd_optimizer, metrics=['accuracy'])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
Images (InputLayer)          (None, 28, 28, 1)         0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 24, 24, 20)        520       
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 12, 12, 20)        0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 8, 8, 20)          10020     
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 4, 4, 20)          0         
_________________________________________________________________
Flat_image (Flatten)         (None, 320)               0         
_________________________________________________________________
Dense_1 (Dense)              (None, 500)               160500    
_________________________________________________________________
Dense_output (Dense)         (None, 10)                5010      
=================================================================
Total params: 176,050
Trainable params: 176,050
Non-trainable params: 0
_________________________________________________________________

In [18]:
# Augmentation for training, now with rotation enabled
train_datagen = preprocessing.image.ImageDataGenerator(
    rotation_range=0.25,      # in degrees, so 0.25 is an almost imperceptible rotation
    width_shift_range=0.2,    # horizontal shift, as a fraction of the width
    height_shift_range=0.12,  # vertical shift, as a fraction of the height
    shear_range=0.3,          # shear intensity
    zoom_range=[0.9,1.3])     # random zoom between 90% and 130%

In [19]:
import random
import cv2  # OpenCV (the opencv-python package)


def img_augmented(img):
    # Randomly erode or dilate the digit strokes to vary their thickness.
    # Note the two independent draws: ~70% erode, ~21% dilate, ~9% unchanged.
    if random.random() < 0.7:
        img = cv2.erode(img, np.ones(2, np.uint8), iterations=1)
    elif random.random() < 0.7:
        img = cv2.dilate(img, np.ones(2, np.uint8), iterations=1)
    return img


def data_generator(x, y, batch_size=32):
    # Wrap the (infinite) Keras generator and apply the extra transform
    # to every image of each batch.
    for x_batch, y_batch in train_datagen.flow(x, y, batch_size=batch_size):
        x_batch_transform = []
        for img in x_batch:
            # cv2 drops the channel axis, so restore the (28, 28, 1) shape
            x_batch_transform += [np.reshape(img_augmented(img), (28, 28, 1))]
        yield np.array(x_batch_transform), y_batch

In [20]:
# Visualize the custom data augmentation
dg = data_generator(X_train_sample, y_train_sample, batch_size=32)
x_sample, y_sample = next(dg)


# Examine the augmented batch
fig = plt.figure()
for i in range(32):
    a = fig.add_subplot(4, 8, i + 1)
    a.set_title(str(y_sample[i]))
    plt.imshow(np.reshape(x_sample[i], (28, 28)))
fig.tight_layout()



In [21]:
batch_size = 64
epochs = 30
# Use a separate log dir so this run does not overwrite the previous one
tb_callback_ln = callbacks.TensorBoard(log_dir='/tmp/tensorboard/mnist/conv_aug3')
history_aug2 = model_aug2.fit_generator(data_generator(X_train_sample, y_train_sample, batch_size=batch_size),
                                        steps_per_epoch=len(X_train) // batch_size,
                                        epochs=epochs,
                                        validation_data=(X_test, y_test),
                                        callbacks=[tb_callback_ln])


Epoch 1/30
937/937 [==============================] - 14s - loss: 0.5013 - acc: 0.8374 - val_loss: 0.1262 - val_acc: 0.9629
Epoch 2/30
937/937 [==============================] - 13s - loss: 0.1112 - acc: 0.9646 - val_loss: 0.1052 - val_acc: 0.9727
Epoch 3/30
937/937 [==============================] - 13s - loss: 0.0636 - acc: 0.9797 - val_loss: 0.0885 - val_acc: 0.9771
Epoch 4/30
937/937 [==============================] - 13s - loss: 0.0504 - acc: 0.9837 - val_loss: 0.0846 - val_acc: 0.9792
Epoch 5/30
937/937 [==============================] - 13s - loss: 0.0425 - acc: 0.9860 - val_loss: 0.0853 - val_acc: 0.9783
Epoch 6/30
937/937 [==============================] - 13s - loss: 0.0352 - acc: 0.9889 - val_loss: 0.0861 - val_acc: 0.9797
Epoch 7/30
937/937 [==============================] - 13s - loss: 0.0316 - acc: 0.9895 - val_loss: 0.0850 - val_acc: 0.9807
Epoch 8/30
937/937 [==============================] - 13s - loss: 0.0312 - acc: 0.9902 - val_loss: 0.0872 - val_acc: 0.9799
Epoch 9/30
937/937 [==============================] - 13s - loss: 0.0258 - acc: 0.9920 - val_loss: 0.0850 - val_acc: 0.9809
Epoch 10/30
937/937 [==============================] - 13s - loss: 0.0274 - acc: 0.9914 - val_loss: 0.0846 - val_acc: 0.9807
Epoch 11/30
937/937 [==============================] - 13s - loss: 0.0248 - acc: 0.9921 - val_loss: 0.0864 - val_acc: 0.9805
Epoch 12/30
937/937 [==============================] - 13s - loss: 0.0231 - acc: 0.9925 - val_loss: 0.0820 - val_acc: 0.9819
Epoch 13/30
937/937 [==============================] - 13s - loss: 0.0240 - acc: 0.9925 - val_loss: 0.0864 - val_acc: 0.9805
Epoch 14/30
937/937 [==============================] - 13s - loss: 0.0206 - acc: 0.9934 - val_loss: 0.0842 - val_acc: 0.9821
Epoch 15/30
937/937 [==============================] - 13s - loss: 0.0213 - acc: 0.9936 - val_loss: 0.0845 - val_acc: 0.9821
Epoch 16/30
937/937 [==============================] - 13s - loss: 0.0220 - acc: 0.9930 - val_loss: 0.0817 - val_acc: 0.9822
Epoch 17/30
937/937 [==============================] - 13s - loss: 0.0187 - acc: 0.9940 - val_loss: 0.0849 - val_acc: 0.9820
Epoch 18/30
937/937 [==============================] - 13s - loss: 0.0184 - acc: 0.9944 - val_loss: 0.0866 - val_acc: 0.9818
Epoch 19/30
937/937 [==============================] - 13s - loss: 0.0201 - acc: 0.9933 - val_loss: 0.0900 - val_acc: 0.9801
Epoch 20/30
937/937 [==============================] - 13s - loss: 0.0172 - acc: 0.9945 - val_loss: 0.0858 - val_acc: 0.9818
Epoch 21/30
937/937 [==============================] - 13s - loss: 0.0188 - acc: 0.9943 - val_loss: 0.0896 - val_acc: 0.9807
Epoch 22/30
937/937 [==============================] - 13s - loss: 0.0182 - acc: 0.9944 - val_loss: 0.0849 - val_acc: 0.9813
Epoch 23/30
937/937 [==============================] - 14s - loss: 0.0187 - acc: 0.9938 - val_loss: 0.0828 - val_acc: 0.9816
Epoch 24/30
937/937 [==============================] - 13s - loss: 0.0182 - acc: 0.9940 - val_loss: 0.0889 - val_acc: 0.9796
Epoch 25/30
937/937 [==============================] - 14s - loss: 0.0171 - acc: 0.9944 - val_loss: 0.0850 - val_acc: 0.9812
Epoch 26/30
937/937 [==============================] - 13s - loss: 0.0176 - acc: 0.9946 - val_loss: 0.0864 - val_acc: 0.9803
Epoch 27/30
937/937 [==============================] - 13s - loss: 0.0173 - acc: 0.9943 - val_loss: 0.0863 - val_acc: 0.9811
Epoch 28/30
937/937 [==============================] - 13s - loss: 0.0167 - acc: 0.9947 - val_loss: 0.0862 - val_acc: 0.9808
Epoch 29/30
937/937 [==============================] - 13s - loss: 0.0166 - acc: 0.9950 - val_loss: 0.0845 - val_acc: 0.9817
Epoch 30/30
937/937 [==============================] - 13s - loss: 0.0167 - acc: 0.9947 - val_loss: 0.0834 - val_acc: 0.9819

In [22]:
# Compare test
plt.plot(history_aug.history['val_acc'] , label='val acc aug1')
plt.plot(history_aug2.history['val_acc'], label='val acc aug2')
plt.legend(loc='lower right')
plt.show()
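
The custom generator yields a further small gain, stabilizing around 98.2% validation accuracy versus roughly 97.8% with the Keras transformations alone. As a final check, a sketch (not part of the original run) evaluating both augmented models on the test set:

In [ ]:
# Evaluate both augmented models on the held-out test set
loss1, acc1 = model_aug.evaluate(X_test, y_test, verbose=0)
loss2, acc2 = model_aug2.evaluate(X_test, y_test, verbose=0)
print('Keras augmentation only: loss %.4f, acc %.4f' % (loss1, acc1))
print('Plus erosion/dilation:   loss %.4f, acc %.4f' % (loss2, acc2))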


