In [2]:
from __future__ import print_function
import tensorflow as tf
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Activation, Conv2D, GlobalAveragePooling2D, merge
from keras.utils import np_utils
from keras.optimizers import SGD
from keras import backend as K
from keras.models import Model
from keras.layers.core import Lambda
from keras.callbacks import ModelCheckpoint
import pandas
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys


Using TensorFlow backend.

In [3]:
K.set_image_dim_ordering('tf')

batch_size = 32
nb_classes = 10
nb_epoch = 375
rows, cols = 32, 32
channels = 3

In [4]:
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
(img_train, lbl_train), (img_test, lbl_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

print (X_train.shape[1:])

Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)


X_train shape: (50000, 32, 32, 3)
50000 train samples
10000 test samples
(32, 32, 3)

In [5]:
categories = ['airplane', 'automobile', 'bird', 'cat', 'deer','dog', 'frog', 'horse', 'ship', 'truck']
imgid = 2
image = img_train[imgid]
image = image.astype('float32')
image /= 255

%matplotlib inline
imgplot = plt.imshow(image)

categoryid = lbl_train[imgid]

print(categories)
print("categoryid :"+str(categoryid))
print("category : "+str(categories[categoryid[0]]))


['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
categoryid :[9]
category : truck

In [6]:
model = Sequential()

model.add(Conv2D(96, (3, 3), padding='same', input_shape=(32, 32, 3)))
model.add(Activation('relu'))
model.add(Conv2D(96, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(96, (3, 3), padding='same', strides=(2,2)))
model.add(Dropout(0.5))

model.add(Conv2D(192, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(192, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(192, (3, 3), padding='same', strides=(2,2)))
model.add(Dropout(0.5))

model.add(Conv2D(192, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(192, (1, 1), padding='valid'))
model.add(Activation('relu'))
model.add(Conv2D(10, (1, 1), padding='valid'))

model.add(GlobalAveragePooling2D())
model.add(Activation('softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

In [7]:
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
print (model.summary())


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 32, 32, 96)        2688      
_________________________________________________________________
activation_1 (Activation)    (None, 32, 32, 96)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 32, 32, 96)        83040     
_________________________________________________________________
activation_2 (Activation)    (None, 32, 32, 96)        0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 16, 16, 96)        83040     
_________________________________________________________________
dropout_1 (Dropout)          (None, 16, 16, 96)        0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 16, 16, 192)       166080    
_________________________________________________________________
activation_3 (Activation)    (None, 16, 16, 192)       0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 16, 16, 192)       331968    
_________________________________________________________________
activation_4 (Activation)    (None, 16, 16, 192)       0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 8, 8, 192)         331968    
_________________________________________________________________
dropout_2 (Dropout)          (None, 8, 8, 192)         0         
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 8, 8, 192)         331968    
_________________________________________________________________
activation_5 (Activation)    (None, 8, 8, 192)         0         
_________________________________________________________________
conv2d_8 (Conv2D)            (None, 8, 8, 192)         37056     
_________________________________________________________________
activation_6 (Activation)    (None, 8, 8, 192)         0         
_________________________________________________________________
conv2d_9 (Conv2D)            (None, 8, 8, 10)          1930      
_________________________________________________________________
global_average_pooling2d_1 ( (None, 10)                0         
_________________________________________________________________
activation_7 (Activation)    (None, 10)                0         
=================================================================
Total params: 1,369,738
Trainable params: 1,369,738
Non-trainable params: 0
_________________________________________________________________
None

In [ ]:
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False) 

datagen.fit(X_train)
filepath="weights.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='max')

callbacks_list = [checkpoint]
# Fit the model on the batches generated by datagen.flow().
history_callback = model.fit_generator(datagen.flow(X_train, Y_train,
                                    batch_size=batch_size),
                                    steps_per_epoch=2000,
                                    epochs=nb_epoch,
                                    validation_data=(X_test, Y_test),
                                    callbacks=callbacks_list, verbose=1)


Epoch 1/375
2000/2000 [==============================] - 70s 35ms/step - loss: 1.8727 - acc: 0.2911 - val_loss: 1.5216 - val_acc: 0.4299

Epoch 00001: val_acc improved from -inf to 0.42990, saving model to weights.hdf5
Epoch 2/375
2000/2000 [==============================] - 68s 34ms/step - loss: 1.4043 - acc: 0.4866 - val_loss: 1.1808 - val_acc: 0.5646

Epoch 00002: val_acc improved from 0.42990 to 0.56460, saving model to weights.hdf5
Epoch 3/375
2000/2000 [==============================] - 68s 34ms/step - loss: 1.1462 - acc: 0.5872 - val_loss: 1.0267 - val_acc: 0.6356

Epoch 00003: val_acc improved from 0.56460 to 0.63560, saving model to weights.hdf5
Epoch 4/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.9940 - acc: 0.6449 - val_loss: 0.8735 - val_acc: 0.6917

Epoch 00004: val_acc improved from 0.63560 to 0.69170, saving model to weights.hdf5
Epoch 5/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.8672 - acc: 0.6942 - val_loss: 0.8469 - val_acc: 0.7117

Epoch 00005: val_acc improved from 0.69170 to 0.71170, saving model to weights.hdf5
Epoch 6/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.7879 - acc: 0.7258 - val_loss: 0.7331 - val_acc: 0.7605

Epoch 00006: val_acc improved from 0.71170 to 0.76050, saving model to weights.hdf5
Epoch 7/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.7223 - acc: 0.7495 - val_loss: 0.7034 - val_acc: 0.7689

Epoch 00007: val_acc improved from 0.76050 to 0.76890, saving model to weights.hdf5
Epoch 8/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.6693 - acc: 0.7682 - val_loss: 0.6478 - val_acc: 0.7813

Epoch 00008: val_acc improved from 0.76890 to 0.78130, saving model to weights.hdf5
Epoch 9/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.6346 - acc: 0.7807 - val_loss: 0.6061 - val_acc: 0.8008

Epoch 00009: val_acc improved from 0.78130 to 0.80080, saving model to weights.hdf5
Epoch 10/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.6043 - acc: 0.7921 - val_loss: 0.6111 - val_acc: 0.7993

Epoch 00010: val_acc did not improve from 0.80080
Epoch 11/375
2000/2000 [==============================] - 82s 41ms/step - loss: 0.5784 - acc: 0.8011 - val_loss: 0.5849 - val_acc: 0.8169

Epoch 00011: val_acc improved from 0.80080 to 0.81690, saving model to weights.hdf5
Epoch 12/375
2000/2000 [==============================] - 71s 35ms/step - loss: 0.5561 - acc: 0.8075 - val_loss: 0.6136 - val_acc: 0.8032

Epoch 00012: val_acc did not improve from 0.81690
Epoch 13/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.5350 - acc: 0.8136 - val_loss: 0.5906 - val_acc: 0.8168

Epoch 00013: val_acc did not improve from 0.81690
Epoch 14/375
2000/2000 [==============================] - 77s 38ms/step - loss: 0.5133 - acc: 0.8223 - val_loss: 0.5551 - val_acc: 0.8269

Epoch 00014: val_acc improved from 0.81690 to 0.82690, saving model to weights.hdf5
Epoch 15/375
2000/2000 [==============================] - 74s 37ms/step - loss: 0.4976 - acc: 0.8273 - val_loss: 0.5596 - val_acc: 0.8293

Epoch 00015: val_acc improved from 0.82690 to 0.82930, saving model to weights.hdf5
Epoch 16/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4901 - acc: 0.8305 - val_loss: 0.6239 - val_acc: 0.8117

Epoch 00016: val_acc did not improve from 0.82930
Epoch 17/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4740 - acc: 0.8353 - val_loss: 0.5398 - val_acc: 0.8306

Epoch 00017: val_acc improved from 0.82930 to 0.83060, saving model to weights.hdf5
Epoch 18/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4663 - acc: 0.8381 - val_loss: 0.5690 - val_acc: 0.8339

Epoch 00018: val_acc improved from 0.83060 to 0.83390, saving model to weights.hdf5
Epoch 19/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4520 - acc: 0.8426 - val_loss: 0.4717 - val_acc: 0.8461

Epoch 00019: val_acc improved from 0.83390 to 0.84610, saving model to weights.hdf5
Epoch 20/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4469 - acc: 0.8439 - val_loss: 0.5381 - val_acc: 0.8375

Epoch 00020: val_acc did not improve from 0.84610
Epoch 21/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4342 - acc: 0.8493 - val_loss: 0.5363 - val_acc: 0.8398

Epoch 00021: val_acc did not improve from 0.84610
Epoch 22/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4290 - acc: 0.8518 - val_loss: 0.4738 - val_acc: 0.8534

Epoch 00022: val_acc improved from 0.84610 to 0.85340, saving model to weights.hdf5
Epoch 23/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4188 - acc: 0.8537 - val_loss: 0.4911 - val_acc: 0.8412

Epoch 00023: val_acc did not improve from 0.85340
Epoch 24/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4135 - acc: 0.8554 - val_loss: 0.4790 - val_acc: 0.8549

Epoch 00024: val_acc improved from 0.85340 to 0.85490, saving model to weights.hdf5
Epoch 25/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4073 - acc: 0.8586 - val_loss: 0.4882 - val_acc: 0.8528

Epoch 00025: val_acc did not improve from 0.85490
Epoch 26/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.4009 - acc: 0.8623 - val_loss: 0.4629 - val_acc: 0.8525

Epoch 00026: val_acc did not improve from 0.85490
Epoch 27/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3946 - acc: 0.8619 - val_loss: 0.5355 - val_acc: 0.8479

Epoch 00027: val_acc did not improve from 0.85490
Epoch 28/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3929 - acc: 0.8635 - val_loss: 0.5057 - val_acc: 0.8550

Epoch 00028: val_acc improved from 0.85490 to 0.85500, saving model to weights.hdf5
Epoch 29/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3810 - acc: 0.8668 - val_loss: 0.4609 - val_acc: 0.8595

Epoch 00029: val_acc improved from 0.85500 to 0.85950, saving model to weights.hdf5
Epoch 30/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3766 - acc: 0.8694 - val_loss: 0.4685 - val_acc: 0.8602

Epoch 00030: val_acc improved from 0.85950 to 0.86020, saving model to weights.hdf5
Epoch 31/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3799 - acc: 0.8688 - val_loss: 0.4841 - val_acc: 0.8552

Epoch 00031: val_acc did not improve from 0.86020
Epoch 32/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3708 - acc: 0.8706 - val_loss: 0.4956 - val_acc: 0.8528

Epoch 00032: val_acc did not improve from 0.86020
Epoch 33/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3670 - acc: 0.8718 - val_loss: 0.4963 - val_acc: 0.8520

Epoch 00033: val_acc did not improve from 0.86020
Epoch 34/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3618 - acc: 0.8733 - val_loss: 0.5200 - val_acc: 0.8512

Epoch 00034: val_acc did not improve from 0.86020
Epoch 35/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3597 - acc: 0.8740 - val_loss: 0.5218 - val_acc: 0.8483

Epoch 00035: val_acc did not improve from 0.86020
Epoch 36/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3549 - acc: 0.8757 - val_loss: 0.4761 - val_acc: 0.8567

Epoch 00036: val_acc did not improve from 0.86020
Epoch 37/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3517 - acc: 0.8780 - val_loss: 0.4913 - val_acc: 0.8574

Epoch 00037: val_acc did not improve from 0.86020
Epoch 38/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3502 - acc: 0.8777 - val_loss: 0.4340 - val_acc: 0.8677

Epoch 00038: val_acc improved from 0.86020 to 0.86770, saving model to weights.hdf5
Epoch 39/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3473 - acc: 0.8800 - val_loss: 0.4334 - val_acc: 0.8683

Epoch 00039: val_acc improved from 0.86770 to 0.86830, saving model to weights.hdf5
Epoch 40/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3370 - acc: 0.8834 - val_loss: 0.4541 - val_acc: 0.8661

Epoch 00040: val_acc did not improve from 0.86830
Epoch 41/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3366 - acc: 0.8826 - val_loss: 0.4484 - val_acc: 0.8712

Epoch 00041: val_acc improved from 0.86830 to 0.87120, saving model to weights.hdf5
Epoch 42/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3426 - acc: 0.8797 - val_loss: 0.4329 - val_acc: 0.8713

Epoch 00042: val_acc improved from 0.87120 to 0.87130, saving model to weights.hdf5
Epoch 43/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3344 - acc: 0.8829 - val_loss: 0.5167 - val_acc: 0.8624

Epoch 00043: val_acc did not improve from 0.87130
Epoch 44/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3323 - acc: 0.8837 - val_loss: 0.4604 - val_acc: 0.8701

Epoch 00044: val_acc did not improve from 0.87130
Epoch 45/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3309 - acc: 0.8839 - val_loss: 0.4460 - val_acc: 0.8717

Epoch 00045: val_acc improved from 0.87130 to 0.87170, saving model to weights.hdf5
Epoch 46/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3327 - acc: 0.8852 - val_loss: 0.4683 - val_acc: 0.8696

Epoch 00046: val_acc did not improve from 0.87170
Epoch 47/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3308 - acc: 0.8843 - val_loss: 0.4557 - val_acc: 0.8690

Epoch 00047: val_acc did not improve from 0.87170
Epoch 48/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3220 - acc: 0.8865 - val_loss: 0.4729 - val_acc: 0.8728

Epoch 00048: val_acc improved from 0.87170 to 0.87280, saving model to weights.hdf5
Epoch 49/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3227 - acc: 0.8882 - val_loss: 0.4288 - val_acc: 0.8766

Epoch 00049: val_acc improved from 0.87280 to 0.87660, saving model to weights.hdf5
Epoch 50/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3183 - acc: 0.8905 - val_loss: 0.4276 - val_acc: 0.8758

Epoch 00050: val_acc did not improve from 0.87660
Epoch 51/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3151 - acc: 0.8905 - val_loss: 0.4528 - val_acc: 0.8746

Epoch 00051: val_acc did not improve from 0.87660
Epoch 52/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3190 - acc: 0.8895 - val_loss: 0.4352 - val_acc: 0.8705

Epoch 00052: val_acc did not improve from 0.87660
Epoch 53/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3148 - acc: 0.8909 - val_loss: 0.4676 - val_acc: 0.8755

Epoch 00053: val_acc did not improve from 0.87660
Epoch 54/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3089 - acc: 0.8928 - val_loss: 0.4824 - val_acc: 0.8700

Epoch 00054: val_acc did not improve from 0.87660
Epoch 55/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3085 - acc: 0.8915 - val_loss: 0.4542 - val_acc: 0.8795

Epoch 00055: val_acc improved from 0.87660 to 0.87950, saving model to weights.hdf5
Epoch 56/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3019 - acc: 0.8945 - val_loss: 0.4678 - val_acc: 0.8746

Epoch 00056: val_acc did not improve from 0.87950
Epoch 57/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3074 - acc: 0.8925 - val_loss: 0.4734 - val_acc: 0.8660

Epoch 00057: val_acc did not improve from 0.87950
Epoch 58/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3084 - acc: 0.8943 - val_loss: 0.4539 - val_acc: 0.8764

Epoch 00058: val_acc did not improve from 0.87950
Epoch 59/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3010 - acc: 0.8959 - val_loss: 0.4583 - val_acc: 0.8752

Epoch 00059: val_acc did not improve from 0.87950
Epoch 60/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2996 - acc: 0.8949 - val_loss: 0.4463 - val_acc: 0.8780

Epoch 00060: val_acc did not improve from 0.87950
Epoch 61/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.3020 - acc: 0.8940 - val_loss: 0.4394 - val_acc: 0.8814

Epoch 00061: val_acc improved from 0.87950 to 0.88140, saving model to weights.hdf5
Epoch 62/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2966 - acc: 0.8965 - val_loss: 0.4605 - val_acc: 0.8790

Epoch 00062: val_acc did not improve from 0.88140
Epoch 63/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2910 - acc: 0.8988 - val_loss: 0.4431 - val_acc: 0.8799

Epoch 00063: val_acc did not improve from 0.88140
Epoch 64/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2997 - acc: 0.8962 - val_loss: 0.4787 - val_acc: 0.8774

Epoch 00064: val_acc did not improve from 0.88140
Epoch 65/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2944 - acc: 0.8975 - val_loss: 0.4152 - val_acc: 0.8863

Epoch 00065: val_acc improved from 0.88140 to 0.88630, saving model to weights.hdf5
Epoch 66/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2923 - acc: 0.8985 - val_loss: 0.4835 - val_acc: 0.8771

Epoch 00066: val_acc did not improve from 0.88630
Epoch 67/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2958 - acc: 0.8964 - val_loss: 0.4708 - val_acc: 0.8817

Epoch 00067: val_acc did not improve from 0.88630
Epoch 68/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2899 - acc: 0.8998 - val_loss: 0.4985 - val_acc: 0.8769

Epoch 00068: val_acc did not improve from 0.88630
Epoch 69/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2903 - acc: 0.9001 - val_loss: 0.4629 - val_acc: 0.8792

Epoch 00069: val_acc did not improve from 0.88630
Epoch 70/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2891 - acc: 0.8993 - val_loss: 0.4673 - val_acc: 0.8762

Epoch 00070: val_acc did not improve from 0.88630
Epoch 71/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2896 - acc: 0.8983 - val_loss: 0.4803 - val_acc: 0.8726

Epoch 00071: val_acc did not improve from 0.88630
Epoch 72/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2885 - acc: 0.9010 - val_loss: 0.4705 - val_acc: 0.8851

Epoch 00072: val_acc did not improve from 0.88630
Epoch 73/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2818 - acc: 0.9028 - val_loss: 0.4542 - val_acc: 0.8816

Epoch 00073: val_acc did not improve from 0.88630
Epoch 74/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2764 - acc: 0.9049 - val_loss: 0.4315 - val_acc: 0.8841

Epoch 00074: val_acc did not improve from 0.88630
Epoch 75/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2858 - acc: 0.9012 - val_loss: 0.4346 - val_acc: 0.8849

Epoch 00075: val_acc did not improve from 0.88630
Epoch 76/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2822 - acc: 0.9020 - val_loss: 0.4792 - val_acc: 0.8786

Epoch 00076: val_acc did not improve from 0.88630
Epoch 77/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2815 - acc: 0.9015 - val_loss: 0.5250 - val_acc: 0.8698

Epoch 00077: val_acc did not improve from 0.88630
Epoch 78/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2785 - acc: 0.9030 - val_loss: 0.5520 - val_acc: 0.8681

Epoch 00078: val_acc did not improve from 0.88630
Epoch 79/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2760 - acc: 0.9045 - val_loss: 0.4807 - val_acc: 0.8840

Epoch 00079: val_acc did not improve from 0.88630
Epoch 80/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2744 - acc: 0.9039 - val_loss: 0.4973 - val_acc: 0.8765

Epoch 00080: val_acc did not improve from 0.88630
Epoch 81/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2767 - acc: 0.9046 - val_loss: 0.4528 - val_acc: 0.8793

Epoch 00081: val_acc did not improve from 0.88630
Epoch 82/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2741 - acc: 0.9053 - val_loss: 0.4727 - val_acc: 0.8737

Epoch 00082: val_acc did not improve from 0.88630
Epoch 83/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2708 - acc: 0.9056 - val_loss: 0.5165 - val_acc: 0.8699

Epoch 00083: val_acc did not improve from 0.88630
Epoch 84/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2789 - acc: 0.9022 - val_loss: 0.5013 - val_acc: 0.8800

Epoch 00084: val_acc did not improve from 0.88630
Epoch 85/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2755 - acc: 0.9052 - val_loss: 0.5054 - val_acc: 0.8751

Epoch 00085: val_acc did not improve from 0.88630
Epoch 86/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2713 - acc: 0.9056 - val_loss: 0.5121 - val_acc: 0.8739

Epoch 00086: val_acc did not improve from 0.88630
Epoch 87/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2716 - acc: 0.9053 - val_loss: 0.4702 - val_acc: 0.8801

Epoch 00087: val_acc did not improve from 0.88630
Epoch 88/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2699 - acc: 0.9057 - val_loss: 0.4876 - val_acc: 0.8812

Epoch 00088: val_acc did not improve from 0.88630
Epoch 89/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2666 - acc: 0.9070 - val_loss: 0.4788 - val_acc: 0.8806

Epoch 00089: val_acc did not improve from 0.88630
Epoch 90/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2653 - acc: 0.9071 - val_loss: 0.4735 - val_acc: 0.8808

Epoch 00090: val_acc did not improve from 0.88630
Epoch 91/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2615 - acc: 0.9089 - val_loss: 0.4464 - val_acc: 0.8848

Epoch 00091: val_acc did not improve from 0.88630
Epoch 92/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2670 - acc: 0.9078 - val_loss: 0.4718 - val_acc: 0.8848

Epoch 00092: val_acc did not improve from 0.88630
Epoch 93/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2627 - acc: 0.9084 - val_loss: 0.4294 - val_acc: 0.8931

Epoch 00093: val_acc improved from 0.88630 to 0.89310, saving model to weights.hdf5
Epoch 94/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2624 - acc: 0.9089 - val_loss: 0.4969 - val_acc: 0.8797

Epoch 00094: val_acc did not improve from 0.89310
Epoch 95/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2618 - acc: 0.9088 - val_loss: 0.5045 - val_acc: 0.8800

Epoch 00095: val_acc did not improve from 0.89310
Epoch 96/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2596 - acc: 0.9104 - val_loss: 0.4518 - val_acc: 0.8851

Epoch 00096: val_acc did not improve from 0.89310
Epoch 97/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2616 - acc: 0.9099 - val_loss: 0.4706 - val_acc: 0.8800

Epoch 00097: val_acc did not improve from 0.89310
Epoch 98/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2595 - acc: 0.9099 - val_loss: 0.5076 - val_acc: 0.8825

Epoch 00098: val_acc did not improve from 0.89310
Epoch 99/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2617 - acc: 0.9087 - val_loss: 0.4590 - val_acc: 0.8866

Epoch 00099: val_acc did not improve from 0.89310
Epoch 100/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2535 - acc: 0.9122 - val_loss: 0.4583 - val_acc: 0.8820

Epoch 00100: val_acc did not improve from 0.89310
Epoch 101/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2585 - acc: 0.9084 - val_loss: 0.4425 - val_acc: 0.8916

Epoch 00101: val_acc did not improve from 0.89310
Epoch 102/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2515 - acc: 0.9129 - val_loss: 0.4715 - val_acc: 0.8846

Epoch 00102: val_acc did not improve from 0.89310
Epoch 103/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2665 - acc: 0.9081 - val_loss: 0.4412 - val_acc: 0.8851

Epoch 00103: val_acc did not improve from 0.89310
Epoch 104/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2585 - acc: 0.9099 - val_loss: 0.5194 - val_acc: 0.8775

Epoch 00104: val_acc did not improve from 0.89310
Epoch 105/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2537 - acc: 0.9130 - val_loss: 0.4794 - val_acc: 0.8822

Epoch 00105: val_acc did not improve from 0.89310
Epoch 106/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2520 - acc: 0.9120 - val_loss: 0.4791 - val_acc: 0.8816

Epoch 00106: val_acc did not improve from 0.89310
Epoch 107/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2538 - acc: 0.9123 - val_loss: 0.4994 - val_acc: 0.8852

Epoch 00107: val_acc did not improve from 0.89310
Epoch 108/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2538 - acc: 0.9121 - val_loss: 0.4670 - val_acc: 0.8801

Epoch 00108: val_acc did not improve from 0.89310
Epoch 109/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2537 - acc: 0.9111 - val_loss: 0.4473 - val_acc: 0.8907

Epoch 00109: val_acc did not improve from 0.89310
Epoch 110/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2547 - acc: 0.9126 - val_loss: 0.4728 - val_acc: 0.8809

Epoch 00110: val_acc did not improve from 0.89310
Epoch 111/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2486 - acc: 0.9138 - val_loss: 0.4908 - val_acc: 0.8823

Epoch 00111: val_acc did not improve from 0.89310
Epoch 112/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2492 - acc: 0.9144 - val_loss: 0.5223 - val_acc: 0.8782

Epoch 00112: val_acc did not improve from 0.89310
Epoch 113/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2525 - acc: 0.9134 - val_loss: 0.4731 - val_acc: 0.8907

Epoch 00113: val_acc did not improve from 0.89310
Epoch 114/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2527 - acc: 0.9123 - val_loss: 0.4684 - val_acc: 0.8874

Epoch 00114: val_acc did not improve from 0.89310
Epoch 115/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2495 - acc: 0.9144 - val_loss: 0.5162 - val_acc: 0.8801

Epoch 00115: val_acc did not improve from 0.89310
Epoch 116/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2484 - acc: 0.9136 - val_loss: 0.4883 - val_acc: 0.8858

Epoch 00116: val_acc did not improve from 0.89310
Epoch 117/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2463 - acc: 0.9152 - val_loss: 0.5191 - val_acc: 0.8834

Epoch 00117: val_acc did not improve from 0.89310
Epoch 118/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2437 - acc: 0.9152 - val_loss: 0.5039 - val_acc: 0.8807

Epoch 00118: val_acc did not improve from 0.89310
Epoch 119/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2425 - acc: 0.9159 - val_loss: 0.4981 - val_acc: 0.8884

Epoch 00119: val_acc did not improve from 0.89310
Epoch 120/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2416 - acc: 0.9168 - val_loss: 0.5157 - val_acc: 0.8842

Epoch 00120: val_acc did not improve from 0.89310
Epoch 121/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2414 - acc: 0.9163 - val_loss: 0.4443 - val_acc: 0.8880

Epoch 00121: val_acc did not improve from 0.89310
Epoch 122/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2456 - acc: 0.9153 - val_loss: 0.4418 - val_acc: 0.8886

Epoch 00122: val_acc did not improve from 0.89310
Epoch 123/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2373 - acc: 0.9181 - val_loss: 0.4845 - val_acc: 0.8839

Epoch 00123: val_acc did not improve from 0.89310
Epoch 124/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2409 - acc: 0.9169 - val_loss: 0.4619 - val_acc: 0.8861

Epoch 00124: val_acc did not improve from 0.89310
Epoch 125/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2413 - acc: 0.9160 - val_loss: 0.4781 - val_acc: 0.8907

Epoch 00125: val_acc did not improve from 0.89310
Epoch 126/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2428 - acc: 0.9164 - val_loss: 0.4841 - val_acc: 0.8818

Epoch 00126: val_acc did not improve from 0.89310
Epoch 127/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2395 - acc: 0.9171 - val_loss: 0.5307 - val_acc: 0.8776

Epoch 00127: val_acc did not improve from 0.89310
Epoch 128/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2417 - acc: 0.9158 - val_loss: 0.4857 - val_acc: 0.8827

Epoch 00128: val_acc did not improve from 0.89310
Epoch 129/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2390 - acc: 0.9176 - val_loss: 0.4941 - val_acc: 0.8928

Epoch 00129: val_acc did not improve from 0.89310
Epoch 130/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2453 - acc: 0.9150 - val_loss: 0.5485 - val_acc: 0.8793

Epoch 00130: val_acc did not improve from 0.89310
Epoch 131/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2421 - acc: 0.9169 - val_loss: 0.4660 - val_acc: 0.8903

Epoch 00131: val_acc did not improve from 0.89310
Epoch 132/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2408 - acc: 0.9163 - val_loss: 0.4634 - val_acc: 0.8849

Epoch 00132: val_acc did not improve from 0.89310
Epoch 133/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2400 - acc: 0.9155 - val_loss: 0.4461 - val_acc: 0.8961

Epoch 00133: val_acc improved from 0.89310 to 0.89610, saving model to weights.hdf5
Epoch 134/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2336 - acc: 0.9182 - val_loss: 0.4751 - val_acc: 0.8928

Epoch 00134: val_acc did not improve from 0.89610
Epoch 135/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2385 - acc: 0.9183 - val_loss: 0.4877 - val_acc: 0.8823

Epoch 00135: val_acc did not improve from 0.89610
Epoch 136/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2394 - acc: 0.9180 - val_loss: 0.4460 - val_acc: 0.8913

Epoch 00136: val_acc did not improve from 0.89610
Epoch 137/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2327 - acc: 0.9188 - val_loss: 0.4736 - val_acc: 0.8882

Epoch 00137: val_acc did not improve from 0.89610
Epoch 138/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2346 - acc: 0.9200 - val_loss: 0.4436 - val_acc: 0.8907

Epoch 00138: val_acc did not improve from 0.89610
Epoch 139/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2320 - acc: 0.9202 - val_loss: 0.4637 - val_acc: 0.8933

Epoch 00139: val_acc did not improve from 0.89610
Epoch 140/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2300 - acc: 0.9213 - val_loss: 0.4851 - val_acc: 0.8917

Epoch 00140: val_acc did not improve from 0.89610
Epoch 141/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2348 - acc: 0.9192 - val_loss: 0.4780 - val_acc: 0.8909

Epoch 00141: val_acc did not improve from 0.89610
Epoch 142/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2258 - acc: 0.9224 - val_loss: 0.4526 - val_acc: 0.8947

Epoch 00142: val_acc did not improve from 0.89610
Epoch 143/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2339 - acc: 0.9202 - val_loss: 0.4358 - val_acc: 0.8899

Epoch 00143: val_acc did not improve from 0.89610
Epoch 144/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2240 - acc: 0.9212 - val_loss: 0.4591 - val_acc: 0.8923

Epoch 00144: val_acc did not improve from 0.89610
Epoch 145/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2304 - acc: 0.9208 - val_loss: 0.5166 - val_acc: 0.8831

Epoch 00145: val_acc did not improve from 0.89610
Epoch 146/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2302 - acc: 0.9209 - val_loss: 0.5412 - val_acc: 0.8841

Epoch 00146: val_acc did not improve from 0.89610
Epoch 147/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2303 - acc: 0.9219 - val_loss: 0.5533 - val_acc: 0.8814

Epoch 00147: val_acc did not improve from 0.89610
Epoch 148/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2260 - acc: 0.9215 - val_loss: 0.4731 - val_acc: 0.8863

Epoch 00148: val_acc did not improve from 0.89610
Epoch 149/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2268 - acc: 0.9218 - val_loss: 0.4596 - val_acc: 0.8925

Epoch 00149: val_acc did not improve from 0.89610
Epoch 150/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2303 - acc: 0.9211 - val_loss: 0.4826 - val_acc: 0.8915

Epoch 00150: val_acc did not improve from 0.89610
Epoch 151/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2276 - acc: 0.9228 - val_loss: 0.4690 - val_acc: 0.8913

Epoch 00151: val_acc did not improve from 0.89610
Epoch 152/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2296 - acc: 0.9214 - val_loss: 0.4592 - val_acc: 0.8882

Epoch 00152: val_acc did not improve from 0.89610
Epoch 153/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2238 - acc: 0.9229 - val_loss: 0.5209 - val_acc: 0.8819

Epoch 00153: val_acc did not improve from 0.89610
Epoch 154/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2272 - acc: 0.9212 - val_loss: 0.4932 - val_acc: 0.8877

Epoch 00154: val_acc did not improve from 0.89610
Epoch 155/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2214 - acc: 0.9232 - val_loss: 0.5099 - val_acc: 0.8806

Epoch 00155: val_acc did not improve from 0.89610
Epoch 156/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2224 - acc: 0.9239 - val_loss: 0.5280 - val_acc: 0.8885

Epoch 00156: val_acc did not improve from 0.89610
Epoch 157/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2289 - acc: 0.9213 - val_loss: 0.5159 - val_acc: 0.8817

Epoch 00157: val_acc did not improve from 0.89610
Epoch 158/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2188 - acc: 0.9242 - val_loss: 0.4585 - val_acc: 0.8872

Epoch 00158: val_acc did not improve from 0.89610
Epoch 159/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2224 - acc: 0.9240 - val_loss: 0.5402 - val_acc: 0.8858

Epoch 00159: val_acc did not improve from 0.89610
Epoch 160/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2240 - acc: 0.9237 - val_loss: 0.4615 - val_acc: 0.8932

Epoch 00160: val_acc did not improve from 0.89610
Epoch 161/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2220 - acc: 0.9245 - val_loss: 0.4775 - val_acc: 0.8924

Epoch 00161: val_acc did not improve from 0.89610
Epoch 162/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2234 - acc: 0.9229 - val_loss: 0.5641 - val_acc: 0.8821

Epoch 00162: val_acc did not improve from 0.89610
Epoch 163/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2172 - acc: 0.9246 - val_loss: 0.4966 - val_acc: 0.8879

Epoch 00163: val_acc did not improve from 0.89610
Epoch 164/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2253 - acc: 0.9217 - val_loss: 0.4806 - val_acc: 0.8951

Epoch 00164: val_acc did not improve from 0.89610
Epoch 165/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2191 - acc: 0.9246 - val_loss: 0.4975 - val_acc: 0.8934

Epoch 00165: val_acc did not improve from 0.89610
Epoch 166/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2194 - acc: 0.9257 - val_loss: 0.5670 - val_acc: 0.8801

Epoch 00166: val_acc did not improve from 0.89610
Epoch 167/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2151 - acc: 0.9256 - val_loss: 0.5462 - val_acc: 0.8799

Epoch 00167: val_acc did not improve from 0.89610
Epoch 168/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2218 - acc: 0.9240 - val_loss: 0.5202 - val_acc: 0.8862

Epoch 00168: val_acc did not improve from 0.89610
Epoch 169/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2181 - acc: 0.9244 - val_loss: 0.4760 - val_acc: 0.8871

Epoch 00169: val_acc did not improve from 0.89610
Epoch 170/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2160 - acc: 0.9266 - val_loss: 0.4899 - val_acc: 0.8951

Epoch 00170: val_acc did not improve from 0.89610
Epoch 171/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2128 - acc: 0.9267 - val_loss: 0.4937 - val_acc: 0.8909

Epoch 00171: val_acc did not improve from 0.89610
Epoch 172/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2164 - acc: 0.9258 - val_loss: 0.4911 - val_acc: 0.8936

Epoch 00172: val_acc did not improve from 0.89610
Epoch 173/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2181 - acc: 0.9244 - val_loss: 0.4804 - val_acc: 0.8877

Epoch 00173: val_acc did not improve from 0.89610
Epoch 174/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2138 - acc: 0.9273 - val_loss: 0.4557 - val_acc: 0.8961

Epoch 00174: val_acc did not improve from 0.89610
Epoch 175/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2112 - acc: 0.9276 - val_loss: 0.5114 - val_acc: 0.8945

Epoch 00175: val_acc did not improve from 0.89610
Epoch 176/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2106 - acc: 0.9278 - val_loss: 0.5205 - val_acc: 0.8870

Epoch 00176: val_acc did not improve from 0.89610
Epoch 177/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2047 - acc: 0.9301 - val_loss: 0.4617 - val_acc: 0.8998

Epoch 00177: val_acc improved from 0.89610 to 0.89980, saving model to weights.hdf5
Epoch 178/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2081 - acc: 0.9285 - val_loss: 0.4901 - val_acc: 0.8904

Epoch 00178: val_acc did not improve from 0.89980
Epoch 179/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2072 - acc: 0.9284 - val_loss: 0.5183 - val_acc: 0.8852

Epoch 00179: val_acc did not improve from 0.89980
Epoch 180/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2113 - acc: 0.9286 - val_loss: 0.5215 - val_acc: 0.8868

Epoch 00180: val_acc did not improve from 0.89980
Epoch 181/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2105 - acc: 0.9278 - val_loss: 0.5344 - val_acc: 0.8846

Epoch 00181: val_acc did not improve from 0.89980
Epoch 182/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2102 - acc: 0.9283 - val_loss: 0.4604 - val_acc: 0.8897

Epoch 00182: val_acc did not improve from 0.89980
Epoch 183/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2064 - acc: 0.9293 - val_loss: 0.4844 - val_acc: 0.8880

Epoch 00183: val_acc did not improve from 0.89980
Epoch 184/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2115 - acc: 0.9279 - val_loss: 0.5363 - val_acc: 0.8839

Epoch 00184: val_acc did not improve from 0.89980
Epoch 185/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2028 - acc: 0.9302 - val_loss: 0.4641 - val_acc: 0.8974

Epoch 00185: val_acc did not improve from 0.89980
Epoch 186/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2126 - acc: 0.9274 - val_loss: 0.4857 - val_acc: 0.8922

Epoch 00186: val_acc did not improve from 0.89980
Epoch 187/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2102 - acc: 0.9281 - val_loss: 0.4693 - val_acc: 0.8933

Epoch 00187: val_acc did not improve from 0.89980
Epoch 188/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2064 - acc: 0.9301 - val_loss: 0.5102 - val_acc: 0.8882

Epoch 00188: val_acc did not improve from 0.89980
Epoch 189/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2052 - acc: 0.9282 - val_loss: 0.5290 - val_acc: 0.8887

Epoch 00189: val_acc did not improve from 0.89980
Epoch 190/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2054 - acc: 0.9296 - val_loss: 0.5415 - val_acc: 0.8863

Epoch 00190: val_acc did not improve from 0.89980
Epoch 191/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2048 - acc: 0.9306 - val_loss: 0.4708 - val_acc: 0.8947

Epoch 00191: val_acc did not improve from 0.89980
Epoch 192/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2061 - acc: 0.9287 - val_loss: 0.4708 - val_acc: 0.8955

Epoch 00192: val_acc did not improve from 0.89980
Epoch 193/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2092 - acc: 0.9284 - val_loss: 0.5064 - val_acc: 0.8928

Epoch 00193: val_acc did not improve from 0.89980
Epoch 194/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2046 - acc: 0.9303 - val_loss: 0.5410 - val_acc: 0.8884

Epoch 00194: val_acc did not improve from 0.89980
Epoch 195/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2017 - acc: 0.9308 - val_loss: 0.4621 - val_acc: 0.8930

Epoch 00195: val_acc did not improve from 0.89980
Epoch 196/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2021 - acc: 0.9312 - val_loss: 0.4894 - val_acc: 0.8909

Epoch 00196: val_acc did not improve from 0.89980
Epoch 197/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2060 - acc: 0.9291 - val_loss: 0.5131 - val_acc: 0.8956

Epoch 00197: val_acc did not improve from 0.89980
Epoch 198/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2027 - acc: 0.9313 - val_loss: 0.4823 - val_acc: 0.9005

Epoch 00198: val_acc improved from 0.89980 to 0.90050, saving model to weights.hdf5
Epoch 199/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2015 - acc: 0.9306 - val_loss: 0.4937 - val_acc: 0.8940

Epoch 00199: val_acc did not improve from 0.90050
Epoch 200/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2038 - acc: 0.9304 - val_loss: 0.4937 - val_acc: 0.8901

Epoch 00200: val_acc did not improve from 0.90050
Epoch 201/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2016 - acc: 0.9311 - val_loss: 0.5132 - val_acc: 0.8907

Epoch 00201: val_acc did not improve from 0.90050
Epoch 202/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1980 - acc: 0.9327 - val_loss: 0.5004 - val_acc: 0.8939

Epoch 00202: val_acc did not improve from 0.90050
Epoch 203/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1993 - acc: 0.9325 - val_loss: 0.5630 - val_acc: 0.8889

Epoch 00203: val_acc did not improve from 0.90050
Epoch 204/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1986 - acc: 0.9322 - val_loss: 0.5401 - val_acc: 0.8857

Epoch 00204: val_acc did not improve from 0.90050
Epoch 205/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2021 - acc: 0.9307 - val_loss: 0.4753 - val_acc: 0.8973

Epoch 00205: val_acc did not improve from 0.90050
Epoch 206/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1946 - acc: 0.9330 - val_loss: 0.5099 - val_acc: 0.8961

Epoch 00206: val_acc did not improve from 0.90050
Epoch 207/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2041 - acc: 0.9306 - val_loss: 0.4876 - val_acc: 0.8879

Epoch 00207: val_acc did not improve from 0.90050
Epoch 208/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2004 - acc: 0.9319 - val_loss: 0.4564 - val_acc: 0.9011

Epoch 00208: val_acc improved from 0.90050 to 0.90110, saving model to weights.hdf5
Epoch 209/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1967 - acc: 0.9328 - val_loss: 0.4671 - val_acc: 0.8987

Epoch 00209: val_acc did not improve from 0.90110
Epoch 210/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.2002 - acc: 0.9321 - val_loss: 0.4563 - val_acc: 0.8985

Epoch 00210: val_acc did not improve from 0.90110
Epoch 211/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1975 - acc: 0.9315 - val_loss: 0.5046 - val_acc: 0.8922

Epoch 00211: val_acc did not improve from 0.90110
Epoch 212/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1928 - acc: 0.9338 - val_loss: 0.5018 - val_acc: 0.8887

Epoch 00212: val_acc did not improve from 0.90110
Epoch 213/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1908 - acc: 0.9352 - val_loss: 0.4690 - val_acc: 0.8976

Epoch 00213: val_acc did not improve from 0.90110
Epoch 214/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1910 - acc: 0.9353 - val_loss: 0.5225 - val_acc: 0.8911

Epoch 00214: val_acc did not improve from 0.90110
Epoch 215/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1987 - acc: 0.9314 - val_loss: 0.4465 - val_acc: 0.9000

Epoch 00216: val_acc did not improve from 0.90110
Epoch 217/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1945 - acc: 0.9337 - val_loss: 0.5131 - val_acc: 0.8972

Epoch 00217: val_acc did not improve from 0.90110
Epoch 218/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1935 - acc: 0.9344 - val_loss: 0.5575 - val_acc: 0.8860

Epoch 00218: val_acc did not improve from 0.90110
Epoch 219/375
1034/2000 [==============>...............] - ETA: 31s - loss: 0.1908 - acc: 0.9350
IOPub message rate exceeded.
The notebook server will temporarily stop sending output
to the client in order to avoid crashing it.
To change this limit, set the config variable
`--NotebookApp.iopub_msg_rate_limit`.

Current values:
NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec)
NotebookApp.rate_limit_window=3.0 (secs)

2000/2000 [==============================] - 68s 34ms/step - loss: 0.1929 - acc: 0.9339 - val_loss: 0.4938 - val_acc: 0.8936

Epoch 00222: val_acc did not improve from 0.90110
Epoch 223/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1850 - acc: 0.9362 - val_loss: 0.5333 - val_acc: 0.8964

Epoch 00223: val_acc did not improve from 0.90110
Epoch 224/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1558 - acc: 0.9470 - val_loss: 0.5243 - val_acc: 0.9049

Epoch 00323: val_acc did not improve from 0.90700
Epoch 324/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1516 - acc: 0.9492 - val_loss: 0.5324 - val_acc: 0.8948

Epoch 00324: val_acc did not improve from 0.90700
Epoch 325/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1570 - acc: 0.9469 - val_loss: 0.4916 - val_acc: 0.9053

Epoch 00325: val_acc did not improve from 0.90700
Epoch 326/375
 531/2000 [======>.......................] - ETA: 47s - loss: 0.1449 - acc: 0.9507
IOPub message rate exceeded.
The notebook server will temporarily stop sending output
to the client in order to avoid crashing it.
To change this limit, set the config variable
`--NotebookApp.iopub_msg_rate_limit`.

Current values:
NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec)
NotebookApp.rate_limit_window=3.0 (secs)

2000/2000 [==============================] - 68s 34ms/step - loss: 0.1497 - acc: 0.9486 - val_loss: 0.5252 - val_acc: 0.9059

Epoch 00330: val_acc did not improve from 0.90700
Epoch 331/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1508 - acc: 0.9485 - val_loss: 0.5170 - val_acc: 0.9031

Epoch 00331: val_acc did not improve from 0.90700
Epoch 332/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.1561 - acc: 0.9474 - val_loss: 0.4795 - val_acc: 0.9069

Epoch 00332: val_acc did not improve from 0.90700
Epoch 333/375
1635/2000 [=======================>......] - ETA: 11s - loss: 0.1515 - acc: 0.9482

In [54]:
def getImage(id):
    image = img_test[id]
    image = image.astype('float32')
    image /= 255
    return image

def showImage(id):
    image = getImage(id)
    %matplotlib inline
    imgplot = plt.imshow(image)
    labelid = lbl_test[id]
    category = categories[labelid[0]]
    print("category : "+category)    

def predictImage(id):
    image = getImage(id)
    showImage(id)
    image = np.expand_dims(image, axis=0)
    result = model.predict(image)
    result = result[0].tolist()
    best_index=result.index(max(result))
    print ("prediction : "+categories[best_index])
    
    
predictImage(50)


category : truck
prediction : truck

In [55]:
pandas.DataFrame(history_callback.history).to_csv("history.csv")

model.save('keras_allconv.h5')

In [56]:
model_json = model.to_json()
with open("allconv.json", "w") as json_file:
    json_file.write(model_json)

In [ ]:


In [ ]: