In [1]:
# https://keras.io/
!pip install -q keras
import keras
Using TensorFlow backend.
In [0]:
import keras
from keras.datasets import cifar10
from keras.models import Model, Sequential
from keras.layers import Dense, Dropout, Flatten, Input, AveragePooling2D, Activation
from keras.layers import Conv2D, SeparableConv2D, MaxPooling2D, BatchNormalization
from keras.layers import Concatenate
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
In [0]:
# This part prevents TensorFlow from allocating all of the available GPU
# memory up front (Keras backend configuration).
import tensorflow as tf
from keras import backend as k
# Don't pre-allocate memory; allocate as-needed
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Create a session with the above options specified.
k.tensorflow_backend.set_session(tf.Session(config=config))
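In [0]:
# Note: ConfigProto/Session is TensorFlow 1.x API, which this notebook runs
# on. A sketch of the TensorFlow 2.x equivalent (an assumption, only needed
# if you port the notebook) for the same on-demand GPU memory allocation:
if tf.__version__.startswith('2'):
    for gpu in tf.config.experimental.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)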
In [0]:
# Hyperparameters
batch_size = 128
num_classes = 10
epochs = 250
l = 40                # layers per dense block (overridden to 12 below)
num_filter = 12       # growth rate: new feature maps added per dense layer
compression = 0.5     # DenseNet-BC compression factor
dropout_rate = 0.2
In [5]:
# Load CIFAR10 Data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
img_height, img_width, channel = x_train.shape[1],x_train.shape[2],x_train.shape[3]
# convert class labels to one-hot encoding
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170500096/170498071 [==============================] - 86s 1us/step
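In [0]:
# The images load as uint8 in [0, 255] and nothing in this section rescales
# them. A common preprocessing step (an addition, not from the original
# cells) is to normalize the pixels to [0, 1] before training:
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0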
In [0]:
train_datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
validation_datagen = ImageDataGenerator()
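In [0]:
# A minimal sketch (assumed usage, not from the original cells) of how these
# generators typically feed training in Keras 2.x; commented out here because
# the model is only defined further down.
# train_flow = train_datagen.flow(x_train, y_train, batch_size=batch_size)
# val_flow = validation_datagen.flow(x_test, y_test, batch_size=batch_size)
# model.fit_generator(train_flow,
#                     steps_per_epoch=len(x_train) // batch_size,
#                     epochs=epochs,
#                     validation_data=val_flow,
#                     validation_steps=len(x_test) // batch_size)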
In [0]:
# Dense Block
def add_denseblock(input, num_filter=12, dropout_rate=0.2, separable=False):
    global compression
    temp = input
    for _ in range(l):
        BatchNorm = BatchNormalization()(temp)
        relu = Activation('relu')(BatchNorm)
        if separable:
            Conv2D_3_3 = SeparableConv2D(int(num_filter * compression), (3, 3), use_bias=False, padding='same')(relu)
        else:
            Conv2D_3_3 = Conv2D(int(num_filter * compression), (3, 3), use_bias=False, padding='same')(relu)
        if dropout_rate > 0:
            Conv2D_3_3 = Dropout(dropout_rate)(Conv2D_3_3)
        # grow the running tensor by concatenating along the channel axis
        concat = Concatenate(axis=-1)([temp, Conv2D_3_3])
        temp = concat
    return temp
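In [0]:
# Shape check (a sketch, not part of the original): each of the l iterations
# of add_denseblock concatenates int(num_filter * compression) new feature
# maps, so the output depth is in_channels + l * int(num_filter * compression).
# E.g. with l = 12, num_filter = 32, compression = 0.5, a 16-channel input
# grows to 16 + 12 * 16 = 208 channels.
probe = add_denseblock(Input(shape=(32, 32, 16)), num_filter=32, dropout_rate=0.0)
print(k.int_shape(probe)[-1])  # 16 + l * 16; 656 with l = 40, 208 with l = 12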
In [0]:
def add_transition(input, num_filter=12, dropout_rate=0.2):
    global compression
    BatchNorm = BatchNormalization()(input)
    relu = Activation('relu')(BatchNorm)
    # 1x1 bottleneck conv, then 2x2 average pooling halves the spatial size
    Conv2D_BottleNeck = Conv2D(int(num_filter * compression), (1, 1), use_bias=False, padding='same')(relu)
    if dropout_rate > 0:
        Conv2D_BottleNeck = Dropout(dropout_rate)(Conv2D_BottleNeck)
    avg = AveragePooling2D(pool_size=(2, 2))(Conv2D_BottleNeck)
    return avg
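In [0]:
# Shape check (a sketch, not in the original): a transition projects to
# int(num_filter * compression) channels with the 1x1 conv and halves the
# spatial dimensions with the 2x2 average pooling.
t_probe = add_transition(Input(shape=(32, 32, 208)), num_filter=32, dropout_rate=0.0)
print(k.int_shape(t_probe))  # (None, 16, 16, 16)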
In [0]:
def output_layer(input):
    global compression
    BatchNorm = BatchNormalization()(input)
    relu = Activation('relu')(BatchNorm)
    AvgPooling = AveragePooling2D(pool_size=(2, 2))(relu)
    flat = Flatten()(AvgPooling)
    output = Dense(num_classes, activation='softmax')(flat)
    return output
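In [0]:
# Variant head (an alternative sketch, not the author's code): the DenseNet
# paper ends with global average pooling before the classifier, which also
# removes the fixed feature-map-size dependence that Flatten() introduces.
from keras.layers import GlobalAveragePooling2D

def output_layer_gap(input):
    BatchNorm = BatchNormalization()(input)
    relu = Activation('relu')(BatchNorm)
    pooled = GlobalAveragePooling2D()(relu)
    return Dense(num_classes, activation='softmax')(pooled)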
In [0]:
num_filter = 16
dropout_rate = 0.2
l = 12  # 12 dense layers per block for this run

input = Input(shape=(img_height, img_width, channel,))
First_Conv2D = Conv2D(num_filter, (3, 3), use_bias=False, padding='same')(input)

# num_filter is re-set before each block to change the per-layer growth
num_filter = 32
First_Block = add_denseblock(First_Conv2D, num_filter, dropout_rate)
First_Transition = add_transition(First_Block, num_filter, dropout_rate)

num_filter = 64
Second_Block = add_denseblock(First_Transition, num_filter, dropout_rate, separable=False)
Second_Transition = add_transition(Second_Block, num_filter, dropout_rate)

num_filter = 64
Third_Block = add_denseblock(Second_Transition, num_filter, dropout_rate, separable=True)
Third_Transition = add_transition(Third_Block, num_filter, dropout_rate)

num_filter = 32
Fourth_Block = add_denseblock(Third_Transition, num_filter, dropout_rate, separable=True)
Fourth_Transition = add_transition(Fourth_Block, num_filter, dropout_rate)

num_filter = 16
Last_Block = add_denseblock(Fourth_Transition, num_filter, dropout_rate)
output = output_layer(Last_Block)
In [19]:
model = Model(inputs=[input], outputs=[output])
model.summary()
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_4 (InputLayer) (None, 32, 32, 3) 0
__________________________________________________________________________________________________
conv2d_116 (Conv2D) (None, 32, 32, 16) 432 input_4[0][0]
__________________________________________________________________________________________________
batch_normalization_271 (BatchN (None, 32, 32, 16) 64 conv2d_116[0][0]
__________________________________________________________________________________________________
activation_271 (Activation) (None, 32, 32, 16) 0 batch_normalization_271[0][0]
__________________________________________________________________________________________________
conv2d_117 (Conv2D) (None, 32, 32, 16) 2304 activation_271[0][0]
__________________________________________________________________________________________________
dropout_269 (Dropout) (None, 32, 32, 16) 0 conv2d_117[0][0]
__________________________________________________________________________________________________
concatenate_261 (Concatenate) (None, 32, 32, 32) 0 conv2d_116[0][0]
dropout_269[0][0]
__________________________________________________________________________________________________
batch_normalization_272 (BatchN (None, 32, 32, 32) 128 concatenate_261[0][0]
__________________________________________________________________________________________________
activation_272 (Activation) (None, 32, 32, 32) 0 batch_normalization_272[0][0]
__________________________________________________________________________________________________
conv2d_118 (Conv2D) (None, 32, 32, 16) 4608 activation_272[0][0]
__________________________________________________________________________________________________
dropout_270 (Dropout) (None, 32, 32, 16) 0 conv2d_118[0][0]
__________________________________________________________________________________________________
concatenate_262 (Concatenate) (None, 32, 32, 48) 0 concatenate_261[0][0]
dropout_270[0][0]
__________________________________________________________________________________________________
batch_normalization_273 (BatchN (None, 32, 32, 48) 192 concatenate_262[0][0]
__________________________________________________________________________________________________
activation_273 (Activation) (None, 32, 32, 48) 0 batch_normalization_273[0][0]
__________________________________________________________________________________________________
conv2d_119 (Conv2D) (None, 32, 32, 16) 6912 activation_273[0][0]
__________________________________________________________________________________________________
dropout_271 (Dropout) (None, 32, 32, 16) 0 conv2d_119[0][0]
__________________________________________________________________________________________________
concatenate_263 (Concatenate) (None, 32, 32, 64) 0 concatenate_262[0][0]
dropout_271[0][0]
__________________________________________________________________________________________________
batch_normalization_274 (BatchN (None, 32, 32, 64) 256 concatenate_263[0][0]
__________________________________________________________________________________________________
activation_274 (Activation) (None, 32, 32, 64) 0 batch_normalization_274[0][0]
__________________________________________________________________________________________________
conv2d_120 (Conv2D) (None, 32, 32, 16) 9216 activation_274[0][0]
__________________________________________________________________________________________________
dropout_272 (Dropout) (None, 32, 32, 16) 0 conv2d_120[0][0]
__________________________________________________________________________________________________
concatenate_264 (Concatenate) (None, 32, 32, 80) 0 concatenate_263[0][0]
dropout_272[0][0]
__________________________________________________________________________________________________
batch_normalization_275 (BatchN (None, 32, 32, 80) 320 concatenate_264[0][0]
__________________________________________________________________________________________________
activation_275 (Activation) (None, 32, 32, 80) 0 batch_normalization_275[0][0]
__________________________________________________________________________________________________
conv2d_121 (Conv2D) (None, 32, 32, 16) 11520 activation_275[0][0]
__________________________________________________________________________________________________
dropout_273 (Dropout) (None, 32, 32, 16) 0 conv2d_121[0][0]
__________________________________________________________________________________________________
concatenate_265 (Concatenate) (None, 32, 32, 96) 0 concatenate_264[0][0]
dropout_273[0][0]
__________________________________________________________________________________________________
batch_normalization_276 (BatchN (None, 32, 32, 96) 384 concatenate_265[0][0]
__________________________________________________________________________________________________
activation_276 (Activation) (None, 32, 32, 96) 0 batch_normalization_276[0][0]
__________________________________________________________________________________________________
conv2d_122 (Conv2D) (None, 32, 32, 16) 13824 activation_276[0][0]
__________________________________________________________________________________________________
dropout_274 (Dropout) (None, 32, 32, 16) 0 conv2d_122[0][0]
__________________________________________________________________________________________________
concatenate_266 (Concatenate) (None, 32, 32, 112) 0 concatenate_265[0][0]
dropout_274[0][0]
__________________________________________________________________________________________________
batch_normalization_277 (BatchN (None, 32, 32, 112) 448 concatenate_266[0][0]
__________________________________________________________________________________________________
activation_277 (Activation) (None, 32, 32, 112) 0 batch_normalization_277[0][0]
__________________________________________________________________________________________________
conv2d_123 (Conv2D) (None, 32, 32, 16) 16128 activation_277[0][0]
__________________________________________________________________________________________________
dropout_275 (Dropout) (None, 32, 32, 16) 0 conv2d_123[0][0]
__________________________________________________________________________________________________
concatenate_267 (Concatenate) (None, 32, 32, 128) 0 concatenate_266[0][0]
dropout_275[0][0]
__________________________________________________________________________________________________
batch_normalization_278 (BatchN (None, 32, 32, 128) 512 concatenate_267[0][0]
__________________________________________________________________________________________________
activation_278 (Activation) (None, 32, 32, 128) 0 batch_normalization_278[0][0]
__________________________________________________________________________________________________
conv2d_124 (Conv2D) (None, 32, 32, 16) 18432 activation_278[0][0]
__________________________________________________________________________________________________
dropout_276 (Dropout) (None, 32, 32, 16) 0 conv2d_124[0][0]
__________________________________________________________________________________________________
concatenate_268 (Concatenate) (None, 32, 32, 144) 0 concatenate_267[0][0]
dropout_276[0][0]
__________________________________________________________________________________________________
batch_normalization_279 (BatchN (None, 32, 32, 144) 576 concatenate_268[0][0]
__________________________________________________________________________________________________
activation_279 (Activation) (None, 32, 32, 144) 0 batch_normalization_279[0][0]
__________________________________________________________________________________________________
conv2d_125 (Conv2D) (None, 32, 32, 16) 20736 activation_279[0][0]
__________________________________________________________________________________________________
dropout_277 (Dropout) (None, 32, 32, 16) 0 conv2d_125[0][0]
__________________________________________________________________________________________________
concatenate_269 (Concatenate) (None, 32, 32, 160) 0 concatenate_268[0][0]
dropout_277[0][0]
__________________________________________________________________________________________________
batch_normalization_280 (BatchN (None, 32, 32, 160) 640 concatenate_269[0][0]
__________________________________________________________________________________________________
activation_280 (Activation) (None, 32, 32, 160) 0 batch_normalization_280[0][0]
__________________________________________________________________________________________________
conv2d_126 (Conv2D) (None, 32, 32, 16) 23040 activation_280[0][0]
__________________________________________________________________________________________________
dropout_278 (Dropout) (None, 32, 32, 16) 0 conv2d_126[0][0]
__________________________________________________________________________________________________
concatenate_270 (Concatenate) (None, 32, 32, 176) 0 concatenate_269[0][0]
dropout_278[0][0]
__________________________________________________________________________________________________
batch_normalization_281 (BatchN (None, 32, 32, 176) 704 concatenate_270[0][0]
__________________________________________________________________________________________________
activation_281 (Activation) (None, 32, 32, 176) 0 batch_normalization_281[0][0]
__________________________________________________________________________________________________
conv2d_127 (Conv2D) (None, 32, 32, 16) 25344 activation_281[0][0]
__________________________________________________________________________________________________
dropout_279 (Dropout) (None, 32, 32, 16) 0 conv2d_127[0][0]
__________________________________________________________________________________________________
concatenate_271 (Concatenate) (None, 32, 32, 192) 0 concatenate_270[0][0]
dropout_279[0][0]
__________________________________________________________________________________________________
batch_normalization_282 (BatchN (None, 32, 32, 192) 768 concatenate_271[0][0]
__________________________________________________________________________________________________
activation_282 (Activation) (None, 32, 32, 192) 0 batch_normalization_282[0][0]
__________________________________________________________________________________________________
conv2d_128 (Conv2D) (None, 32, 32, 16) 27648 activation_282[0][0]
__________________________________________________________________________________________________
dropout_280 (Dropout) (None, 32, 32, 16) 0 conv2d_128[0][0]
__________________________________________________________________________________________________
concatenate_272 (Concatenate) (None, 32, 32, 208) 0 concatenate_271[0][0]
dropout_280[0][0]
__________________________________________________________________________________________________
batch_normalization_283 (BatchN (None, 32, 32, 208) 832 concatenate_272[0][0]
__________________________________________________________________________________________________
activation_283 (Activation) (None, 32, 32, 208) 0 batch_normalization_283[0][0]
__________________________________________________________________________________________________
conv2d_129 (Conv2D) (None, 32, 32, 16) 3328 activation_283[0][0]
__________________________________________________________________________________________________
dropout_281 (Dropout) (None, 32, 32, 16) 0 conv2d_129[0][0]
__________________________________________________________________________________________________
average_pooling2d_11 (AveragePo (None, 16, 16, 16) 0 dropout_281[0][0]
__________________________________________________________________________________________________
batch_normalization_284 (BatchN (None, 16, 16, 16) 64 average_pooling2d_11[0][0]
__________________________________________________________________________________________________
activation_284 (Activation) (None, 16, 16, 16) 0 batch_normalization_284[0][0]
__________________________________________________________________________________________________
conv2d_130 (Conv2D) (None, 16, 16, 32) 4608 activation_284[0][0]
__________________________________________________________________________________________________
dropout_282 (Dropout) (None, 16, 16, 32) 0 conv2d_130[0][0]
__________________________________________________________________________________________________
concatenate_273 (Concatenate) (None, 16, 16, 48) 0 average_pooling2d_11[0][0]
dropout_282[0][0]
__________________________________________________________________________________________________
batch_normalization_285 (BatchN (None, 16, 16, 48) 192 concatenate_273[0][0]
__________________________________________________________________________________________________
activation_285 (Activation) (None, 16, 16, 48) 0 batch_normalization_285[0][0]
__________________________________________________________________________________________________
conv2d_131 (Conv2D) (None, 16, 16, 32) 13824 activation_285[0][0]
__________________________________________________________________________________________________
dropout_283 (Dropout) (None, 16, 16, 32) 0 conv2d_131[0][0]
__________________________________________________________________________________________________
concatenate_274 (Concatenate) (None, 16, 16, 80) 0 concatenate_273[0][0]
dropout_283[0][0]
__________________________________________________________________________________________________
batch_normalization_286 (BatchN (None, 16, 16, 80) 320 concatenate_274[0][0]
__________________________________________________________________________________________________
activation_286 (Activation) (None, 16, 16, 80) 0 batch_normalization_286[0][0]
__________________________________________________________________________________________________
conv2d_132 (Conv2D) (None, 16, 16, 32) 23040 activation_286[0][0]
__________________________________________________________________________________________________
dropout_284 (Dropout) (None, 16, 16, 32) 0 conv2d_132[0][0]
__________________________________________________________________________________________________
concatenate_275 (Concatenate) (None, 16, 16, 112) 0 concatenate_274[0][0]
dropout_284[0][0]
__________________________________________________________________________________________________
batch_normalization_287 (BatchN (None, 16, 16, 112) 448 concatenate_275[0][0]
__________________________________________________________________________________________________
activation_287 (Activation) (None, 16, 16, 112) 0 batch_normalization_287[0][0]
__________________________________________________________________________________________________
conv2d_133 (Conv2D) (None, 16, 16, 32) 32256 activation_287[0][0]
__________________________________________________________________________________________________
dropout_285 (Dropout) (None, 16, 16, 32) 0 conv2d_133[0][0]
__________________________________________________________________________________________________
concatenate_276 (Concatenate) (None, 16, 16, 144) 0 concatenate_275[0][0]
dropout_285[0][0]
__________________________________________________________________________________________________
batch_normalization_288 (BatchN (None, 16, 16, 144) 576 concatenate_276[0][0]
__________________________________________________________________________________________________
activation_288 (Activation) (None, 16, 16, 144) 0 batch_normalization_288[0][0]
__________________________________________________________________________________________________
conv2d_134 (Conv2D) (None, 16, 16, 32) 41472 activation_288[0][0]
__________________________________________________________________________________________________
dropout_286 (Dropout) (None, 16, 16, 32) 0 conv2d_134[0][0]
__________________________________________________________________________________________________
concatenate_277 (Concatenate) (None, 16, 16, 176) 0 concatenate_276[0][0]
dropout_286[0][0]
__________________________________________________________________________________________________
batch_normalization_289 (BatchN (None, 16, 16, 176) 704 concatenate_277[0][0]
__________________________________________________________________________________________________
activation_289 (Activation) (None, 16, 16, 176) 0 batch_normalization_289[0][0]
__________________________________________________________________________________________________
conv2d_135 (Conv2D) (None, 16, 16, 32) 50688 activation_289[0][0]
__________________________________________________________________________________________________
dropout_287 (Dropout) (None, 16, 16, 32) 0 conv2d_135[0][0]
__________________________________________________________________________________________________
concatenate_278 (Concatenate) (None, 16, 16, 208) 0 concatenate_277[0][0]
dropout_287[0][0]
__________________________________________________________________________________________________
batch_normalization_290 (BatchN (None, 16, 16, 208) 832 concatenate_278[0][0]
__________________________________________________________________________________________________
activation_290 (Activation) (None, 16, 16, 208) 0 batch_normalization_290[0][0]
__________________________________________________________________________________________________
conv2d_136 (Conv2D) (None, 16, 16, 32) 59904 activation_290[0][0]
__________________________________________________________________________________________________
dropout_288 (Dropout) (None, 16, 16, 32) 0 conv2d_136[0][0]
__________________________________________________________________________________________________
concatenate_279 (Concatenate) (None, 16, 16, 240) 0 concatenate_278[0][0]
dropout_288[0][0]
__________________________________________________________________________________________________
batch_normalization_291 (BatchN (None, 16, 16, 240) 960 concatenate_279[0][0]
__________________________________________________________________________________________________
activation_291 (Activation) (None, 16, 16, 240) 0 batch_normalization_291[0][0]
__________________________________________________________________________________________________
conv2d_137 (Conv2D) (None, 16, 16, 32) 69120 activation_291[0][0]
__________________________________________________________________________________________________
dropout_289 (Dropout) (None, 16, 16, 32) 0 conv2d_137[0][0]
__________________________________________________________________________________________________
concatenate_280 (Concatenate) (None, 16, 16, 272) 0 concatenate_279[0][0]
dropout_289[0][0]
__________________________________________________________________________________________________
batch_normalization_292 (BatchN (None, 16, 16, 272) 1088 concatenate_280[0][0]
__________________________________________________________________________________________________
activation_292 (Activation) (None, 16, 16, 272) 0 batch_normalization_292[0][0]
__________________________________________________________________________________________________
conv2d_138 (Conv2D) (None, 16, 16, 32) 78336 activation_292[0][0]
__________________________________________________________________________________________________
dropout_290 (Dropout) (None, 16, 16, 32) 0 conv2d_138[0][0]
__________________________________________________________________________________________________
concatenate_281 (Concatenate) (None, 16, 16, 304) 0 concatenate_280[0][0]
dropout_290[0][0]
__________________________________________________________________________________________________
batch_normalization_293 (BatchN (None, 16, 16, 304) 1216 concatenate_281[0][0]
__________________________________________________________________________________________________
activation_293 (Activation) (None, 16, 16, 304) 0 batch_normalization_293[0][0]
__________________________________________________________________________________________________
conv2d_139 (Conv2D) (None, 16, 16, 32) 87552 activation_293[0][0]
__________________________________________________________________________________________________
dropout_291 (Dropout) (None, 16, 16, 32) 0 conv2d_139[0][0]
__________________________________________________________________________________________________
concatenate_282 (Concatenate) (None, 16, 16, 336) 0 concatenate_281[0][0]
dropout_291[0][0]
__________________________________________________________________________________________________
batch_normalization_294 (BatchN (None, 16, 16, 336) 1344 concatenate_282[0][0]
__________________________________________________________________________________________________
activation_294 (Activation) (None, 16, 16, 336) 0 batch_normalization_294[0][0]
__________________________________________________________________________________________________
conv2d_140 (Conv2D) (None, 16, 16, 32) 96768 activation_294[0][0]
__________________________________________________________________________________________________
dropout_292 (Dropout) (None, 16, 16, 32) 0 conv2d_140[0][0]
__________________________________________________________________________________________________
concatenate_283 (Concatenate) (None, 16, 16, 368) 0 concatenate_282[0][0]
dropout_292[0][0]
__________________________________________________________________________________________________
batch_normalization_295 (BatchN (None, 16, 16, 368) 1472 concatenate_283[0][0]
__________________________________________________________________________________________________
activation_295 (Activation) (None, 16, 16, 368) 0 batch_normalization_295[0][0]
__________________________________________________________________________________________________
conv2d_141 (Conv2D) (None, 16, 16, 32) 105984 activation_295[0][0]
__________________________________________________________________________________________________
dropout_293 (Dropout) (None, 16, 16, 32) 0 conv2d_141[0][0]
__________________________________________________________________________________________________
concatenate_284 (Concatenate) (None, 16, 16, 400) 0 concatenate_283[0][0]
dropout_293[0][0]
__________________________________________________________________________________________________
batch_normalization_296 (BatchN (None, 16, 16, 400) 1600 concatenate_284[0][0]
__________________________________________________________________________________________________
activation_296 (Activation) (None, 16, 16, 400) 0 batch_normalization_296[0][0]
__________________________________________________________________________________________________
conv2d_142 (Conv2D) (None, 16, 16, 32) 12800 activation_296[0][0]
__________________________________________________________________________________________________
dropout_294 (Dropout) (None, 16, 16, 32) 0 conv2d_142[0][0]
__________________________________________________________________________________________________
average_pooling2d_12 (AveragePo (None, 8, 8, 32) 0 dropout_294[0][0]
__________________________________________________________________________________________________
batch_normalization_297 (BatchN (None, 8, 8, 32) 128 average_pooling2d_12[0][0]
__________________________________________________________________________________________________
activation_297 (Activation) (None, 8, 8, 32) 0 batch_normalization_297[0][0]
__________________________________________________________________________________________________
separable_conv2d_157 (Separable (None, 8, 8, 32) 1312 activation_297[0][0]
__________________________________________________________________________________________________
dropout_295 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_157[0][0]
__________________________________________________________________________________________________
concatenate_285 (Concatenate) (None, 8, 8, 64) 0 average_pooling2d_12[0][0]
dropout_295[0][0]
__________________________________________________________________________________________________
batch_normalization_298 (BatchN (None, 8, 8, 64) 256 concatenate_285[0][0]
__________________________________________________________________________________________________
activation_298 (Activation) (None, 8, 8, 64) 0 batch_normalization_298[0][0]
__________________________________________________________________________________________________
separable_conv2d_158 (Separable (None, 8, 8, 32) 2624 activation_298[0][0]
__________________________________________________________________________________________________
dropout_296 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_158[0][0]
__________________________________________________________________________________________________
concatenate_286 (Concatenate) (None, 8, 8, 96) 0 concatenate_285[0][0]
dropout_296[0][0]
__________________________________________________________________________________________________
batch_normalization_299 (BatchN (None, 8, 8, 96) 384 concatenate_286[0][0]
__________________________________________________________________________________________________
activation_299 (Activation) (None, 8, 8, 96) 0 batch_normalization_299[0][0]
__________________________________________________________________________________________________
separable_conv2d_159 (Separable (None, 8, 8, 32) 3936 activation_299[0][0]
__________________________________________________________________________________________________
dropout_297 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_159[0][0]
__________________________________________________________________________________________________
concatenate_287 (Concatenate) (None, 8, 8, 128) 0 concatenate_286[0][0]
dropout_297[0][0]
__________________________________________________________________________________________________
batch_normalization_300 (BatchN (None, 8, 8, 128) 512 concatenate_287[0][0]
__________________________________________________________________________________________________
activation_300 (Activation) (None, 8, 8, 128) 0 batch_normalization_300[0][0]
__________________________________________________________________________________________________
separable_conv2d_160 (Separable (None, 8, 8, 32) 5248 activation_300[0][0]
__________________________________________________________________________________________________
dropout_298 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_160[0][0]
__________________________________________________________________________________________________
concatenate_288 (Concatenate) (None, 8, 8, 160) 0 concatenate_287[0][0]
dropout_298[0][0]
__________________________________________________________________________________________________
batch_normalization_301 (BatchN (None, 8, 8, 160) 640 concatenate_288[0][0]
__________________________________________________________________________________________________
activation_301 (Activation) (None, 8, 8, 160) 0 batch_normalization_301[0][0]
__________________________________________________________________________________________________
separable_conv2d_161 (Separable (None, 8, 8, 32) 6560 activation_301[0][0]
__________________________________________________________________________________________________
dropout_299 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_161[0][0]
__________________________________________________________________________________________________
concatenate_289 (Concatenate) (None, 8, 8, 192) 0 concatenate_288[0][0]
dropout_299[0][0]
__________________________________________________________________________________________________
batch_normalization_302 (BatchN (None, 8, 8, 192) 768 concatenate_289[0][0]
__________________________________________________________________________________________________
activation_302 (Activation) (None, 8, 8, 192) 0 batch_normalization_302[0][0]
__________________________________________________________________________________________________
separable_conv2d_162 (Separable (None, 8, 8, 32) 7872 activation_302[0][0]
__________________________________________________________________________________________________
dropout_300 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_162[0][0]
__________________________________________________________________________________________________
concatenate_290 (Concatenate) (None, 8, 8, 224) 0 concatenate_289[0][0]
dropout_300[0][0]
__________________________________________________________________________________________________
batch_normalization_303 (BatchN (None, 8, 8, 224) 896 concatenate_290[0][0]
__________________________________________________________________________________________________
activation_303 (Activation) (None, 8, 8, 224) 0 batch_normalization_303[0][0]
__________________________________________________________________________________________________
separable_conv2d_163 (Separable (None, 8, 8, 32) 9184 activation_303[0][0]
__________________________________________________________________________________________________
dropout_301 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_163[0][0]
__________________________________________________________________________________________________
concatenate_291 (Concatenate) (None, 8, 8, 256) 0 concatenate_290[0][0]
dropout_301[0][0]
__________________________________________________________________________________________________
batch_normalization_304 (BatchN (None, 8, 8, 256) 1024 concatenate_291[0][0]
__________________________________________________________________________________________________
activation_304 (Activation) (None, 8, 8, 256) 0 batch_normalization_304[0][0]
__________________________________________________________________________________________________
separable_conv2d_164 (Separable (None, 8, 8, 32) 10496 activation_304[0][0]
__________________________________________________________________________________________________
dropout_302 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_164[0][0]
__________________________________________________________________________________________________
concatenate_292 (Concatenate) (None, 8, 8, 288) 0 concatenate_291[0][0]
dropout_302[0][0]
__________________________________________________________________________________________________
batch_normalization_305 (BatchN (None, 8, 8, 288) 1152 concatenate_292[0][0]
__________________________________________________________________________________________________
activation_305 (Activation) (None, 8, 8, 288) 0 batch_normalization_305[0][0]
__________________________________________________________________________________________________
separable_conv2d_165 (Separable (None, 8, 8, 32) 11808 activation_305[0][0]
__________________________________________________________________________________________________
dropout_303 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_165[0][0]
__________________________________________________________________________________________________
concatenate_293 (Concatenate) (None, 8, 8, 320) 0 concatenate_292[0][0]
dropout_303[0][0]
__________________________________________________________________________________________________
batch_normalization_306 (BatchN (None, 8, 8, 320) 1280 concatenate_293[0][0]
__________________________________________________________________________________________________
activation_306 (Activation) (None, 8, 8, 320) 0 batch_normalization_306[0][0]
__________________________________________________________________________________________________
separable_conv2d_166 (Separable (None, 8, 8, 32) 13120 activation_306[0][0]
__________________________________________________________________________________________________
dropout_304 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_166[0][0]
__________________________________________________________________________________________________
concatenate_294 (Concatenate) (None, 8, 8, 352) 0 concatenate_293[0][0]
dropout_304[0][0]
__________________________________________________________________________________________________
batch_normalization_307 (BatchN (None, 8, 8, 352) 1408 concatenate_294[0][0]
__________________________________________________________________________________________________
activation_307 (Activation) (None, 8, 8, 352) 0 batch_normalization_307[0][0]
__________________________________________________________________________________________________
separable_conv2d_167 (Separable (None, 8, 8, 32) 14432 activation_307[0][0]
__________________________________________________________________________________________________
dropout_305 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_167[0][0]
__________________________________________________________________________________________________
concatenate_295 (Concatenate) (None, 8, 8, 384) 0 concatenate_294[0][0]
dropout_305[0][0]
__________________________________________________________________________________________________
batch_normalization_308 (BatchN (None, 8, 8, 384) 1536 concatenate_295[0][0]
__________________________________________________________________________________________________
activation_308 (Activation) (None, 8, 8, 384) 0 batch_normalization_308[0][0]
__________________________________________________________________________________________________
separable_conv2d_168 (Separable (None, 8, 8, 32) 15744 activation_308[0][0]
__________________________________________________________________________________________________
dropout_306 (Dropout) (None, 8, 8, 32) 0 separable_conv2d_168[0][0]
__________________________________________________________________________________________________
concatenate_296 (Concatenate) (None, 8, 8, 416) 0 concatenate_295[0][0]
dropout_306[0][0]
__________________________________________________________________________________________________
batch_normalization_309 (BatchN (None, 8, 8, 416) 1664 concatenate_296[0][0]
__________________________________________________________________________________________________
activation_309 (Activation) (None, 8, 8, 416) 0 batch_normalization_309[0][0]
__________________________________________________________________________________________________
conv2d_143 (Conv2D) (None, 8, 8, 32) 13312 activation_309[0][0]
__________________________________________________________________________________________________
dropout_307 (Dropout) (None, 8, 8, 32) 0 conv2d_143[0][0]
__________________________________________________________________________________________________
average_pooling2d_13 (AveragePo (None, 4, 4, 32) 0 dropout_307[0][0]
__________________________________________________________________________________________________
batch_normalization_310 (BatchN (None, 4, 4, 32) 128 average_pooling2d_13[0][0]
__________________________________________________________________________________________________
activation_310 (Activation) (None, 4, 4, 32) 0 batch_normalization_310[0][0]
__________________________________________________________________________________________________
separable_conv2d_169 (Separable (None, 4, 4, 16) 800 activation_310[0][0]
__________________________________________________________________________________________________
dropout_308 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_169[0][0]
__________________________________________________________________________________________________
concatenate_297 (Concatenate) (None, 4, 4, 48) 0 average_pooling2d_13[0][0]
dropout_308[0][0]
__________________________________________________________________________________________________
batch_normalization_311 (BatchN (None, 4, 4, 48) 192 concatenate_297[0][0]
__________________________________________________________________________________________________
activation_311 (Activation) (None, 4, 4, 48) 0 batch_normalization_311[0][0]
__________________________________________________________________________________________________
separable_conv2d_170 (Separable (None, 4, 4, 16) 1200 activation_311[0][0]
__________________________________________________________________________________________________
dropout_309 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_170[0][0]
__________________________________________________________________________________________________
concatenate_298 (Concatenate) (None, 4, 4, 64) 0 concatenate_297[0][0]
dropout_309[0][0]
__________________________________________________________________________________________________
batch_normalization_312 (BatchN (None, 4, 4, 64) 256 concatenate_298[0][0]
__________________________________________________________________________________________________
activation_312 (Activation) (None, 4, 4, 64) 0 batch_normalization_312[0][0]
__________________________________________________________________________________________________
separable_conv2d_171 (Separable (None, 4, 4, 16) 1600 activation_312[0][0]
__________________________________________________________________________________________________
dropout_310 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_171[0][0]
__________________________________________________________________________________________________
concatenate_299 (Concatenate) (None, 4, 4, 80) 0 concatenate_298[0][0]
dropout_310[0][0]
__________________________________________________________________________________________________
batch_normalization_313 (BatchN (None, 4, 4, 80) 320 concatenate_299[0][0]
__________________________________________________________________________________________________
activation_313 (Activation) (None, 4, 4, 80) 0 batch_normalization_313[0][0]
__________________________________________________________________________________________________
separable_conv2d_172 (Separable (None, 4, 4, 16) 2000 activation_313[0][0]
__________________________________________________________________________________________________
dropout_311 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_172[0][0]
__________________________________________________________________________________________________
concatenate_300 (Concatenate) (None, 4, 4, 96) 0 concatenate_299[0][0]
dropout_311[0][0]
__________________________________________________________________________________________________
batch_normalization_314 (BatchN (None, 4, 4, 96) 384 concatenate_300[0][0]
__________________________________________________________________________________________________
activation_314 (Activation) (None, 4, 4, 96) 0 batch_normalization_314[0][0]
__________________________________________________________________________________________________
separable_conv2d_173 (Separable (None, 4, 4, 16) 2400 activation_314[0][0]
__________________________________________________________________________________________________
dropout_312 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_173[0][0]
__________________________________________________________________________________________________
concatenate_301 (Concatenate) (None, 4, 4, 112) 0 concatenate_300[0][0]
dropout_312[0][0]
__________________________________________________________________________________________________
batch_normalization_315 (BatchN (None, 4, 4, 112) 448 concatenate_301[0][0]
__________________________________________________________________________________________________
activation_315 (Activation) (None, 4, 4, 112) 0 batch_normalization_315[0][0]
__________________________________________________________________________________________________
separable_conv2d_174 (Separable (None, 4, 4, 16) 2800 activation_315[0][0]
__________________________________________________________________________________________________
dropout_313 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_174[0][0]
__________________________________________________________________________________________________
concatenate_302 (Concatenate) (None, 4, 4, 128) 0 concatenate_301[0][0]
dropout_313[0][0]
__________________________________________________________________________________________________
batch_normalization_316 (BatchN (None, 4, 4, 128) 512 concatenate_302[0][0]
__________________________________________________________________________________________________
activation_316 (Activation) (None, 4, 4, 128) 0 batch_normalization_316[0][0]
__________________________________________________________________________________________________
separable_conv2d_175 (Separable (None, 4, 4, 16) 3200 activation_316[0][0]
__________________________________________________________________________________________________
dropout_314 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_175[0][0]
__________________________________________________________________________________________________
concatenate_303 (Concatenate) (None, 4, 4, 144) 0 concatenate_302[0][0]
dropout_314[0][0]
__________________________________________________________________________________________________
batch_normalization_317 (BatchN (None, 4, 4, 144) 576 concatenate_303[0][0]
__________________________________________________________________________________________________
activation_317 (Activation) (None, 4, 4, 144) 0 batch_normalization_317[0][0]
__________________________________________________________________________________________________
separable_conv2d_176 (Separable (None, 4, 4, 16) 3600 activation_317[0][0]
__________________________________________________________________________________________________
dropout_315 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_176[0][0]
__________________________________________________________________________________________________
concatenate_304 (Concatenate) (None, 4, 4, 160) 0 concatenate_303[0][0]
dropout_315[0][0]
__________________________________________________________________________________________________
batch_normalization_318 (BatchN (None, 4, 4, 160) 640 concatenate_304[0][0]
__________________________________________________________________________________________________
activation_318 (Activation) (None, 4, 4, 160) 0 batch_normalization_318[0][0]
__________________________________________________________________________________________________
separable_conv2d_177 (Separable (None, 4, 4, 16) 4000 activation_318[0][0]
__________________________________________________________________________________________________
dropout_316 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_177[0][0]
__________________________________________________________________________________________________
concatenate_305 (Concatenate) (None, 4, 4, 176) 0 concatenate_304[0][0]
dropout_316[0][0]
__________________________________________________________________________________________________
batch_normalization_319 (BatchN (None, 4, 4, 176) 704 concatenate_305[0][0]
__________________________________________________________________________________________________
activation_319 (Activation) (None, 4, 4, 176) 0 batch_normalization_319[0][0]
__________________________________________________________________________________________________
separable_conv2d_178 (Separable (None, 4, 4, 16) 4400 activation_319[0][0]
__________________________________________________________________________________________________
dropout_317 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_178[0][0]
__________________________________________________________________________________________________
concatenate_306 (Concatenate) (None, 4, 4, 192) 0 concatenate_305[0][0]
dropout_317[0][0]
__________________________________________________________________________________________________
batch_normalization_320 (BatchN (None, 4, 4, 192) 768 concatenate_306[0][0]
__________________________________________________________________________________________________
activation_320 (Activation) (None, 4, 4, 192) 0 batch_normalization_320[0][0]
__________________________________________________________________________________________________
separable_conv2d_179 (Separable (None, 4, 4, 16) 4800 activation_320[0][0]
__________________________________________________________________________________________________
dropout_318 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_179[0][0]
__________________________________________________________________________________________________
concatenate_307 (Concatenate) (None, 4, 4, 208) 0 concatenate_306[0][0]
dropout_318[0][0]
__________________________________________________________________________________________________
batch_normalization_321 (BatchN (None, 4, 4, 208) 832 concatenate_307[0][0]
__________________________________________________________________________________________________
activation_321 (Activation) (None, 4, 4, 208) 0 batch_normalization_321[0][0]
__________________________________________________________________________________________________
separable_conv2d_180 (Separable (None, 4, 4, 16) 5200 activation_321[0][0]
__________________________________________________________________________________________________
dropout_319 (Dropout) (None, 4, 4, 16) 0 separable_conv2d_180[0][0]
__________________________________________________________________________________________________
concatenate_308 (Concatenate) (None, 4, 4, 224) 0 concatenate_307[0][0]
dropout_319[0][0]
__________________________________________________________________________________________________
batch_normalization_322 (BatchN (None, 4, 4, 224) 896 concatenate_308[0][0]
__________________________________________________________________________________________________
activation_322 (Activation) (None, 4, 4, 224) 0 batch_normalization_322[0][0]
__________________________________________________________________________________________________
conv2d_144 (Conv2D) (None, 4, 4, 16) 3584 activation_322[0][0]
__________________________________________________________________________________________________
dropout_320 (Dropout) (None, 4, 4, 16) 0 conv2d_144[0][0]
__________________________________________________________________________________________________
average_pooling2d_14 (AveragePo (None, 2, 2, 16) 0 dropout_320[0][0]
__________________________________________________________________________________________________
batch_normalization_323 (BatchN (None, 2, 2, 16) 64 average_pooling2d_14[0][0]
__________________________________________________________________________________________________
activation_323 (Activation) (None, 2, 2, 16) 0 batch_normalization_323[0][0]
__________________________________________________________________________________________________
conv2d_145 (Conv2D) (None, 2, 2, 8) 1152 activation_323[0][0]
__________________________________________________________________________________________________
dropout_321 (Dropout) (None, 2, 2, 8) 0 conv2d_145[0][0]
__________________________________________________________________________________________________
concatenate_309 (Concatenate) (None, 2, 2, 24) 0 average_pooling2d_14[0][0]
dropout_321[0][0]
__________________________________________________________________________________________________
batch_normalization_324 (BatchN (None, 2, 2, 24) 96 concatenate_309[0][0]
__________________________________________________________________________________________________
activation_324 (Activation) (None, 2, 2, 24) 0 batch_normalization_324[0][0]
__________________________________________________________________________________________________
conv2d_146 (Conv2D) (None, 2, 2, 8) 1728 activation_324[0][0]
__________________________________________________________________________________________________
dropout_322 (Dropout) (None, 2, 2, 8) 0 conv2d_146[0][0]
__________________________________________________________________________________________________
concatenate_310 (Concatenate) (None, 2, 2, 32) 0 concatenate_309[0][0]
dropout_322[0][0]
__________________________________________________________________________________________________
batch_normalization_325 (BatchN (None, 2, 2, 32) 128 concatenate_310[0][0]
__________________________________________________________________________________________________
activation_325 (Activation) (None, 2, 2, 32) 0 batch_normalization_325[0][0]
__________________________________________________________________________________________________
conv2d_147 (Conv2D) (None, 2, 2, 8) 2304 activation_325[0][0]
__________________________________________________________________________________________________
dropout_323 (Dropout) (None, 2, 2, 8) 0 conv2d_147[0][0]
__________________________________________________________________________________________________
concatenate_311 (Concatenate) (None, 2, 2, 40) 0 concatenate_310[0][0]
dropout_323[0][0]
__________________________________________________________________________________________________
batch_normalization_326 (BatchN (None, 2, 2, 40) 160 concatenate_311[0][0]
__________________________________________________________________________________________________
activation_326 (Activation) (None, 2, 2, 40) 0 batch_normalization_326[0][0]
__________________________________________________________________________________________________
conv2d_148 (Conv2D) (None, 2, 2, 8) 2880 activation_326[0][0]
__________________________________________________________________________________________________
dropout_324 (Dropout) (None, 2, 2, 8) 0 conv2d_148[0][0]
__________________________________________________________________________________________________
concatenate_312 (Concatenate) (None, 2, 2, 48) 0 concatenate_311[0][0]
dropout_324[0][0]
__________________________________________________________________________________________________
batch_normalization_327 (BatchN (None, 2, 2, 48) 192 concatenate_312[0][0]
__________________________________________________________________________________________________
activation_327 (Activation) (None, 2, 2, 48) 0 batch_normalization_327[0][0]
__________________________________________________________________________________________________
conv2d_149 (Conv2D) (None, 2, 2, 8) 3456 activation_327[0][0]
__________________________________________________________________________________________________
dropout_325 (Dropout) (None, 2, 2, 8) 0 conv2d_149[0][0]
__________________________________________________________________________________________________
concatenate_313 (Concatenate) (None, 2, 2, 56) 0 concatenate_312[0][0]
dropout_325[0][0]
__________________________________________________________________________________________________
batch_normalization_328 (BatchN (None, 2, 2, 56) 224 concatenate_313[0][0]
__________________________________________________________________________________________________
activation_328 (Activation) (None, 2, 2, 56) 0 batch_normalization_328[0][0]
__________________________________________________________________________________________________
conv2d_150 (Conv2D) (None, 2, 2, 8) 4032 activation_328[0][0]
__________________________________________________________________________________________________
dropout_326 (Dropout) (None, 2, 2, 8) 0 conv2d_150[0][0]
__________________________________________________________________________________________________
concatenate_314 (Concatenate) (None, 2, 2, 64) 0 concatenate_313[0][0]
dropout_326[0][0]
__________________________________________________________________________________________________
batch_normalization_329 (BatchN (None, 2, 2, 64) 256 concatenate_314[0][0]
__________________________________________________________________________________________________
activation_329 (Activation) (None, 2, 2, 64) 0 batch_normalization_329[0][0]
__________________________________________________________________________________________________
conv2d_151 (Conv2D) (None, 2, 2, 8) 4608 activation_329[0][0]
__________________________________________________________________________________________________
dropout_327 (Dropout) (None, 2, 2, 8) 0 conv2d_151[0][0]
__________________________________________________________________________________________________
concatenate_315 (Concatenate) (None, 2, 2, 72) 0 concatenate_314[0][0]
dropout_327[0][0]
__________________________________________________________________________________________________
batch_normalization_330 (BatchN (None, 2, 2, 72) 288 concatenate_315[0][0]
__________________________________________________________________________________________________
activation_330 (Activation) (None, 2, 2, 72) 0 batch_normalization_330[0][0]
__________________________________________________________________________________________________
conv2d_152 (Conv2D) (None, 2, 2, 8) 5184 activation_330[0][0]
__________________________________________________________________________________________________
dropout_328 (Dropout) (None, 2, 2, 8) 0 conv2d_152[0][0]
__________________________________________________________________________________________________
concatenate_316 (Concatenate) (None, 2, 2, 80) 0 concatenate_315[0][0]
dropout_328[0][0]
__________________________________________________________________________________________________
batch_normalization_331 (BatchN (None, 2, 2, 80) 320 concatenate_316[0][0]
__________________________________________________________________________________________________
activation_331 (Activation) (None, 2, 2, 80) 0 batch_normalization_331[0][0]
__________________________________________________________________________________________________
conv2d_153 (Conv2D) (None, 2, 2, 8) 5760 activation_331[0][0]
__________________________________________________________________________________________________
dropout_329 (Dropout) (None, 2, 2, 8) 0 conv2d_153[0][0]
__________________________________________________________________________________________________
concatenate_317 (Concatenate) (None, 2, 2, 88) 0 concatenate_316[0][0]
dropout_329[0][0]
__________________________________________________________________________________________________
batch_normalization_332 (BatchN (None, 2, 2, 88) 352 concatenate_317[0][0]
__________________________________________________________________________________________________
activation_332 (Activation) (None, 2, 2, 88) 0 batch_normalization_332[0][0]
__________________________________________________________________________________________________
conv2d_154 (Conv2D) (None, 2, 2, 8) 6336 activation_332[0][0]
__________________________________________________________________________________________________
dropout_330 (Dropout) (None, 2, 2, 8) 0 conv2d_154[0][0]
__________________________________________________________________________________________________
concatenate_318 (Concatenate) (None, 2, 2, 96) 0 concatenate_317[0][0]
dropout_330[0][0]
__________________________________________________________________________________________________
batch_normalization_333 (BatchN (None, 2, 2, 96) 384 concatenate_318[0][0]
__________________________________________________________________________________________________
activation_333 (Activation) (None, 2, 2, 96) 0 batch_normalization_333[0][0]
__________________________________________________________________________________________________
conv2d_155 (Conv2D) (None, 2, 2, 8) 6912 activation_333[0][0]
__________________________________________________________________________________________________
dropout_331 (Dropout) (None, 2, 2, 8) 0 conv2d_155[0][0]
__________________________________________________________________________________________________
concatenate_319 (Concatenate) (None, 2, 2, 104) 0 concatenate_318[0][0]
dropout_331[0][0]
__________________________________________________________________________________________________
batch_normalization_334 (BatchN (None, 2, 2, 104) 416 concatenate_319[0][0]
__________________________________________________________________________________________________
activation_334 (Activation) (None, 2, 2, 104) 0 batch_normalization_334[0][0]
__________________________________________________________________________________________________
conv2d_156 (Conv2D) (None, 2, 2, 8) 7488 activation_334[0][0]
__________________________________________________________________________________________________
dropout_332 (Dropout) (None, 2, 2, 8) 0 conv2d_156[0][0]
__________________________________________________________________________________________________
concatenate_320 (Concatenate) (None, 2, 2, 112) 0 concatenate_319[0][0]
dropout_332[0][0]
__________________________________________________________________________________________________
batch_normalization_335 (BatchN (None, 2, 2, 112) 448 concatenate_320[0][0]
__________________________________________________________________________________________________
activation_335 (Activation) (None, 2, 2, 112) 0 batch_normalization_335[0][0]
__________________________________________________________________________________________________
average_pooling2d_15 (AveragePo (None, 1, 1, 112) 0 activation_335[0][0]
__________________________________________________________________________________________________
flatten_3 (Flatten) (None, 112) 0 average_pooling2d_15[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 10) 1130 flatten_3[0][0]
==================================================================================================
Total params: 1,106,298
Trainable params: 1,087,162
Non-trainable params: 19,136
__________________________________________________________________________________________________
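Two of the rows above are easy to verify by hand, which is a useful sanity check on how the dense block grows. A minimal sketch:

# a bias-free 3x3 Conv2D mapping 96 -> 8 channels, as in conv2d_155
print(3 * 3 * 96 * 8)   # 6912 weights
# BatchNormalization keeps 4 values per channel (gamma, beta, and the
# moving mean/variance), as in batch_normalization_333 over 96 channels
print(4 * 96)           # 384 parameters, half of them non-trainable

The non-trainable total of 19,136 is exactly the moving means and variances accumulated across all the BatchNormalization layers; every other layer here is either fully trainable (Conv2D, Dense) or parameter-free (Dropout, pooling, Concatenate).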
In [0]:
# Define the loss function and optimizer: SGD with momentum and
# time-based learning-rate decay
learning_rate = 0.1
decay_rate = learning_rate / epochs
momentum = 0.8
sgd = SGD(lr=learning_rate, momentum=momentum, decay=decay_rate, nesterov=False)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
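For reference, Keras's SGD applies this decay once per update step, not per epoch: the effective rate is lr / (1 + decay * iterations). A minimal sketch of how that plays out over this run, assuming the 391 updates per epoch visible in the training log below:

# effective learning rate under Keras-style time-based decay (sketch)
lr, decay, steps_per_epoch = 0.1, 0.1 / 250, 391
for epoch in (1, 50, 250):
    print(epoch, lr / (1 + decay * epoch * steps_per_epoch))
# -> roughly 0.086 after epoch 1, 0.011 after epoch 50, 0.0025 at the end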
In [28]:
# Alternative to model.fit_generator: drive both generators by hand.
# src: https://stackoverflow.com/questions/48820093/how-to-incorporate-validation-data-in-kerass-datagen-flow
# for e in range(epochs):
#     print('================')
#     print('| Epoch ', e, ' |')
#     print('================')
#     batches = 0
#     # combine both generators; in Python 3, zip() pairs them lazily
#     for (x_batch, y_batch), (val_x, val_y) in zip(
#             train_datagen.flow(x_train, y_train, batch_size=batch_size),
#             validation_datagen.flow(x_test, y_test, batch_size=batch_size)):
#         model.fit(x_batch, y_batch, validation_data=(val_x, val_y))
#         batches += 1
#         if batches >= len(x_train) / batch_size:
#             # we need to break the loop by hand because
#             # the generators loop indefinitely
#             break
filepath="file_name-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
model.fit_generator(
train_datagen.flow(x_train, y_train),
steps_per_epoch = len(x_train) / batch_size,
epochs = epochs,
validation_data = validation_datagen.flow(x_test, y_test),
validation_steps = len(x_test) / batch_size,
callbacks = callbacks_list,
use_multiprocessing=True,
workers=16
)
WARNING:tensorflow:Variable *= will be deprecated. Use `var.assign(var * other)` if you want assignment to the variable value or `x = x * y` if you want a new python Tensor object.
Epoch 1/250
391/390 [==============================] - 118s 303ms/step - loss: 1.9008 - acc: 0.2915 - val_loss: 1.7308 - val_acc: 0.3517
Epoch 00001: val_acc improved from -inf to 0.35166, saving model to file_name-01-0.35.hdf5
Epoch 2/250
391/390 [==============================] - 97s 249ms/step - loss: 1.6080 - acc: 0.4062 - val_loss: 1.9893 - val_acc: 0.3299
Epoch 00002: val_acc did not improve from 0.35166
Epoch 3/250
391/390 [==============================] - 98s 251ms/step - loss: 1.4706 - acc: 0.4617 - val_loss: 2.0973 - val_acc: 0.3956
Epoch 00003: val_acc improved from 0.35166 to 0.39557, saving model to file_name-03-0.40.hdf5
Epoch 4/250
391/390 [==============================] - 99s 254ms/step - loss: 1.4100 - acc: 0.4884 - val_loss: 1.8381 - val_acc: 0.4165
Epoch 00004: val_acc improved from 0.39557 to 0.41653, saving model to file_name-04-0.42.hdf5
Epoch 5/250
391/390 [==============================] - 98s 250ms/step - loss: 1.3258 - acc: 0.5225 - val_loss: 2.0565 - val_acc: 0.4312
Epoch 00005: val_acc improved from 0.41653 to 0.43117, saving model to file_name-05-0.43.hdf5
Epoch 6/250
391/390 [==============================] - 98s 250ms/step - loss: 1.2793 - acc: 0.5360 - val_loss: 2.0870 - val_acc: 0.4165
Epoch 00006: val_acc did not improve from 0.43117
Epoch 7/250
391/390 [==============================] - 99s 252ms/step - loss: 1.2363 - acc: 0.5558 - val_loss: 1.9100 - val_acc: 0.4624
Epoch 00007: val_acc improved from 0.43117 to 0.46242, saving model to file_name-07-0.46.hdf5
Epoch 8/250
391/390 [==============================] - 99s 254ms/step - loss: 1.2047 - acc: 0.5770 - val_loss: 2.5978 - val_acc: 0.4173
Epoch 00008: val_acc did not improve from 0.46242
Epoch 9/250
391/390 [==============================] - 98s 252ms/step - loss: 1.1522 - acc: 0.5938 - val_loss: 1.4406 - val_acc: 0.5771
Epoch 00009: val_acc improved from 0.46242 to 0.57714, saving model to file_name-09-0.58.hdf5
Epoch 10/250
391/390 [==============================] - 98s 249ms/step - loss: 1.1275 - acc: 0.6022 - val_loss: 1.2662 - val_acc: 0.6076
Epoch 00010: val_acc improved from 0.57714 to 0.60759, saving model to file_name-10-0.61.hdf5
Epoch 11/250
391/390 [==============================] - 97s 247ms/step - loss: 1.1048 - acc: 0.6115 - val_loss: 1.3077 - val_acc: 0.6021
Epoch 00011: val_acc did not improve from 0.60759
Epoch 12/250
391/390 [==============================] - 99s 253ms/step - loss: 1.0620 - acc: 0.6278 - val_loss: 2.7009 - val_acc: 0.4411
Epoch 00012: val_acc did not improve from 0.60759
Epoch 13/250
391/390 [==============================] - 97s 248ms/step - loss: 1.0196 - acc: 0.6444 - val_loss: 2.0617 - val_acc: 0.4604
Epoch 00013: val_acc did not improve from 0.60759
Epoch 14/250
391/390 [==============================] - 97s 248ms/step - loss: 1.0102 - acc: 0.6472 - val_loss: 1.4756 - val_acc: 0.5795
Epoch 00014: val_acc did not improve from 0.60759
Epoch 15/250
391/390 [==============================] - 97s 248ms/step - loss: 1.0000 - acc: 0.6472 - val_loss: 1.1170 - val_acc: 0.6432
Epoch 00015: val_acc improved from 0.60759 to 0.64320, saving model to file_name-15-0.64.hdf5
Epoch 16/250
391/390 [==============================] - 98s 251ms/step - loss: 0.9658 - acc: 0.6630 - val_loss: 1.2958 - val_acc: 0.6203
Epoch 00016: val_acc did not improve from 0.64320
Epoch 17/250
391/390 [==============================] - 97s 249ms/step - loss: 0.9468 - acc: 0.6714 - val_loss: 1.0492 - val_acc: 0.6661
Epoch 00017: val_acc improved from 0.64320 to 0.66614, saving model to file_name-17-0.67.hdf5
Epoch 18/250
391/390 [==============================] - 97s 248ms/step - loss: 0.9221 - acc: 0.6735 - val_loss: 1.2131 - val_acc: 0.6420
Epoch 00018: val_acc did not improve from 0.66614
Epoch 19/250
391/390 [==============================] - 97s 249ms/step - loss: 0.9204 - acc: 0.6769 - val_loss: 1.4557 - val_acc: 0.6013
Epoch 00019: val_acc did not improve from 0.66614
Epoch 20/250
391/390 [==============================] - 99s 252ms/step - loss: 0.8984 - acc: 0.6843 - val_loss: 1.1789 - val_acc: 0.6559
Epoch 00020: val_acc did not improve from 0.66614
Epoch 21/250
391/390 [==============================] - 97s 248ms/step - loss: 0.8918 - acc: 0.6881 - val_loss: 1.0824 - val_acc: 0.6744
Epoch 00021: val_acc improved from 0.66614 to 0.67445, saving model to file_name-21-0.67.hdf5
Epoch 22/250
391/390 [==============================] - 96s 247ms/step - loss: 0.8396 - acc: 0.7066 - val_loss: 1.7209 - val_acc: 0.6028
Epoch 00022: val_acc did not improve from 0.67445
Epoch 23/250
391/390 [==============================] - 97s 248ms/step - loss: 0.8728 - acc: 0.6931 - val_loss: 0.9785 - val_acc: 0.7128
Epoch 00023: val_acc improved from 0.67445 to 0.71282, saving model to file_name-23-0.71.hdf5
Epoch 24/250
391/390 [==============================] - 100s 254ms/step - loss: 0.8450 - acc: 0.7056 - val_loss: 1.3540 - val_acc: 0.6369
Epoch 00024: val_acc did not improve from 0.71282
Epoch 25/250
391/390 [==============================] - 99s 253ms/step - loss: 0.8261 - acc: 0.7148 - val_loss: 1.0270 - val_acc: 0.6958
Epoch 00025: val_acc did not improve from 0.71282
Epoch 26/250
391/390 [==============================] - 99s 253ms/step - loss: 0.8126 - acc: 0.7157 - val_loss: 1.2789 - val_acc: 0.6503
Epoch 00026: val_acc did not improve from 0.71282
Epoch 27/250
391/390 [==============================] - 98s 251ms/step - loss: 0.8183 - acc: 0.7202 - val_loss: 1.0027 - val_acc: 0.7029
Epoch 00027: val_acc did not improve from 0.71282
Epoch 28/250
391/390 [==============================] - 100s 256ms/step - loss: 0.8125 - acc: 0.7160 - val_loss: 0.9795 - val_acc: 0.7144
Epoch 00028: val_acc improved from 0.71282 to 0.71440, saving model to file_name-28-0.71.hdf5
Epoch 29/250
391/390 [==============================] - 98s 251ms/step - loss: 0.7737 - acc: 0.7292 - val_loss: 0.8747 - val_acc: 0.7251
Epoch 00029: val_acc improved from 0.71440 to 0.72508, saving model to file_name-29-0.73.hdf5
Epoch 30/250
391/390 [==============================] - 98s 251ms/step - loss: 0.7917 - acc: 0.7258 - val_loss: 1.0572 - val_acc: 0.6982
Epoch 00030: val_acc did not improve from 0.72508
Epoch 31/250
391/390 [==============================] - 98s 251ms/step - loss: 0.7893 - acc: 0.7231 - val_loss: 0.9365 - val_acc: 0.7065
Epoch 00031: val_acc did not improve from 0.72508
Epoch 32/250
391/390 [==============================] - 100s 256ms/step - loss: 0.7626 - acc: 0.7360 - val_loss: 1.1045 - val_acc: 0.7029
Epoch 00032: val_acc did not improve from 0.72508
Epoch 33/250
391/390 [==============================] - 98s 250ms/step - loss: 0.7549 - acc: 0.7369 - val_loss: 0.9889 - val_acc: 0.6970
Epoch 00033: val_acc did not improve from 0.72508
Epoch 34/250
391/390 [==============================] - 97s 248ms/step - loss: 0.7378 - acc: 0.7422 - val_loss: 0.9170 - val_acc: 0.7282
Epoch 00034: val_acc improved from 0.72508 to 0.72824, saving model to file_name-34-0.73.hdf5
Epoch 35/250
391/390 [==============================] - 97s 247ms/step - loss: 0.7468 - acc: 0.7400 - val_loss: 1.1174 - val_acc: 0.6871
Epoch 00035: val_acc did not improve from 0.72824
Epoch 36/250
391/390 [==============================] - 99s 253ms/step - loss: 0.7444 - acc: 0.7460 - val_loss: 1.0497 - val_acc: 0.7144
Epoch 00036: val_acc did not improve from 0.72824
Epoch 37/250
391/390 [==============================] - 97s 248ms/step - loss: 0.7226 - acc: 0.7501 - val_loss: 0.8920 - val_acc: 0.7421
Epoch 00037: val_acc improved from 0.72824 to 0.74209, saving model to file_name-37-0.74.hdf5
Epoch 38/250
391/390 [==============================] - 98s 251ms/step - loss: 0.7212 - acc: 0.7467 - val_loss: 1.2433 - val_acc: 0.6776
Epoch 00038: val_acc did not improve from 0.74209
Epoch 39/250
391/390 [==============================] - 99s 253ms/step - loss: 0.7249 - acc: 0.7478 - val_loss: 0.9351 - val_acc: 0.7278
Epoch 00039: val_acc did not improve from 0.74209
Epoch 40/250
391/390 [==============================] - 100s 256ms/step - loss: 0.7263 - acc: 0.7502 - val_loss: 0.9810 - val_acc: 0.7310
Epoch 00040: val_acc did not improve from 0.74209
Epoch 41/250
391/390 [==============================] - 99s 253ms/step - loss: 0.7074 - acc: 0.7591 - val_loss: 1.0603 - val_acc: 0.7017
Epoch 00041: val_acc did not improve from 0.74209
Epoch 42/250
391/390 [==============================] - 99s 252ms/step - loss: 0.6958 - acc: 0.7578 - val_loss: 0.8167 - val_acc: 0.7520
Epoch 00042: val_acc improved from 0.74209 to 0.75198, saving model to file_name-42-0.75.hdf5
Epoch 43/250
391/390 [==============================] - 98s 251ms/step - loss: 0.7017 - acc: 0.7618 - val_loss: 0.8197 - val_acc: 0.7627
Epoch 00043: val_acc improved from 0.75198 to 0.76266, saving model to file_name-43-0.76.hdf5
Epoch 44/250
391/390 [==============================] - 100s 256ms/step - loss: 0.6839 - acc: 0.7621 - val_loss: 0.9528 - val_acc: 0.7215
Epoch 00044: val_acc did not improve from 0.76266
Epoch 45/250
391/390 [==============================] - 99s 253ms/step - loss: 0.6599 - acc: 0.7709 - val_loss: 0.8129 - val_acc: 0.7650
Epoch 00045: val_acc improved from 0.76266 to 0.76503, saving model to file_name-45-0.77.hdf5
Epoch 46/250
391/390 [==============================] - 98s 252ms/step - loss: 0.6705 - acc: 0.7677 - val_loss: 0.8283 - val_acc: 0.7532
Epoch 00046: val_acc did not improve from 0.76503
Epoch 47/250
391/390 [==============================] - 98s 250ms/step - loss: 0.6861 - acc: 0.7593 - val_loss: 1.0257 - val_acc: 0.7061
Epoch 00047: val_acc did not improve from 0.76503
Epoch 48/250
391/390 [==============================] - 99s 254ms/step - loss: 0.6562 - acc: 0.7753 - val_loss: 0.9842 - val_acc: 0.7334
Epoch 00048: val_acc did not improve from 0.76503
Epoch 49/250
391/390 [==============================] - 97s 247ms/step - loss: 0.6571 - acc: 0.7738 - val_loss: 0.7682 - val_acc: 0.7749
Epoch 00049: val_acc improved from 0.76503 to 0.77492, saving model to file_name-49-0.77.hdf5
Epoch 50/250
391/390 [==============================] - 97s 249ms/step - loss: 0.6517 - acc: 0.7732 - val_loss: 0.9134 - val_acc: 0.7555
Epoch 00050: val_acc did not improve from 0.77492
Epoch 51/250
391/390 [==============================] - 98s 251ms/step - loss: 0.6761 - acc: 0.7653 - val_loss: 0.7737 - val_acc: 0.7627
Epoch 00051: val_acc did not improve from 0.77492
Epoch 52/250
391/390 [==============================] - 99s 254ms/step - loss: 0.6530 - acc: 0.7721 - val_loss: 0.7924 - val_acc: 0.7690
Epoch 00052: val_acc did not improve from 0.77492
Epoch 53/250
391/390 [==============================] - 98s 250ms/step - loss: 0.6394 - acc: 0.7774 - val_loss: 0.8683 - val_acc: 0.7540
Epoch 00053: val_acc did not improve from 0.77492
Epoch 54/250
391/390 [==============================] - 98s 250ms/step - loss: 0.6493 - acc: 0.7777 - val_loss: 0.9384 - val_acc: 0.7393
Epoch 00054: val_acc did not improve from 0.77492
Epoch 55/250
391/390 [==============================] - 98s 249ms/step - loss: 0.6290 - acc: 0.7855 - val_loss: 0.9930 - val_acc: 0.7239
Epoch 00055: val_acc did not improve from 0.77492
Epoch 56/250
391/390 [==============================] - 100s 256ms/step - loss: 0.6418 - acc: 0.7781 - val_loss: 0.7567 - val_acc: 0.7725
Epoch 00056: val_acc did not improve from 0.77492
Epoch 57/250
391/390 [==============================] - 99s 252ms/step - loss: 0.6274 - acc: 0.7790 - val_loss: 0.8115 - val_acc: 0.7619
Epoch 00057: val_acc did not improve from 0.77492
Epoch 58/250
391/390 [==============================] - 99s 252ms/step - loss: 0.6156 - acc: 0.7872 - val_loss: 1.2022 - val_acc: 0.7041
Epoch 00058: val_acc did not improve from 0.77492
Epoch 59/250
391/390 [==============================] - 98s 251ms/step - loss: 0.6249 - acc: 0.7801 - val_loss: 0.7859 - val_acc: 0.7686
Epoch 00059: val_acc did not improve from 0.77492
Epoch 60/250
391/390 [==============================] - 99s 253ms/step - loss: 0.6197 - acc: 0.7887 - val_loss: 0.6969 - val_acc: 0.7896
Epoch 00060: val_acc improved from 0.77492 to 0.78956, saving model to file_name-60-0.79.hdf5
Epoch 61/250
391/390 [==============================] - 97s 247ms/step - loss: 0.6061 - acc: 0.7912 - val_loss: 0.7165 - val_acc: 0.7955
Epoch 00061: val_acc improved from 0.78956 to 0.79549, saving model to file_name-61-0.80.hdf5
Epoch 62/250
391/390 [==============================] - 97s 248ms/step - loss: 0.6272 - acc: 0.7861 - val_loss: 0.9935 - val_acc: 0.7397
Epoch 00062: val_acc did not improve from 0.79549
Epoch 63/250
391/390 [==============================] - 98s 250ms/step - loss: 0.6094 - acc: 0.7913 - val_loss: 0.6591 - val_acc: 0.7943
Epoch 00063: val_acc did not improve from 0.79549
Epoch 64/250
391/390 [==============================] - 99s 253ms/step - loss: 0.6155 - acc: 0.7888 - val_loss: 1.0113 - val_acc: 0.7385
Epoch 00064: val_acc did not improve from 0.79549
Epoch 65/250
391/390 [==============================] - 98s 250ms/step - loss: 0.6004 - acc: 0.7922 - val_loss: 0.9009 - val_acc: 0.7500
Epoch 00065: val_acc did not improve from 0.79549
Epoch 66/250
391/390 [==============================] - 98s 250ms/step - loss: 0.5988 - acc: 0.7925 - val_loss: 0.7342 - val_acc: 0.7848
Epoch 00066: val_acc did not improve from 0.79549
Epoch 67/250
391/390 [==============================] - 98s 250ms/step - loss: 0.5994 - acc: 0.7907 - val_loss: 0.6886 - val_acc: 0.7892
Epoch 00067: val_acc did not improve from 0.79549
Epoch 68/250
391/390 [==============================] - 99s 254ms/step - loss: 0.5950 - acc: 0.7944 - val_loss: 0.8588 - val_acc: 0.7694
Epoch 00068: val_acc did not improve from 0.79549
Epoch 69/250
391/390 [==============================] - 98s 250ms/step - loss: 0.5945 - acc: 0.7940 - val_loss: 0.6589 - val_acc: 0.8117
Epoch 00069: val_acc improved from 0.79549 to 0.81171, saving model to file_name-69-0.81.hdf5
Epoch 70/250
391/390 [==============================] - 97s 247ms/step - loss: 0.5904 - acc: 0.7978 - val_loss: 0.7303 - val_acc: 0.7915
Epoch 00070: val_acc did not improve from 0.81171
Epoch 71/250
391/390 [==============================] - 97s 247ms/step - loss: 0.5747 - acc: 0.8016 - val_loss: 0.6891 - val_acc: 0.8006
Epoch 00071: val_acc did not improve from 0.81171
Epoch 72/250
391/390 [==============================] - 100s 255ms/step - loss: 0.5840 - acc: 0.7996 - val_loss: 0.7175 - val_acc: 0.7967
Epoch 00072: val_acc did not improve from 0.81171
Epoch 73/250
391/390 [==============================] - 98s 251ms/step - loss: 0.5739 - acc: 0.8052 - val_loss: 0.8005 - val_acc: 0.7789
Epoch 00073: val_acc did not improve from 0.81171
Epoch 74/250
391/390 [==============================] - 98s 251ms/step - loss: 0.5750 - acc: 0.8047 - val_loss: 0.7571 - val_acc: 0.7888
Epoch 00074: val_acc did not improve from 0.81171
Epoch 75/250
391/390 [==============================] - 97s 249ms/step - loss: 0.5622 - acc: 0.8109 - val_loss: 0.6270 - val_acc: 0.8078
Epoch 00075: val_acc did not improve from 0.81171
Epoch 76/250
391/390 [==============================] - 99s 252ms/step - loss: 0.5874 - acc: 0.7997 - val_loss: 0.6966 - val_acc: 0.7939
Epoch 00076: val_acc did not improve from 0.81171
Epoch 77/250
391/390 [==============================] - 97s 248ms/step - loss: 0.5718 - acc: 0.7979 - val_loss: 0.7442 - val_acc: 0.7900
Epoch 00077: val_acc did not improve from 0.81171
Epoch 78/250
391/390 [==============================] - 97s 248ms/step - loss: 0.5617 - acc: 0.8095 - val_loss: 0.7803 - val_acc: 0.7797
Epoch 00078: val_acc did not improve from 0.81171
Epoch 79/250
391/390 [==============================] - 97s 248ms/step - loss: 0.5649 - acc: 0.8068 - val_loss: 0.7503 - val_acc: 0.7900
Epoch 00079: val_acc did not improve from 0.81171
Epoch 80/250
391/390 [==============================] - 99s 252ms/step - loss: 0.5520 - acc: 0.8070 - val_loss: 0.7219 - val_acc: 0.7991
Epoch 00080: val_acc did not improve from 0.81171
Epoch 81/250
391/390 [==============================] - 97s 248ms/step - loss: 0.5531 - acc: 0.8091 - val_loss: 0.7094 - val_acc: 0.8062
Epoch 00081: val_acc did not improve from 0.81171
Epoch 82/250
391/390 [==============================] - 97s 248ms/step - loss: 0.5578 - acc: 0.8077 - val_loss: 0.6265 - val_acc: 0.8093
Epoch 00082: val_acc did not improve from 0.81171
Epoch 83/250
391/390 [==============================] - 98s 249ms/step - loss: 0.5377 - acc: 0.8140 - val_loss: 0.6033 - val_acc: 0.8184
Epoch 00083: val_acc improved from 0.81171 to 0.81843, saving model to file_name-83-0.82.hdf5
Epoch 84/250
391/390 [==============================] - 99s 252ms/step - loss: 0.5626 - acc: 0.8043 - val_loss: 0.5878 - val_acc: 0.8188
Epoch 00084: val_acc improved from 0.81843 to 0.81883, saving model to file_name-84-0.82.hdf5
Epoch 85/250
391/390 [==============================] - 98s 251ms/step - loss: 0.5379 - acc: 0.8131 - val_loss: 0.6661 - val_acc: 0.8030
Epoch 00085: val_acc did not improve from 0.81883
Epoch 86/250
391/390 [==============================] - 98s 251ms/step - loss: 0.5419 - acc: 0.8131 - val_loss: 0.6986 - val_acc: 0.8074
Epoch 00086: val_acc did not improve from 0.81883
Epoch 87/250
391/390 [==============================] - 98s 252ms/step - loss: 0.5634 - acc: 0.8079 - val_loss: 0.6436 - val_acc: 0.8208
Epoch 00087: val_acc improved from 0.81883 to 0.82081, saving model to file_name-87-0.82.hdf5
Epoch 88/250
391/390 [==============================] - 100s 255ms/step - loss: 0.5330 - acc: 0.8156 - val_loss: 0.7208 - val_acc: 0.7951
Epoch 00088: val_acc did not improve from 0.82081
Epoch 89/250
391/390 [==============================] - 99s 253ms/step - loss: 0.5423 - acc: 0.8155 - val_loss: 0.8183 - val_acc: 0.7816
Epoch 00089: val_acc did not improve from 0.82081
Epoch 90/250
391/390 [==============================] - 99s 253ms/step - loss: 0.5276 - acc: 0.8169 - val_loss: 0.5766 - val_acc: 0.8232
Epoch 00090: val_acc improved from 0.82081 to 0.82318, saving model to file_name-90-0.82.hdf5
Epoch 91/250
391/390 [==============================] - 99s 252ms/step - loss: 0.5328 - acc: 0.8115 - val_loss: 0.6978 - val_acc: 0.7967
Epoch 00091: val_acc did not improve from 0.82318
Epoch 92/250
391/390 [==============================] - 101s 257ms/step - loss: 0.5417 - acc: 0.8131 - val_loss: 0.7714 - val_acc: 0.7915
Epoch 00092: val_acc did not improve from 0.82318
Epoch 93/250
391/390 [==============================] - 98s 251ms/step - loss: 0.5348 - acc: 0.8161 - val_loss: 0.6166 - val_acc: 0.8125
Epoch 00093: val_acc did not improve from 0.82318
Epoch 94/250
391/390 [==============================] - 98s 250ms/step - loss: 0.5225 - acc: 0.8196 - val_loss: 0.7359 - val_acc: 0.7991
Epoch 00094: val_acc did not improve from 0.82318
Epoch 95/250
391/390 [==============================] - 98s 250ms/step - loss: 0.5305 - acc: 0.8199 - val_loss: 0.7968 - val_acc: 0.7888
Epoch 00095: val_acc did not improve from 0.82318
Epoch 96/250
391/390 [==============================] - 100s 255ms/step - loss: 0.5369 - acc: 0.8116 - val_loss: 0.6596 - val_acc: 0.8129
Epoch 00096: val_acc did not improve from 0.82318
Epoch 97/250
391/390 [==============================] - 99s 252ms/step - loss: 0.5313 - acc: 0.8178 - val_loss: 0.5557 - val_acc: 0.8335
Epoch 00097: val_acc improved from 0.82318 to 0.83347, saving model to file_name-97-0.83.hdf5
Epoch 98/250
391/390 [==============================] - 97s 248ms/step - loss: 0.5185 - acc: 0.8233 - val_loss: 0.6876 - val_acc: 0.8157
Epoch 00098: val_acc did not improve from 0.83347
Epoch 99/250
391/390 [==============================] - 98s 250ms/step - loss: 0.5295 - acc: 0.8159 - val_loss: 0.6873 - val_acc: 0.8121
Epoch 00099: val_acc did not improve from 0.83347
Epoch 100/250
391/390 [==============================] - 100s 254ms/step - loss: 0.5140 - acc: 0.8243 - val_loss: 0.6099 - val_acc: 0.8267
Epoch 00100: val_acc did not improve from 0.83347
Epoch 101/250
391/390 [==============================] - 98s 251ms/step - loss: 0.5225 - acc: 0.8203 - val_loss: 0.5999 - val_acc: 0.8303
Epoch 00101: val_acc did not improve from 0.83347
Epoch 102/250
391/390 [==============================] - 98s 251ms/step - loss: 0.5178 - acc: 0.8206 - val_loss: 0.6381 - val_acc: 0.8204
Epoch 00102: val_acc did not improve from 0.83347
Epoch 103/250
391/390 [==============================] - 98s 251ms/step - loss: 0.5110 - acc: 0.8228 - val_loss: 0.5812 - val_acc: 0.8339
Epoch 00103: val_acc improved from 0.83347 to 0.83386, saving model to file_name-103-0.83.hdf5
Epoch 104/250
391/390 [==============================] - 99s 254ms/step - loss: 0.5006 - acc: 0.8270 - val_loss: 0.5543 - val_acc: 0.8327
Epoch 00104: val_acc did not improve from 0.83386
Epoch 105/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4917 - acc: 0.8309 - val_loss: 0.7126 - val_acc: 0.8030
Epoch 00105: val_acc did not improve from 0.83386
Epoch 106/250
391/390 [==============================] - 98s 250ms/step - loss: 0.5042 - acc: 0.8276 - val_loss: 0.7495 - val_acc: 0.7959
Epoch 00106: val_acc did not improve from 0.83386
Epoch 107/250
391/390 [==============================] - 98s 251ms/step - loss: 0.5065 - acc: 0.8253 - val_loss: 0.5596 - val_acc: 0.8323
Epoch 00107: val_acc did not improve from 0.83386
Epoch 108/250
391/390 [==============================] - 100s 256ms/step - loss: 0.5148 - acc: 0.8258 - val_loss: 0.6186 - val_acc: 0.8327
Epoch 00108: val_acc did not improve from 0.83386
Epoch 109/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4986 - acc: 0.8257 - val_loss: 0.5177 - val_acc: 0.8525
Epoch 00109: val_acc improved from 0.83386 to 0.85245, saving model to file_name-109-0.85.hdf5
Epoch 110/250
391/390 [==============================] - 98s 250ms/step - loss: 0.5143 - acc: 0.8241 - val_loss: 0.6568 - val_acc: 0.8137
Epoch 00110: val_acc did not improve from 0.85245
Epoch 111/250
391/390 [==============================] - 98s 250ms/step - loss: 0.5074 - acc: 0.8262 - val_loss: 0.6640 - val_acc: 0.8157
Epoch 00111: val_acc did not improve from 0.85245
Epoch 112/250
391/390 [==============================] - 100s 255ms/step - loss: 0.5000 - acc: 0.8280 - val_loss: 0.6418 - val_acc: 0.8200
Epoch 00112: val_acc did not improve from 0.85245
Epoch 113/250
391/390 [==============================] - 98s 251ms/step - loss: 0.5112 - acc: 0.8266 - val_loss: 0.7174 - val_acc: 0.8050
Epoch 00113: val_acc did not improve from 0.85245
Epoch 114/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4971 - acc: 0.8255 - val_loss: 0.5613 - val_acc: 0.8335
Epoch 00114: val_acc did not improve from 0.85245
Epoch 115/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4818 - acc: 0.8318 - val_loss: 0.6183 - val_acc: 0.8244
Epoch 00115: val_acc did not improve from 0.85245
Epoch 116/250
391/390 [==============================] - 100s 255ms/step - loss: 0.4731 - acc: 0.8369 - val_loss: 0.7800 - val_acc: 0.7892
Epoch 00116: val_acc did not improve from 0.85245
Epoch 117/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4827 - acc: 0.8346 - val_loss: 0.6033 - val_acc: 0.8244
Epoch 00117: val_acc did not improve from 0.85245
Epoch 118/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4865 - acc: 0.8326 - val_loss: 0.6072 - val_acc: 0.8216
Epoch 00118: val_acc did not improve from 0.85245
Epoch 119/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4823 - acc: 0.8382 - val_loss: 0.6231 - val_acc: 0.8149
Epoch 00119: val_acc did not improve from 0.85245
Epoch 120/250
391/390 [==============================] - 99s 254ms/step - loss: 0.4952 - acc: 0.8307 - val_loss: 0.6701 - val_acc: 0.8105
Epoch 00120: val_acc did not improve from 0.85245
Epoch 121/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4797 - acc: 0.8333 - val_loss: 0.5038 - val_acc: 0.8441
Epoch 00121: val_acc did not improve from 0.85245
Epoch 122/250
391/390 [==============================] - 97s 249ms/step - loss: 0.4766 - acc: 0.8354 - val_loss: 0.4941 - val_acc: 0.8497
Epoch 00122: val_acc did not improve from 0.85245
Epoch 123/250
391/390 [==============================] - 98s 249ms/step - loss: 0.4848 - acc: 0.8351 - val_loss: 0.5802 - val_acc: 0.8244
Epoch 00123: val_acc did not improve from 0.85245
Epoch 124/250
391/390 [==============================] - 99s 254ms/step - loss: 0.4827 - acc: 0.8344 - val_loss: 0.6445 - val_acc: 0.8125
Epoch 00124: val_acc did not improve from 0.85245
Epoch 125/250
391/390 [==============================] - 98s 249ms/step - loss: 0.4693 - acc: 0.8382 - val_loss: 0.6242 - val_acc: 0.8228
Epoch 00125: val_acc did not improve from 0.85245
Epoch 126/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4823 - acc: 0.8346 - val_loss: 0.5710 - val_acc: 0.8323
Epoch 00126: val_acc did not improve from 0.85245
Epoch 127/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4822 - acc: 0.8346 - val_loss: 0.5616 - val_acc: 0.8366
Epoch 00127: val_acc did not improve from 0.85245
Epoch 128/250
391/390 [==============================] - 99s 254ms/step - loss: 0.4841 - acc: 0.8361 - val_loss: 0.6606 - val_acc: 0.8192
Epoch 00128: val_acc did not improve from 0.85245
Epoch 129/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4686 - acc: 0.8412 - val_loss: 0.5931 - val_acc: 0.8331
Epoch 00129: val_acc did not improve from 0.85245
Epoch 130/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4699 - acc: 0.8385 - val_loss: 0.6209 - val_acc: 0.8263
Epoch 00130: val_acc did not improve from 0.85245
Epoch 131/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4831 - acc: 0.8338 - val_loss: 0.5313 - val_acc: 0.8461
Epoch 00131: val_acc did not improve from 0.85245
Epoch 132/250
391/390 [==============================] - 99s 254ms/step - loss: 0.4767 - acc: 0.8331 - val_loss: 0.5920 - val_acc: 0.8279
Epoch 00132: val_acc did not improve from 0.85245
Epoch 133/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4752 - acc: 0.8324 - val_loss: 0.5952 - val_acc: 0.8232
Epoch 00133: val_acc did not improve from 0.85245
Epoch 134/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4459 - acc: 0.8436 - val_loss: 0.5479 - val_acc: 0.8374
Epoch 00134: val_acc did not improve from 0.85245
Epoch 135/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4780 - acc: 0.8357 - val_loss: 0.5626 - val_acc: 0.8311
Epoch 00135: val_acc did not improve from 0.85245
Epoch 136/250
391/390 [==============================] - 100s 255ms/step - loss: 0.4729 - acc: 0.8374 - val_loss: 0.7785 - val_acc: 0.8026
Epoch 00136: val_acc did not improve from 0.85245
Epoch 137/250
391/390 [==============================] - 98s 252ms/step - loss: 0.4636 - acc: 0.8389 - val_loss: 0.5693 - val_acc: 0.8406
Epoch 00137: val_acc did not improve from 0.85245
Epoch 138/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4533 - acc: 0.8405 - val_loss: 0.5761 - val_acc: 0.8461
Epoch 00138: val_acc did not improve from 0.85245
Epoch 139/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4680 - acc: 0.8401 - val_loss: 0.5914 - val_acc: 0.8362
Epoch 00139: val_acc did not improve from 0.85245
Epoch 140/250
391/390 [==============================] - 99s 252ms/step - loss: 0.4490 - acc: 0.8473 - val_loss: 0.6120 - val_acc: 0.8339
Epoch 00140: val_acc did not improve from 0.85245
Epoch 141/250
391/390 [==============================] - 97s 248ms/step - loss: 0.4644 - acc: 0.8407 - val_loss: 0.5318 - val_acc: 0.8525
Epoch 00141: val_acc did not improve from 0.85245
Epoch 142/250
391/390 [==============================] - 97s 248ms/step - loss: 0.4465 - acc: 0.8424 - val_loss: 0.5731 - val_acc: 0.8398
Epoch 00142: val_acc did not improve from 0.85245
Epoch 143/250
391/390 [==============================] - 97s 248ms/step - loss: 0.4604 - acc: 0.8385 - val_loss: 0.5579 - val_acc: 0.8406
Epoch 00143: val_acc did not improve from 0.85245
Epoch 144/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4584 - acc: 0.8469 - val_loss: 0.5671 - val_acc: 0.8410
Epoch 00144: val_acc did not improve from 0.85245
Epoch 145/250
391/390 [==============================] - 97s 248ms/step - loss: 0.4417 - acc: 0.8473 - val_loss: 0.4969 - val_acc: 0.8493
Epoch 00145: val_acc did not improve from 0.85245
Epoch 146/250
391/390 [==============================] - 96s 246ms/step - loss: 0.4422 - acc: 0.8480 - val_loss: 0.5097 - val_acc: 0.8525
Epoch 00146: val_acc did not improve from 0.85245
Epoch 147/250
391/390 [==============================] - 97s 247ms/step - loss: 0.4535 - acc: 0.8464 - val_loss: 0.5510 - val_acc: 0.8441
Epoch 00147: val_acc did not improve from 0.85245
Epoch 148/250
391/390 [==============================] - 99s 252ms/step - loss: 0.4518 - acc: 0.8438 - val_loss: 0.5410 - val_acc: 0.8426
Epoch 00148: val_acc did not improve from 0.85245
Epoch 149/250
391/390 [==============================] - 97s 247ms/step - loss: 0.4391 - acc: 0.8513 - val_loss: 0.5817 - val_acc: 0.8390
Epoch 00149: val_acc did not improve from 0.85245
Epoch 150/250
391/390 [==============================] - 96s 246ms/step - loss: 0.4561 - acc: 0.8444 - val_loss: 0.5412 - val_acc: 0.8418
Epoch 00150: val_acc did not improve from 0.85245
Epoch 151/250
391/390 [==============================] - 97s 247ms/step - loss: 0.4578 - acc: 0.8430 - val_loss: 0.5210 - val_acc: 0.8532
Epoch 00151: val_acc improved from 0.85245 to 0.85324, saving model to file_name-151-0.85.hdf5
Epoch 152/250
391/390 [==============================] - 98s 249ms/step - loss: 0.4528 - acc: 0.8408 - val_loss: 0.4973 - val_acc: 0.8477
Epoch 00152: val_acc did not improve from 0.85324
Epoch 153/250
391/390 [==============================] - 96s 246ms/step - loss: 0.4356 - acc: 0.8501 - val_loss: 0.5771 - val_acc: 0.8327
Epoch 00153: val_acc did not improve from 0.85324
Epoch 154/250
391/390 [==============================] - 96s 247ms/step - loss: 0.4342 - acc: 0.8504 - val_loss: 0.5558 - val_acc: 0.8374
Epoch 00154: val_acc did not improve from 0.85324
Epoch 155/250
391/390 [==============================] - 96s 246ms/step - loss: 0.4596 - acc: 0.8406 - val_loss: 0.5518 - val_acc: 0.8358
Epoch 00155: val_acc did not improve from 0.85324
Epoch 156/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4467 - acc: 0.8411 - val_loss: 0.6452 - val_acc: 0.8252
Epoch 00156: val_acc did not improve from 0.85324
Epoch 157/250
391/390 [==============================] - 96s 246ms/step - loss: 0.4406 - acc: 0.8501 - val_loss: 0.6075 - val_acc: 0.8335
Epoch 00157: val_acc did not improve from 0.85324
Epoch 158/250
391/390 [==============================] - 96s 247ms/step - loss: 0.4412 - acc: 0.8429 - val_loss: 0.5539 - val_acc: 0.8441
Epoch 00158: val_acc did not improve from 0.85324
Epoch 159/250
391/390 [==============================] - 96s 246ms/step - loss: 0.4451 - acc: 0.8438 - val_loss: 0.5460 - val_acc: 0.8473
Epoch 00159: val_acc did not improve from 0.85324
Epoch 160/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4457 - acc: 0.8437 - val_loss: 0.5125 - val_acc: 0.8521
Epoch 00160: val_acc did not improve from 0.85324
Epoch 161/250
391/390 [==============================] - 97s 247ms/step - loss: 0.4435 - acc: 0.8459 - val_loss: 0.6015 - val_acc: 0.8386
Epoch 00161: val_acc did not improve from 0.85324
Epoch 162/250
391/390 [==============================] - 96s 246ms/step - loss: 0.4371 - acc: 0.8497 - val_loss: 0.5421 - val_acc: 0.8453
Epoch 00162: val_acc did not improve from 0.85324
Epoch 163/250
391/390 [==============================] - 97s 249ms/step - loss: 0.4315 - acc: 0.8518 - val_loss: 0.5356 - val_acc: 0.8513
Epoch 00163: val_acc did not improve from 0.85324
Epoch 164/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4375 - acc: 0.8485 - val_loss: 0.4955 - val_acc: 0.8647
Epoch 00164: val_acc improved from 0.85324 to 0.86472, saving model to file_name-164-0.86.hdf5
Epoch 165/250
391/390 [==============================] - 96s 246ms/step - loss: 0.4328 - acc: 0.8541 - val_loss: 0.4862 - val_acc: 0.8627
Epoch 00165: val_acc did not improve from 0.86472
Epoch 166/250
391/390 [==============================] - 96s 246ms/step - loss: 0.4227 - acc: 0.8499 - val_loss: 0.4655 - val_acc: 0.8691
Epoch 00166: val_acc improved from 0.86472 to 0.86907, saving model to file_name-166-0.87.hdf5
Epoch 167/250
391/390 [==============================] - 97s 248ms/step - loss: 0.4343 - acc: 0.8525 - val_loss: 0.5587 - val_acc: 0.8481
Epoch 00167: val_acc did not improve from 0.86907
Epoch 168/250
391/390 [==============================] - 99s 254ms/step - loss: 0.4489 - acc: 0.8440 - val_loss: 0.5072 - val_acc: 0.8552
Epoch 00168: val_acc did not improve from 0.86907
Epoch 169/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4331 - acc: 0.8516 - val_loss: 0.4975 - val_acc: 0.8528
Epoch 00169: val_acc did not improve from 0.86907
Epoch 170/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4273 - acc: 0.8545 - val_loss: 0.5698 - val_acc: 0.8374
Epoch 00170: val_acc did not improve from 0.86907
Epoch 171/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4292 - acc: 0.8476 - val_loss: 0.5187 - val_acc: 0.8465
Epoch 00171: val_acc did not improve from 0.86907
Epoch 172/250
391/390 [==============================] - 100s 255ms/step - loss: 0.4268 - acc: 0.8557 - val_loss: 0.5302 - val_acc: 0.8509
Epoch 00172: val_acc did not improve from 0.86907
Epoch 173/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4367 - acc: 0.8490 - val_loss: 0.6016 - val_acc: 0.8339
Epoch 00173: val_acc did not improve from 0.86907
Epoch 174/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4179 - acc: 0.8550 - val_loss: 0.5626 - val_acc: 0.8402
Epoch 00174: val_acc did not improve from 0.86907
Epoch 175/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4257 - acc: 0.8502 - val_loss: 0.5278 - val_acc: 0.8509
Epoch 00175: val_acc did not improve from 0.86907
Epoch 176/250
391/390 [==============================] - 99s 254ms/step - loss: 0.4267 - acc: 0.8498 - val_loss: 0.5141 - val_acc: 0.8513
Epoch 00176: val_acc did not improve from 0.86907
Epoch 177/250
391/390 [==============================] - 97s 248ms/step - loss: 0.4375 - acc: 0.8509 - val_loss: 0.5595 - val_acc: 0.8394
Epoch 00177: val_acc did not improve from 0.86907
Epoch 178/250
391/390 [==============================] - 97s 248ms/step - loss: 0.4170 - acc: 0.8569 - val_loss: 0.5072 - val_acc: 0.8441
Epoch 00178: val_acc did not improve from 0.86907
Epoch 179/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4385 - acc: 0.8497 - val_loss: 0.4709 - val_acc: 0.8623
Epoch 00179: val_acc did not improve from 0.86907
Epoch 180/250
391/390 [==============================] - 100s 255ms/step - loss: 0.4171 - acc: 0.8585 - val_loss: 0.5415 - val_acc: 0.8477
Epoch 00180: val_acc did not improve from 0.86907
Epoch 181/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4119 - acc: 0.8576 - val_loss: 0.5110 - val_acc: 0.8556
Epoch 00181: val_acc did not improve from 0.86907
Epoch 182/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4158 - acc: 0.8564 - val_loss: 0.5309 - val_acc: 0.8485
Epoch 00182: val_acc did not improve from 0.86907
Epoch 183/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4172 - acc: 0.8583 - val_loss: 0.5415 - val_acc: 0.8489
Epoch 00183: val_acc did not improve from 0.86907
Epoch 184/250
391/390 [==============================] - 100s 255ms/step - loss: 0.4266 - acc: 0.8519 - val_loss: 0.4579 - val_acc: 0.8612
Epoch 00184: val_acc did not improve from 0.86907
Epoch 185/250
391/390 [==============================] - 97s 249ms/step - loss: 0.4117 - acc: 0.8595 - val_loss: 0.5126 - val_acc: 0.8477
Epoch 00185: val_acc did not improve from 0.86907
Epoch 186/250
391/390 [==============================] - 97s 248ms/step - loss: 0.4178 - acc: 0.8563 - val_loss: 0.5252 - val_acc: 0.8441
Epoch 00186: val_acc did not improve from 0.86907
Epoch 187/250
391/390 [==============================] - 97s 249ms/step - loss: 0.4152 - acc: 0.8541 - val_loss: 0.5591 - val_acc: 0.8378
Epoch 00187: val_acc did not improve from 0.86907
Epoch 188/250
391/390 [==============================] - 99s 254ms/step - loss: 0.4145 - acc: 0.8583 - val_loss: 0.4944 - val_acc: 0.8572
Epoch 00188: val_acc did not improve from 0.86907
Epoch 189/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4179 - acc: 0.8567 - val_loss: 0.5098 - val_acc: 0.8552
Epoch 00189: val_acc did not improve from 0.86907
Epoch 190/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4126 - acc: 0.8573 - val_loss: 0.5766 - val_acc: 0.8394
Epoch 00190: val_acc did not improve from 0.86907
Epoch 191/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4015 - acc: 0.8625 - val_loss: 0.4700 - val_acc: 0.8647
Epoch 00191: val_acc did not improve from 0.86907
Epoch 192/250
391/390 [==============================] - 99s 253ms/step - loss: 0.4116 - acc: 0.8588 - val_loss: 0.4755 - val_acc: 0.8592
Epoch 00192: val_acc did not improve from 0.86907
Epoch 193/250
391/390 [==============================] - 97s 249ms/step - loss: 0.4114 - acc: 0.8579 - val_loss: 0.5838 - val_acc: 0.8453
Epoch 00193: val_acc did not improve from 0.86907
Epoch 194/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4129 - acc: 0.8578 - val_loss: 0.4753 - val_acc: 0.8604
Epoch 00194: val_acc did not improve from 0.86907
Epoch 195/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4129 - acc: 0.8620 - val_loss: 0.4957 - val_acc: 0.8564
Epoch 00195: val_acc did not improve from 0.86907
Epoch 196/250
391/390 [==============================] - 100s 255ms/step - loss: 0.4064 - acc: 0.8590 - val_loss: 0.4561 - val_acc: 0.8635
Epoch 00196: val_acc did not improve from 0.86907
Epoch 197/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4192 - acc: 0.8555 - val_loss: 0.4239 - val_acc: 0.8675
Epoch 00197: val_acc did not improve from 0.86907
Epoch 198/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4071 - acc: 0.8628 - val_loss: 0.4339 - val_acc: 0.8604
Epoch 00198: val_acc did not improve from 0.86907
Epoch 199/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4123 - acc: 0.8573 - val_loss: 0.4619 - val_acc: 0.8639
Epoch 00199: val_acc did not improve from 0.86907
Epoch 200/250
391/390 [==============================] - 100s 256ms/step - loss: 0.3955 - acc: 0.8625 - val_loss: 0.4694 - val_acc: 0.8663
Epoch 00200: val_acc did not improve from 0.86907
Epoch 201/250
391/390 [==============================] - 98s 250ms/step - loss: 0.3991 - acc: 0.8609 - val_loss: 0.5758 - val_acc: 0.8350
Epoch 00201: val_acc did not improve from 0.86907
Epoch 202/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4104 - acc: 0.8541 - val_loss: 0.4686 - val_acc: 0.8612
Epoch 00202: val_acc did not improve from 0.86907
Epoch 203/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4185 - acc: 0.8537 - val_loss: 0.4728 - val_acc: 0.8635
Epoch 00203: val_acc did not improve from 0.86907
Epoch 204/250
391/390 [==============================] - 99s 254ms/step - loss: 0.4014 - acc: 0.8635 - val_loss: 0.5514 - val_acc: 0.8457
Epoch 00204: val_acc did not improve from 0.86907
Epoch 205/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4046 - acc: 0.8594 - val_loss: 0.4794 - val_acc: 0.8659
Epoch 00205: val_acc did not improve from 0.86907
Epoch 206/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4207 - acc: 0.8521 - val_loss: 0.5386 - val_acc: 0.8505
Epoch 00206: val_acc did not improve from 0.86907
Epoch 207/250
391/390 [==============================] - 98s 251ms/step - loss: 0.4071 - acc: 0.8578 - val_loss: 0.4935 - val_acc: 0.8667
Epoch 00207: val_acc did not improve from 0.86907
Epoch 208/250
391/390 [==============================] - 99s 254ms/step - loss: 0.3978 - acc: 0.8656 - val_loss: 0.5529 - val_acc: 0.8453
Epoch 00208: val_acc did not improve from 0.86907
Epoch 209/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4141 - acc: 0.8581 - val_loss: 0.5020 - val_acc: 0.8556
Epoch 00209: val_acc did not improve from 0.86907
Epoch 210/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4050 - acc: 0.8592 - val_loss: 0.5755 - val_acc: 0.8465
Epoch 00210: val_acc did not improve from 0.86907
Epoch 211/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4016 - acc: 0.8597 - val_loss: 0.4650 - val_acc: 0.8675
Epoch 00211: val_acc did not improve from 0.86907
Epoch 212/250
391/390 [==============================] - 99s 254ms/step - loss: 0.3942 - acc: 0.8673 - val_loss: 0.4286 - val_acc: 0.8742
Epoch 00212: val_acc improved from 0.86907 to 0.87421, saving model to file_name-212-0.87.hdf5
Epoch 213/250
391/390 [==============================] - 97s 248ms/step - loss: 0.3994 - acc: 0.8625 - val_loss: 0.4327 - val_acc: 0.8766
Epoch 00213: val_acc improved from 0.87421 to 0.87658, saving model to file_name-213-0.88.hdf5
Epoch 214/250
391/390 [==============================] - 96s 246ms/step - loss: 0.3920 - acc: 0.8638 - val_loss: 0.4891 - val_acc: 0.8592
Epoch 00214: val_acc did not improve from 0.87658
Epoch 215/250
391/390 [==============================] - 98s 250ms/step - loss: 0.3924 - acc: 0.8652 - val_loss: 0.4350 - val_acc: 0.8746
Epoch 00215: val_acc did not improve from 0.87658
Epoch 216/250
391/390 [==============================] - 100s 255ms/step - loss: 0.4028 - acc: 0.8611 - val_loss: 0.5435 - val_acc: 0.8540
Epoch 00216: val_acc did not improve from 0.87658
Epoch 217/250
391/390 [==============================] - 98s 250ms/step - loss: 0.4018 - acc: 0.8641 - val_loss: 0.4915 - val_acc: 0.8667
Epoch 00217: val_acc did not improve from 0.87658
Epoch 218/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3934 - acc: 0.8660 - val_loss: 0.4536 - val_acc: 0.8738
Epoch 00218: val_acc did not improve from 0.87658
Epoch 219/250
391/390 [==============================] - 98s 252ms/step - loss: 0.4007 - acc: 0.8609 - val_loss: 0.4894 - val_acc: 0.8655
Epoch 00219: val_acc did not improve from 0.87658
Epoch 220/250
391/390 [==============================] - 100s 255ms/step - loss: 0.3986 - acc: 0.8620 - val_loss: 0.4463 - val_acc: 0.8742
Epoch 00220: val_acc did not improve from 0.87658
Epoch 221/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3938 - acc: 0.8652 - val_loss: 0.5056 - val_acc: 0.8525
Epoch 00221: val_acc did not improve from 0.87658
Epoch 222/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3945 - acc: 0.8647 - val_loss: 0.4741 - val_acc: 0.8663
Epoch 00222: val_acc did not improve from 0.87658
Epoch 223/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3897 - acc: 0.8658 - val_loss: 0.4938 - val_acc: 0.8600
Epoch 00223: val_acc did not improve from 0.87658
Epoch 224/250
391/390 [==============================] - 100s 256ms/step - loss: 0.3997 - acc: 0.8659 - val_loss: 0.4875 - val_acc: 0.8643
Epoch 00224: val_acc did not improve from 0.87658
Epoch 225/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3873 - acc: 0.8637 - val_loss: 0.4328 - val_acc: 0.8817
Epoch 00225: val_acc improved from 0.87658 to 0.88172, saving model to file_name-225-0.88.hdf5
Epoch 226/250
391/390 [==============================] - 98s 250ms/step - loss: 0.3976 - acc: 0.8625 - val_loss: 0.4683 - val_acc: 0.8710
Epoch 00226: val_acc did not improve from 0.88172
Epoch 227/250
391/390 [==============================] - 98s 250ms/step - loss: 0.3986 - acc: 0.8630 - val_loss: 0.5072 - val_acc: 0.8639
Epoch 00227: val_acc did not improve from 0.88172
Epoch 228/250
391/390 [==============================] - 100s 255ms/step - loss: 0.3930 - acc: 0.8664 - val_loss: 0.4359 - val_acc: 0.8710
Epoch 00228: val_acc did not improve from 0.88172
Epoch 229/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3955 - acc: 0.8631 - val_loss: 0.4223 - val_acc: 0.8679
Epoch 00229: val_acc did not improve from 0.88172
Epoch 230/250
391/390 [==============================] - 98s 250ms/step - loss: 0.3898 - acc: 0.8650 - val_loss: 0.6118 - val_acc: 0.8331
Epoch 00230: val_acc did not improve from 0.88172
Epoch 231/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3761 - acc: 0.8702 - val_loss: 0.4959 - val_acc: 0.8580
Epoch 00231: val_acc did not improve from 0.88172
Epoch 232/250
391/390 [==============================] - 100s 255ms/step - loss: 0.3903 - acc: 0.8648 - val_loss: 0.5133 - val_acc: 0.8521
Epoch 00232: val_acc did not improve from 0.88172
Epoch 233/250
391/390 [==============================] - 98s 250ms/step - loss: 0.3821 - acc: 0.8689 - val_loss: 0.4472 - val_acc: 0.8671
Epoch 00233: val_acc did not improve from 0.88172
Epoch 234/250
391/390 [==============================] - 99s 253ms/step - loss: 0.3762 - acc: 0.8716 - val_loss: 0.4535 - val_acc: 0.8639
Epoch 00234: val_acc did not improve from 0.88172
Epoch 235/250
391/390 [==============================] - 99s 253ms/step - loss: 0.3861 - acc: 0.8652 - val_loss: 0.4436 - val_acc: 0.8635
Epoch 00235: val_acc did not improve from 0.88172
Epoch 236/250
391/390 [==============================] - 100s 256ms/step - loss: 0.3873 - acc: 0.8661 - val_loss: 0.5223 - val_acc: 0.8521
Epoch 00236: val_acc did not improve from 0.88172
Epoch 237/250
391/390 [==============================] - 99s 253ms/step - loss: 0.3864 - acc: 0.8665 - val_loss: 0.5325 - val_acc: 0.8477
Epoch 00237: val_acc did not improve from 0.88172
Epoch 238/250
391/390 [==============================] - 98s 252ms/step - loss: 0.3738 - acc: 0.8713 - val_loss: 0.4941 - val_acc: 0.8596
Epoch 00238: val_acc did not improve from 0.88172
Epoch 239/250
391/390 [==============================] - 98s 250ms/step - loss: 0.3930 - acc: 0.8648 - val_loss: 0.4523 - val_acc: 0.8770
Epoch 00239: val_acc did not improve from 0.88172
Epoch 240/250
391/390 [==============================] - 99s 252ms/step - loss: 0.3843 - acc: 0.8624 - val_loss: 0.4837 - val_acc: 0.8619
Epoch 00240: val_acc did not improve from 0.88172
Epoch 241/250
391/390 [==============================] - 96s 247ms/step - loss: 0.3727 - acc: 0.8711 - val_loss: 0.4908 - val_acc: 0.8584
Epoch 00241: val_acc did not improve from 0.88172
Epoch 242/250
391/390 [==============================] - 97s 249ms/step - loss: 0.3610 - acc: 0.8751 - val_loss: 0.4383 - val_acc: 0.8782
Epoch 00242: val_acc did not improve from 0.88172
Epoch 243/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3798 - acc: 0.8693 - val_loss: 0.4493 - val_acc: 0.8710
Epoch 00243: val_acc did not improve from 0.88172
Epoch 244/250
391/390 [==============================] - 100s 255ms/step - loss: 0.3768 - acc: 0.8712 - val_loss: 0.4976 - val_acc: 0.8525
Epoch 00244: val_acc did not improve from 0.88172
Epoch 245/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3843 - acc: 0.8668 - val_loss: 0.4786 - val_acc: 0.8651
Epoch 00245: val_acc did not improve from 0.88172
Epoch 246/250
391/390 [==============================] - 98s 250ms/step - loss: 0.3675 - acc: 0.8733 - val_loss: 0.4604 - val_acc: 0.8631
Epoch 00246: val_acc did not improve from 0.88172
Epoch 247/250
391/390 [==============================] - 98s 250ms/step - loss: 0.3843 - acc: 0.8664 - val_loss: 0.4584 - val_acc: 0.8667
Epoch 00247: val_acc did not improve from 0.88172
Epoch 248/250
391/390 [==============================] - 99s 254ms/step - loss: 0.3921 - acc: 0.8676 - val_loss: 0.5427 - val_acc: 0.8532
Epoch 00248: val_acc did not improve from 0.88172
Epoch 249/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3601 - acc: 0.8728 - val_loss: 0.4938 - val_acc: 0.8616
Epoch 00249: val_acc did not improve from 0.88172
Epoch 250/250
391/390 [==============================] - 98s 251ms/step - loss: 0.3794 - acc: 0.8720 - val_loss: 0.4712 - val_acc: 0.8691
Epoch 00250: val_acc did not improve from 0.88172
Out[28]:
<keras.callbacks.History at 0x7fb2b59e5f60>
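That History object carries the per-epoch metrics printed above. A minimal sketch for plotting the curves, assuming the fit_generator call in the previous cell is assigned to a variable named history:

import matplotlib.pyplot as plt

# history = model.fit_generator(...)  # as in the cell above
plt.plot(history.history['acc'], label='train acc')
plt.plot(history.history['val_acc'], label='val acc')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()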
In [29]:
# Test the model
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
10000/10000 [==============================] - 21s 2ms/step
Test loss: 0.4532780726671219
Test accuracy: 0.8701
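This score reflects the final-epoch weights, not the best checkpoint saved above (file_name-225-0.88.hdf5, val_acc 0.88172). A minimal sketch of evaluating the best checkpoint instead, assuming the file is still on disk:

# restore the best checkpoint written by ModelCheckpoint during training
model.load_weights('file_name-225-0.88.hdf5')
score = model.evaluate(x_test, y_test, verbose=1)
print('Best-checkpoint test accuracy:', score[1])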
In [30]:
# Save the trained weights into .h5 format
model.save_weights("DNST_model.h5")
print("Saved model to disk")
Saved model to disk
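save_weights() stores only the parameter values, so the same architecture has to be rebuilt in code before load_weights() can restore them. A sketch of both options, using model.save() as the self-contained alternative:

# Option 1: weights only -- rebuild the model in code, then
model.load_weights("DNST_model.h5")

# Option 2: one file with architecture + weights + optimizer state
model.save("DNST_full_model.h5")
from keras.models import load_model
restored = load_model("DNST_full_model.h5")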
In [0]:
from google.colab import files
files.download('DNST_model.h5')