no featurewise center: weights.265-0.2277.hdf5
28/28 [==============================] - 23s - loss: 0.0857 - acc: 0.9866 - val_loss: 0.2277 - val_acc: 0.9416

valid loss: 0.229860134341
class
ALB      0.167267
BET      0.692852
DOL      0.750891
LAG      0.208375
OTHER    0.096605
SHARK    0.266442
YFT      0.215996
Name: logloss, dtype: float64

train loss: 0.0194088987134
class
ALB      0.026230
BET      0.003189
DOL      0.000596
LAG      0.000126
OTHER    0.001473
SHARK    0.000130
YFT      0.021688

In [2]:
from __future__ import division
from __future__ import print_function

import os, random, glob, pickle, collections, math, json
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder

import matplotlib.pyplot as plt
%matplotlib inline 

from keras.models import Sequential, Model, load_model, model_from_json
from keras import layers
from keras.layers import GlobalAveragePooling2D, Flatten, Dropout, Dense, LeakyReLU, Conv2D, Input, BatchNormalization, Activation
from keras.optimizers import Adam, RMSprop
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.preprocessing import image
from keras import backend as K
K.set_image_dim_ordering('tf')


Using TensorFlow backend.

In [3]:
TRAIN_DIR = '../data/train/'
TEST_DIR = '../RFCN/JPEGImages/'
TRAIN_CROP_DIR = '../data/train_crop/'
TEST_CROP_DIR = '../data/test_stg1_crop/'
RFCN_MODEL = 'resnet101_rfcn_ohem_iter_30000'
CROP_MODEL = 'resnet19ss_Hybrid_woNoF'
if not os.path.exists('./' + CROP_MODEL):
    os.mkdir('./' + CROP_MODEL)
CHECKPOINT_DIR = './' + CROP_MODEL + '/checkpoint/'
if not os.path.exists(CHECKPOINT_DIR):
    os.mkdir(CHECKPOINT_DIR)
LOG_DIR = './' + CROP_MODEL + '/log/'
if not os.path.exists(LOG_DIR):
    os.mkdir(LOG_DIR)
OUTPUT_DIR = './' + CROP_MODEL + '/output/'
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
FISH_CLASSES = ['NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']
CROP_CLASSES=FISH_CLASSES[:]
CROP_CLASSES.remove('NoF')
CONF_THRESH = 0.8
ROWS = 224
COLS = 224
BATCHSIZE = 128
LEARNINGRATE = 1e-4
def featurewise_center(x):
    mean = np.mean(x, axis=0, keepdims=True)
    mean = np.mean(mean, axis=(1,2), keepdims=True)
    x_centered = x - mean
    return x_centered

def mean(x):
    mean = np.mean(x, axis=0)
    mean = np.mean(mean, axis=(0,1))
    return mean

def load_img(path, bbox, target_size=None):
    img = Image.open(path)
#     img = img.convert('RGB')
    cropped = img.crop((bbox[0],bbox[1],bbox[2],bbox[3]))
    width_cropped, height_cropped = cropped.size
    if height_cropped > width_cropped: cropped = cropped.transpose(Image.ROTATE_90)  # rotate portrait crops to landscape
    if target_size:
        cropped = cropped.resize((target_size[1], target_size[0]), Image.BILINEAR)
    return cropped

def preprocess_input(x, mean):
    #resnet50 image preprocessing
#     'RGB'->'BGR'
#     x = x[:, :, ::-1]
#     x /= 255.
    x[:, :, 0] -= mean[0]
    x[:, :, 1] -= mean[1]
    x[:, :, 2] -= mean[2]
    return x

def get_best_model(checkpoint_dir = CHECKPOINT_DIR):
    files = glob.glob(checkpoint_dir+'*')
    val_losses = [float(f.split('-')[-1][:-5]) for f in files]
    index = val_losses.index(min(val_losses))
    print('Loading model from checkpoint file ' + files[index])
    model = load_model(files[index])
    model_name = files[index].split('/')[-1]
    print('Loading model Done!')
    return (model, model_name)
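
# A minimal sketch (not part of the original notebook) of how these helpers chain
# together on a single crop; the image name and bounding box are hypothetical.
example_bbox = [100, 50, 400, 300]                          # hypothetical xmin, ymin, xmax, ymax
crop = load_img(TEST_DIR + 'img_00001.jpg', example_bbox, target_size=(ROWS, COLS))
x = np.asarray(crop).astype(np.float32) / 255.              # same rescaling as used for the training data below
x = featurewise_center(x[np.newaxis, ...])                  # add a batch axis before centering
print(x.shape)                                              # (1, 224, 224, 3)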

In [4]:
# GTbbox_df columns: ['image_file','crop_index','crop_class','xmin','ymin','xmax','ymax']

file_name = 'GTbbox_df.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    GTbbox_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)       
    GTbbox_df = pd.DataFrame(columns=['image_file','crop_index','crop_class','xmin','ymin','xmax','ymax'])  
    
    for c in CROP_CLASSES:
        print(c)
        j = json.load(open('../data/BBannotations/{}.json'.format(c), 'r'))
        for l in j: 
            filename = l["filename"]
            head, image_file = os.path.split(filename)
            basename, file_extension = os.path.splitext(image_file) 
            image = Image.open(TEST_DIR+image_file)
            width_image, height_image = image.size
            for i in range(len(l["annotations"])):
                a = l["annotations"][i]
                xmin = (a["x"])
                ymin = (a["y"])
                width = (a["width"])
                height = (a["height"])
                xmax = xmin + width
                ymax = ymin + height
                assert max(xmin,0)<min(xmax,width_image)
                assert max(ymin,0)<min(ymax,height_image)
                GTbbox_df.loc[len(GTbbox_df)]=[image_file,i,a["class"],max(xmin,0),max(ymin,0),min(xmax,width_image),min(ymax,height_image)]
                if a["class"] != c: print(GTbbox_df.tail(1))  
    
    test_size = GTbbox_df.shape[0]-int(math.ceil(GTbbox_df.shape[0]*0.8/128)*128)
    train_ind, valid_ind = train_test_split(range(GTbbox_df.shape[0]), test_size=test_size, random_state=1986, stratify=GTbbox_df['crop_class'])
    GTbbox_df['split'] = ['train' if i in train_ind else 'valid' for i in range(GTbbox_df.shape[0])]
    GTbbox_df.to_pickle(OUTPUT_DIR+file_name)


Loading from file GTbbox_df.pickle
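
# For reference, a sketch of the annotation structure the loop above assumes
# (keys taken from the parsing code; the values here are illustrative only).
example_entry = {
    "filename": "some/path/img_00001.jpg",
    "annotations": [
        {"class": "ALB", "x": 120.0, "y": 80.0, "width": 250.0, "height": 140.0}
    ]
}
# Each annotation becomes one GTbbox_df row:
# [image_file, crop_index, crop_class, xmin, ymin, xmax, ymax]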

In [5]:
#Load data

def data_from_df(df):
    X = np.ndarray((df.shape[0], ROWS, COLS, 3), dtype=np.uint8)
    y = np.zeros((df.shape[0], len(CROP_CLASSES)), dtype=K.floatx())
    i = 0
    for index,row in df.iterrows():
        image_file = row['image_file']
        fish = row['crop_class']
        bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
        cropped = load_img(TEST_DIR+image_file,bbox,target_size=(ROWS,COLS))
        X[i] = np.asarray(cropped)
        y[i,CROP_CLASSES.index(fish)] = 1
        i += 1
    return (X, y)

def data_load(name):
    file_name = 'data_'+name+'_{}_{}.pickle'.format(ROWS, COLS)
    if os.path.exists(OUTPUT_DIR+file_name):
        print ('Loading from file '+file_name)
        with open(OUTPUT_DIR+file_name, 'rb') as f:
            data = pickle.load(f)
        X = data['X']
        y = data['y']
    else:
        print ('Generating file '+file_name)
        
        if name=='train' or name=='valid': 
            df = GTbbox_df[GTbbox_df['split']==name]
        elif name=='all':
            df = GTbbox_df
        else:
            raise ValueError('Invalid name '+name)
    
        X, y = data_from_df(df)

        data = {'X': X,'y': y}
        with open(OUTPUT_DIR+file_name, 'wb') as f:
            pickle.dump(data, f)
    return (X, y)
X_train, y_train = data_load('train')
X_valid, y_valid = data_load('valid')
       
print('Loading data done.')
print('train sample ', X_train.shape[0])
print('valid sample ', X_valid.shape[0])
X_train = X_train.astype(np.float32)
X_valid = X_valid.astype(np.float32)
print('Convert to float32 done.')
X_train /= 255.
X_valid /= 255.
print('Rescale by 255 done.')
X_train_centered = featurewise_center(X_train)
print('mean of X_train is ', mean(X_train))
X_valid_centered = featurewise_center(X_valid)
print('mean of X_valid is ', mean(X_valid))
print('Featurewise centered done.')


Loading from file data_train_224_224.pickle
Loading from file data_valid_224_224.pickle
Loading data done.
train sample  3584
valid sample  787
Convert to float32 done.
Rescale by 255 done.
mean of X_train is  [ 0.40704539  0.43806663  0.39486334]
mean of X_valid is  [ 0.4065561   0.43584293  0.39404479]
Featurewise centered done.
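
# A quick sanity check (a sketch, not part of the original output): after
# featurewise centering, the per-channel means should be close to zero.
print('mean of X_train_centered is ', mean(X_train_centered))   # expected ~[0, 0, 0]
print('mean of X_valid_centered is ', mean(X_valid_centered))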

In [6]:
# #class weight = n_samples / (n_classes * np.bincount(y))
# class_weight_fish = dict(GTbbox_df.groupby('crop_class').size())
# class_weight = {}
# n_samples = GTbbox_df.shape[0]
# for key,value in class_weight_fish.items():
#         class_weight[CROP_CLASSES.index(key)] = n_samples / (len(CROP_CLASSES)*value)
# class_weight

class_weight_fish = dict(GTbbox_df.groupby('crop_class').size())
class_weight = {}
ref = max(class_weight_fish.values())
for key,value in class_weight_fish.items():
    class_weight[CROP_CLASSES.index(key)] = ref/value
class_weight


Out[6]:
{0: 1.0,
 1: 8.212418300653594,
 2: 19.944444444444443,
 3: 23.933333333333334,
 4: 7.5465465465465469,
 5: 13.296296296296296,
 6: 3.1451814768460578}
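
# Cross-check (a sketch, assuming all seven classes occur in GTbbox_df): sklearn's
# 'balanced' heuristic computes the commented-out formula above; it differs from the
# max-count-relative weights used here only by one global factor, so the relative
# weighting of the classes is the same.
from sklearn.utils.class_weight import compute_class_weight
balanced = compute_class_weight(class_weight='balanced',
                                classes=np.array(CROP_CLASSES),
                                y=GTbbox_df['crop_class'].values)
dict(zip(range(len(CROP_CLASSES)), balanced))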

In [7]:
#data preprocessing

train_datagen = ImageDataGenerator(
    rotation_range=180,
    shear_range=0.2,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    vertical_flip=True)
train_generator = train_datagen.flow(X_train, y_train, batch_size=BATCHSIZE, shuffle=True, seed=None)
assert X_train.shape[0]%BATCHSIZE==0
steps_per_epoch = int(X_train.shape[0]/BATCHSIZE)
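
# A quick visual spot-check of the augmentation (a sketch, not part of the original
# run): draw one batch from train_generator and plot a few crops with their labels.
x_batch, y_batch = next(train_generator)            # one augmented batch, shape (BATCHSIZE, ROWS, COLS, 3)
fig, axes = plt.subplots(1, 4, figsize=(12, 3))
for ax, img, label in zip(axes, x_batch[:4], y_batch[:4]):
    ax.imshow(np.clip(img, 0., 1.))                 # inputs are already rescaled to [0, 1]
    ax.set_title(CROP_CLASSES[int(np.argmax(label))])
    ax.axis('off')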

In [16]:
#callbacks

early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=1, mode='auto')        

model_checkpoint = ModelCheckpoint(filepath=CHECKPOINT_DIR+'weights.{epoch:03d}-{val_loss:.4f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
        
learningrate_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=40, verbose=1, mode='auto', epsilon=0.001, cooldown=0, min_lr=0)

tensorboard = TensorBoard(log_dir=LOG_DIR, histogram_freq=0, write_graph=False, write_images=True)
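
# Note: only model_checkpoint and tensorboard are passed to fit_generator below.
# A sketch of a callback list that would also use the early stopping and LR
# schedule defined here (optional, not used in the run below):
callbacks_full = [model_checkpoint, tensorboard, early_stopping, learningrate_schedule]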

In [9]:
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: defualt 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filterss of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    """
    filters = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters, kernel_size, padding='same', name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters, kernel_size, padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
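
# A small shape check (illustrative, not in the original notebook): the identity
# block preserves both the spatial size and the channel count of its input, which
# is what makes the residual addition valid.
_t = Input(shape=(56, 56, 16))                      # hypothetical feature map
_out = identity_block(_t, 3, 16, stage=9, block='a')
print(Model(_t, _out).output_shape)                 # (None, 56, 56, 16)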

In [10]:
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: defualt 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filterss of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    Note that from stage 3, the first conv layer at main path is with strides=(2,2)
    And the shortcut should have strides=(2,2) as well
    """
    filters = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters, kernel_size, padding='same', strides=strides, name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters, kernel_size, padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    shortcut = Conv2D(filters, (1, 1), strides=strides, name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x
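
# Similarly for the conv block (illustrative): with the default strides=(2, 2) it
# halves the spatial resolution and may change the channel count, the 1x1 shortcut
# convolution keeping the two branches compatible for the addition.
_t2 = Input(shape=(56, 56, 16))                     # hypothetical feature map
_out2 = conv_block(_t2, 3, 32, stage=8, block='a')
print(Model(_t2, _out2).output_shape)               # (None, 28, 28, 32)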

In [11]:
def create_model_resnet19ss():
    
    img_input = Input(shape=(ROWS, COLS, 3))
    
    x = Conv2D(16, (3, 3), strides=(2, 2), name='conv1')(img_input)
    x = BatchNormalization(name='bn_conv1')(x)
    x = Activation('relu')(x)

    x = conv_block(x, 3, 16, stage=2, block='a')
    x = identity_block(x, 3, 16, stage=2, block='b')
    x = identity_block(x, 3, 16, stage=2, block='c')

    x = conv_block(x, 3, 32, stage=3, block='a')
    x = identity_block(x, 3, 32, stage=3, block='b')
    x = identity_block(x, 3, 32, stage=3, block='c')

    x = conv_block(x, 3, 64, stage=4, block='a')
    x = identity_block(x, 3, 64, stage=4, block='b')
    x = identity_block(x, 3, 64, stage=4, block='c')

#     x = conv_block(x, 3, 128, stage=5, block='a')
#     x = identity_block(x, 3, 128, stage=5, block='b')
#     x = identity_block(x, 3, 128, stage=5, block='c')

    x = GlobalAveragePooling2D()(x)
#     model.add(Dropout(0.8))
    x = Dense(len(CROP_CLASSES), activation='softmax')(x)

    model = Model(img_input, x)
    return model
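
# To inspect the resulting architecture (a sketch; this output is not shown in the
# original notebook):
_m = create_model_resnet19ss()
_m.summary()
print(_m.output_shape)                              # (None, 7): one probability per crop class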

In [15]:
#train from scratch

model = create_model_resnet19ss()

# compile the model
optimizer = Adam(lr=1e-3)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# train the model from scratch for up to 300 epochs
model.fit_generator(train_generator, steps_per_epoch=steps_per_epoch, epochs=300, verbose=1, 
                    callbacks=[model_checkpoint, tensorboard], 
                    validation_data=(X_valid,y_valid), class_weight=class_weight, workers=3, pickle_safe=True)


Epoch 1/300
27/28 [===========================>..] - ETA: 0s - loss: 8.3013 - acc: 0.1646  Epoch 00000: val_loss did not improve
28/28 [==============================] - 26s - loss: 8.2196 - acc: 0.1680 - val_loss: 2.8698 - val_acc: 0.0991
Epoch 2/300
27/28 [===========================>..] - ETA: 0s - loss: 5.1658 - acc: 0.2512 Epoch 00001: val_loss improved from 1.77674 to 1.59296, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.001-1.5930.hdf5
28/28 [==============================] - 30s - loss: 5.1907 - acc: 0.2503 - val_loss: 1.5930 - val_acc: 0.5375
Epoch 3/300
27/28 [===========================>..] - ETA: 0s - loss: 4.7794 - acc: 0.3310 Epoch 00002: val_loss did not improve
28/28 [==============================] - 23s - loss: 4.7517 - acc: 0.3312 - val_loss: 1.9681 - val_acc: 0.4155
Epoch 4/300
27/28 [===========================>..] - ETA: 0s - loss: 4.1066 - acc: 0.3365 Epoch 00003: val_loss did not improve
28/28 [==============================] - 23s - loss: 4.0843 - acc: 0.3357 - val_loss: 2.5625 - val_acc: 0.3227
Epoch 5/300
27/28 [===========================>..] - ETA: 0s - loss: 4.0187 - acc: 0.3715 Epoch 00004: val_loss did not improve
28/28 [==============================] - 23s - loss: 4.0289 - acc: 0.3722 - val_loss: 4.4134 - val_acc: 0.1931
Epoch 6/300
27/28 [===========================>..] - ETA: 0s - loss: 4.1549 - acc: 0.4036 Epoch 00005: val_loss did not improve
28/28 [==============================] - 23s - loss: 4.1394 - acc: 0.4057 - val_loss: 5.1614 - val_acc: 0.0407
Epoch 7/300
27/28 [===========================>..] - ETA: 0s - loss: 3.8332 - acc: 0.4019 Epoch 00006: val_loss did not improve
28/28 [==============================] - 23s - loss: 3.8212 - acc: 0.3987 - val_loss: 7.4046 - val_acc: 0.0292
Epoch 8/300
27/28 [===========================>..] - ETA: 0s - loss: 3.5482 - acc: 0.4421 Epoch 00007: val_loss did not improve
28/28 [==============================] - 23s - loss: 3.5368 - acc: 0.4406 - val_loss: 4.3551 - val_acc: 0.1194
Epoch 9/300
27/28 [===========================>..] - ETA: 0s - loss: 3.7997 - acc: 0.4280 Epoch 00008: val_loss did not improve
28/28 [==============================] - 23s - loss: 3.7927 - acc: 0.4305 - val_loss: 3.6132 - val_acc: 0.2402
Epoch 10/300
27/28 [===========================>..] - ETA: 0s - loss: 3.4070 - acc: 0.4731 Epoch 00009: val_loss did not improve
28/28 [==============================] - 23s - loss: 3.4308 - acc: 0.4746 - val_loss: 5.0971 - val_acc: 0.2020
Epoch 11/300
27/28 [===========================>..] - ETA: 0s - loss: 3.2031 - acc: 0.4502 Epoch 00010: val_loss did not improve
28/28 [==============================] - 23s - loss: 3.1810 - acc: 0.4531 - val_loss: 3.8130 - val_acc: 0.1639
Epoch 12/300
27/28 [===========================>..] - ETA: 0s - loss: 3.0369 - acc: 0.5194 Epoch 00011: val_loss did not improve
28/28 [==============================] - 23s - loss: 3.0578 - acc: 0.5198 - val_loss: 6.2129 - val_acc: 0.2058
Epoch 13/300
27/28 [===========================>..] - ETA: 0s - loss: 2.9428 - acc: 0.5116 Epoch 00012: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.9602 - acc: 0.5106 - val_loss: 4.0904 - val_acc: 0.1804
Epoch 14/300
27/28 [===========================>..] - ETA: 0s - loss: 2.7888 - acc: 0.5286 Epoch 00013: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.7944 - acc: 0.5276 - val_loss: 2.6224 - val_acc: 0.2872
Epoch 15/300
27/28 [===========================>..] - ETA: 0s - loss: 3.3928 - acc: 0.4913 Epoch 00014: val_loss did not improve
28/28 [==============================] - 23s - loss: 3.3585 - acc: 0.4911 - val_loss: 6.7451 - val_acc: 0.0928
Epoch 16/300
27/28 [===========================>..] - ETA: 0s - loss: 2.9339 - acc: 0.5191 Epoch 00015: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.9486 - acc: 0.5195 - val_loss: 2.4041 - val_acc: 0.4003
Epoch 17/300
27/28 [===========================>..] - ETA: 0s - loss: 2.8165 - acc: 0.5558 Epoch 00016: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.7995 - acc: 0.5572 - val_loss: 1.8992 - val_acc: 0.3977
Epoch 18/300
27/28 [===========================>..] - ETA: 0s - loss: 2.6266 - acc: 0.5573 Epoch 00017: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.6321 - acc: 0.5569 - val_loss: 1.8617 - val_acc: 0.4549
Epoch 19/300
27/28 [===========================>..] - ETA: 0s - loss: 2.4914 - acc: 0.5877 Epoch 00018: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.5006 - acc: 0.5820 - val_loss: 3.0959 - val_acc: 0.4231
Epoch 20/300
27/28 [===========================>..] - ETA: 0s - loss: 2.5318 - acc: 0.5975 Epoch 00019: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.5198 - acc: 0.5977 - val_loss: 2.3040 - val_acc: 0.4219
Epoch 21/300
27/28 [===========================>..] - ETA: 0s - loss: 2.4104 - acc: 0.6082 Epoch 00020: val_loss improved from 1.59296 to 1.41896, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.020-1.4190.hdf5
28/28 [==============================] - 23s - loss: 2.3889 - acc: 0.6052 - val_loss: 1.4190 - val_acc: 0.5070
Epoch 22/300
27/28 [===========================>..] - ETA: 0s - loss: 2.2196 - acc: 0.6128 Epoch 00021: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.2082 - acc: 0.6150 - val_loss: 1.4918 - val_acc: 0.4879
Epoch 23/300
27/28 [===========================>..] - ETA: 0s - loss: 2.4287 - acc: 0.6016 Epoch 00022: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.4344 - acc: 0.6016 - val_loss: 3.0647 - val_acc: 0.3291
Epoch 24/300
27/28 [===========================>..] - ETA: 0s - loss: 2.3877 - acc: 0.6045 Epoch 00023: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.3836 - acc: 0.6030 - val_loss: 1.5192 - val_acc: 0.4422
Epoch 25/300
27/28 [===========================>..] - ETA: 0s - loss: 2.2831 - acc: 0.6238 Epoch 00024: val_loss improved from 1.41896 to 1.39690, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.024-1.3969.hdf5
28/28 [==============================] - 23s - loss: 2.2755 - acc: 0.6230 - val_loss: 1.3969 - val_acc: 0.4867
Epoch 26/300
27/28 [===========================>..] - ETA: 0s - loss: 2.1788 - acc: 0.6100 Epoch 00025: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.2051 - acc: 0.6091 - val_loss: 3.0786 - val_acc: 0.3977
Epoch 27/300
27/28 [===========================>..] - ETA: 0s - loss: 2.3199 - acc: 0.6322 Epoch 00026: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.3202 - acc: 0.6336 - val_loss: 1.4227 - val_acc: 0.4968
Epoch 28/300
27/28 [===========================>..] - ETA: 0s - loss: 2.1418 - acc: 0.6218 Epoch 00027: val_loss improved from 1.39690 to 1.29817, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.027-1.2982.hdf5
28/28 [==============================] - 23s - loss: 2.1197 - acc: 0.6228 - val_loss: 1.2982 - val_acc: 0.4727
Epoch 29/300
27/28 [===========================>..] - ETA: 0s - loss: 2.2387 - acc: 0.6340 Epoch 00028: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.2480 - acc: 0.6320 - val_loss: 2.3219 - val_acc: 0.3202
Epoch 30/300
27/28 [===========================>..] - ETA: 0s - loss: 2.2099 - acc: 0.6166 Epoch 00029: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.2159 - acc: 0.6172 - val_loss: 5.7114 - val_acc: 0.1436
Epoch 31/300
27/28 [===========================>..] - ETA: 0s - loss: 2.0376 - acc: 0.6363 Epoch 00030: val_loss did not improve
28/28 [==============================] - 23s - loss: 2.0393 - acc: 0.6390 - val_loss: 1.4225 - val_acc: 0.4892
Epoch 32/300
27/28 [===========================>..] - ETA: 0s - loss: 1.8254 - acc: 0.6696 Epoch 00031: val_loss improved from 1.29817 to 1.12081, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.031-1.1208.hdf5
28/28 [==============================] - 23s - loss: 1.8205 - acc: 0.6694 - val_loss: 1.1208 - val_acc: 0.5756
Epoch 33/300
27/28 [===========================>..] - ETA: 0s - loss: 1.8812 - acc: 0.6638 Epoch 00032: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.8714 - acc: 0.6660 - val_loss: 1.5948 - val_acc: 0.4612
Epoch 34/300
27/28 [===========================>..] - ETA: 0s - loss: 1.8937 - acc: 0.6745 Epoch 00033: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.9085 - acc: 0.6708 - val_loss: 1.3145 - val_acc: 0.5362
Epoch 35/300
27/28 [===========================>..] - ETA: 0s - loss: 1.8867 - acc: 0.6635 Epoch 00034: val_loss improved from 1.12081 to 0.95052, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.034-0.9505.hdf5
28/28 [==============================] - 23s - loss: 1.8874 - acc: 0.6643 - val_loss: 0.9505 - val_acc: 0.6353
Epoch 36/300
27/28 [===========================>..] - ETA: 0s - loss: 1.8249 - acc: 0.6765 Epoch 00035: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.8075 - acc: 0.6766 - val_loss: 0.9978 - val_acc: 0.6112
Epoch 37/300
27/28 [===========================>..] - ETA: 0s - loss: 1.7960 - acc: 0.6843 Epoch 00036: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.7973 - acc: 0.6805 - val_loss: 1.3676 - val_acc: 0.5235
Epoch 38/300
27/28 [===========================>..] - ETA: 0s - loss: 1.9989 - acc: 0.6510 Epoch 00037: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.9965 - acc: 0.6512 - val_loss: 1.3451 - val_acc: 0.5159
Epoch 39/300
27/28 [===========================>..] - ETA: 0s - loss: 1.6950 - acc: 0.6942 Epoch 00038: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.7134 - acc: 0.6942 - val_loss: 1.3160 - val_acc: 0.5324
Epoch 40/300
27/28 [===========================>..] - ETA: 0s - loss: 1.7160 - acc: 0.7037 Epoch 00039: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.7012 - acc: 0.7037 - val_loss: 1.5768 - val_acc: 0.4574
Epoch 41/300
27/28 [===========================>..] - ETA: 0s - loss: 1.7819 - acc: 0.6918 Epoch 00040: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.7890 - acc: 0.6903 - val_loss: 1.0896 - val_acc: 0.5820
Epoch 42/300
27/28 [===========================>..] - ETA: 0s - loss: 1.7722 - acc: 0.7052 Epoch 00041: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.7770 - acc: 0.7048 - val_loss: 1.7936 - val_acc: 0.4651
Epoch 43/300
27/28 [===========================>..] - ETA: 0s - loss: 1.6637 - acc: 0.7005 Epoch 00042: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.6586 - acc: 0.7017 - val_loss: 1.4883 - val_acc: 0.4600
Epoch 44/300
27/28 [===========================>..] - ETA: 0s - loss: 1.5745 - acc: 0.7211 Epoch 00043: val_loss improved from 0.95052 to 0.91395, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.043-0.9140.hdf5
28/28 [==============================] - 23s - loss: 1.5841 - acc: 0.7188 - val_loss: 0.9140 - val_acc: 0.6645
Epoch 45/300
27/28 [===========================>..] - ETA: 0s - loss: 1.5385 - acc: 0.7164 Epoch 00044: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.5330 - acc: 0.7168 - val_loss: 1.1267 - val_acc: 0.5718
Epoch 46/300
27/28 [===========================>..] - ETA: 0s - loss: 1.6335 - acc: 0.7182 Epoch 00045: val_loss improved from 0.91395 to 0.86563, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.045-0.8656.hdf5
28/28 [==============================] - 23s - loss: 1.6259 - acc: 0.7182 - val_loss: 0.8656 - val_acc: 0.6620
Epoch 47/300
27/28 [===========================>..] - ETA: 0s - loss: 1.5069 - acc: 0.7225 Epoch 00046: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.4936 - acc: 0.7224 - val_loss: 0.9476 - val_acc: 0.6734
Epoch 48/300
27/28 [===========================>..] - ETA: 0s - loss: 1.8646 - acc: 0.7075 Epoch 00047: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.8439 - acc: 0.7084 - val_loss: 1.6605 - val_acc: 0.5654
Epoch 49/300
27/28 [===========================>..] - ETA: 0s - loss: 1.4898 - acc: 0.7231 Epoch 00048: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.4854 - acc: 0.7241 - val_loss: 1.0184 - val_acc: 0.6569
Epoch 50/300
27/28 [===========================>..] - ETA: 0s - loss: 1.4277 - acc: 0.7410 Epoch 00049: val_loss improved from 0.86563 to 0.85328, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.049-0.8533.hdf5
28/28 [==============================] - 23s - loss: 1.4223 - acc: 0.7414 - val_loss: 0.8533 - val_acc: 0.6950
Epoch 51/300
27/28 [===========================>..] - ETA: 0s - loss: 1.4618 - acc: 0.7373 Epoch 00050: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.4506 - acc: 0.7383 - val_loss: 0.9772 - val_acc: 0.6188
Epoch 52/300
27/28 [===========================>..] - ETA: 0s - loss: 1.4068 - acc: 0.7254 Epoch 00051: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.4236 - acc: 0.7224 - val_loss: 1.1383 - val_acc: 0.5680
Epoch 53/300
27/28 [===========================>..] - ETA: 0s - loss: 1.5001 - acc: 0.7237 Epoch 00052: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.4919 - acc: 0.7243 - val_loss: 1.9455 - val_acc: 0.4269
Epoch 54/300
27/28 [===========================>..] - ETA: 0s - loss: 1.4958 - acc: 0.7234 Epoch 00053: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.4863 - acc: 0.7254 - val_loss: 1.1866 - val_acc: 0.5349
Epoch 55/300
27/28 [===========================>..] - ETA: 0s - loss: 1.3415 - acc: 0.7526 Epoch 00054: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.3495 - acc: 0.7500 - val_loss: 1.0985 - val_acc: 0.5959
Epoch 56/300
27/28 [===========================>..] - ETA: 0s - loss: 1.3363 - acc: 0.7613 Epoch 00055: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.3255 - acc: 0.7617 - val_loss: 1.3979 - val_acc: 0.5997
Epoch 57/300
27/28 [===========================>..] - ETA: 0s - loss: 1.4159 - acc: 0.7483 Epoch 00056: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.4078 - acc: 0.7478 - val_loss: 1.3310 - val_acc: 0.5565
Epoch 58/300
27/28 [===========================>..] - ETA: 0s - loss: 1.3324 - acc: 0.7590 Epoch 00057: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.3373 - acc: 0.7589 - val_loss: 1.0158 - val_acc: 0.6429
Epoch 59/300
27/28 [===========================>..] - ETA: 0s - loss: 1.3506 - acc: 0.7506 Epoch 00058: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.3584 - acc: 0.7492 - val_loss: 1.1975 - val_acc: 0.5642
Epoch 60/300
27/28 [===========================>..] - ETA: 0s - loss: 1.2966 - acc: 0.7593 Epoch 00059: val_loss improved from 0.85328 to 0.78846, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.059-0.7885.hdf5
28/28 [==============================] - 23s - loss: 1.3004 - acc: 0.7598 - val_loss: 0.7885 - val_acc: 0.6900
Epoch 61/300
27/28 [===========================>..] - ETA: 0s - loss: 1.2172 - acc: 0.7760 Epoch 00060: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.2107 - acc: 0.7751 - val_loss: 0.8921 - val_acc: 0.6493
Epoch 62/300
27/28 [===========================>..] - ETA: 0s - loss: 1.3884 - acc: 0.7546 Epoch 00061: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.3899 - acc: 0.7545 - val_loss: 1.3611 - val_acc: 0.6734
Epoch 63/300
27/28 [===========================>..] - ETA: 0s - loss: 1.3392 - acc: 0.7639 Epoch 00062: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.3401 - acc: 0.7631 - val_loss: 1.0164 - val_acc: 0.6429
Epoch 64/300
27/28 [===========================>..] - ETA: 0s - loss: 1.2697 - acc: 0.7624 Epoch 00063: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.2658 - acc: 0.7598 - val_loss: 1.2072 - val_acc: 0.5565
Epoch 65/300
27/28 [===========================>..] - ETA: 0s - loss: 1.4153 - acc: 0.7422 Epoch 00064: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.3972 - acc: 0.7453 - val_loss: 1.2919 - val_acc: 0.6404
Epoch 66/300
27/28 [===========================>..] - ETA: 0s - loss: 1.2510 - acc: 0.7726 Epoch 00065: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.2576 - acc: 0.7706 - val_loss: 1.4530 - val_acc: 0.5476
Epoch 67/300
27/28 [===========================>..] - ETA: 0s - loss: 1.1822 - acc: 0.7775 Epoch 00066: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.1721 - acc: 0.7799 - val_loss: 1.0640 - val_acc: 0.5947
Epoch 68/300
27/28 [===========================>..] - ETA: 0s - loss: 1.2619 - acc: 0.7708 Epoch 00067: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.2541 - acc: 0.7695 - val_loss: 2.0994 - val_acc: 0.4333
Epoch 69/300
27/28 [===========================>..] - ETA: 0s - loss: 1.2704 - acc: 0.7679 Epoch 00068: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.2692 - acc: 0.7676 - val_loss: 1.7251 - val_acc: 0.5057
Epoch 70/300
27/28 [===========================>..] - ETA: 0s - loss: 1.1512 - acc: 0.7931 Epoch 00069: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.1466 - acc: 0.7938 - val_loss: 1.4914 - val_acc: 0.5260
Epoch 71/300
27/28 [===========================>..] - ETA: 0s - loss: 1.0765 - acc: 0.8018 Epoch 00070: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.0783 - acc: 0.7997 - val_loss: 1.3531 - val_acc: 0.5527
Epoch 72/300
27/28 [===========================>..] - ETA: 0s - loss: 1.1308 - acc: 0.7879 Epoch 00071: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.1238 - acc: 0.7891 - val_loss: 1.1381 - val_acc: 0.6595
Epoch 73/300
27/28 [===========================>..] - ETA: 0s - loss: 1.0833 - acc: 0.7989 Epoch 00072: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.0972 - acc: 0.7963 - val_loss: 3.2168 - val_acc: 0.3405
Epoch 74/300
27/28 [===========================>..] - ETA: 0s - loss: 1.2349 - acc: 0.7711 Epoch 00073: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.2400 - acc: 0.7718 - val_loss: 1.1318 - val_acc: 0.6302
Epoch 75/300
27/28 [===========================>..] - ETA: 0s - loss: 1.1396 - acc: 0.7833 Epoch 00074: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.1457 - acc: 0.7799 - val_loss: 2.6691 - val_acc: 0.5337
Epoch 76/300
27/28 [===========================>..] - ETA: 0s - loss: 1.1398 - acc: 0.7989 Epoch 00075: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.1469 - acc: 0.7941 - val_loss: 0.9795 - val_acc: 0.6328
Epoch 77/300
27/28 [===========================>..] - ETA: 0s - loss: 1.3379 - acc: 0.7810 Epoch 00076: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.3458 - acc: 0.7799 - val_loss: 1.3832 - val_acc: 0.6379
Epoch 78/300
27/28 [===========================>..] - ETA: 0s - loss: 1.2911 - acc: 0.7506 Epoch 00077: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.3146 - acc: 0.7508 - val_loss: 1.2207 - val_acc: 0.6277
Epoch 79/300
27/28 [===========================>..] - ETA: 0s - loss: 1.0539 - acc: 0.8038 Epoch 00078: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.0508 - acc: 0.8044 - val_loss: 0.8840 - val_acc: 0.6950
Epoch 80/300
27/28 [===========================>..] - ETA: 0s - loss: 1.0816 - acc: 0.8035 Epoch 00079: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.0811 - acc: 0.8030 - val_loss: 0.9219 - val_acc: 0.6595
Epoch 81/300
27/28 [===========================>..] - ETA: 0s - loss: 1.0361 - acc: 0.8001 Epoch 00080: val_loss improved from 0.78846 to 0.66192, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.080-0.6619.hdf5
28/28 [==============================] - 23s - loss: 1.0437 - acc: 0.8013 - val_loss: 0.6619 - val_acc: 0.7433
Epoch 82/300
27/28 [===========================>..] - ETA: 0s - loss: 1.0253 - acc: 0.8076 Epoch 00081: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.0153 - acc: 0.8092 - val_loss: 1.3023 - val_acc: 0.5883
Epoch 83/300
27/28 [===========================>..] - ETA: 0s - loss: 0.9249 - acc: 0.8171 Epoch 00082: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.9349 - acc: 0.8167 - val_loss: 1.0856 - val_acc: 0.6239
Epoch 84/300
27/28 [===========================>..] - ETA: 0s - loss: 0.9373 - acc: 0.8166 Epoch 00083: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.9304 - acc: 0.8170 - val_loss: 0.8783 - val_acc: 0.6709
Epoch 85/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8639 - acc: 0.8319 Epoch 00084: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8588 - acc: 0.8320 - val_loss: 1.0164 - val_acc: 0.6493
Epoch 86/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8345 - acc: 0.8351 Epoch 00085: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8515 - acc: 0.8365 - val_loss: 0.7508 - val_acc: 0.6925
Epoch 87/300
27/28 [===========================>..] - ETA: 0s - loss: 0.9078 - acc: 0.8273 Epoch 00086: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.9121 - acc: 0.8278 - val_loss: 1.4790 - val_acc: 0.5680
Epoch 88/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8516 - acc: 0.8299 Epoch 00087: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8427 - acc: 0.8320 - val_loss: 1.4514 - val_acc: 0.5667
Epoch 89/300
27/28 [===========================>..] - ETA: 0s - loss: 0.9094 - acc: 0.8223 Epoch 00088: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.9118 - acc: 0.8211 - val_loss: 0.9086 - val_acc: 0.6747
Epoch 90/300
27/28 [===========================>..] - ETA: 0s - loss: 0.9022 - acc: 0.8374 Epoch 00089: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.9052 - acc: 0.8368 - val_loss: 0.9983 - val_acc: 0.6633
Epoch 91/300
27/28 [===========================>..] - ETA: 0s - loss: 1.0217 - acc: 0.8296 Epoch 00090: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.0151 - acc: 0.8290 - val_loss: 0.7178 - val_acc: 0.7446
Epoch 92/300
27/28 [===========================>..] - ETA: 0s - loss: 0.9447 - acc: 0.8218 Epoch 00091: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.9442 - acc: 0.8206 - val_loss: 1.4024 - val_acc: 0.5426
Epoch 93/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8919 - acc: 0.8206 Epoch 00092: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8881 - acc: 0.8217 - val_loss: 0.6875 - val_acc: 0.7649
Epoch 94/300
27/28 [===========================>..] - ETA: 0s - loss: 0.9115 - acc: 0.8328 Epoch 00093: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.9093 - acc: 0.8334 - val_loss: 1.9118 - val_acc: 0.5337
Epoch 95/300
27/28 [===========================>..] - ETA: 0s - loss: 0.9144 - acc: 0.8409 Epoch 00094: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.9013 - acc: 0.8438 - val_loss: 0.8370 - val_acc: 0.7014
Epoch 96/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8910 - acc: 0.8354 Epoch 00095: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8792 - acc: 0.8382 - val_loss: 1.1258 - val_acc: 0.6925
Epoch 97/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7233 - acc: 0.8539 Epoch 00096: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7304 - acc: 0.8532 - val_loss: 1.0196 - val_acc: 0.6671
Epoch 98/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7736 - acc: 0.8406 Epoch 00097: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7696 - acc: 0.8424 - val_loss: 0.8738 - val_acc: 0.7179
Epoch 99/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8666 - acc: 0.8403 Epoch 00098: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8587 - acc: 0.8410 - val_loss: 1.2394 - val_acc: 0.6150
Epoch 100/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8417 - acc: 0.8354 Epoch 00099: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8453 - acc: 0.8365 - val_loss: 1.7759 - val_acc: 0.4917
Epoch 101/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8457 - acc: 0.8464 Epoch 00100: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8480 - acc: 0.8457 - val_loss: 1.2088 - val_acc: 0.6163
Epoch 102/300
27/28 [===========================>..] - ETA: 0s - loss: 0.9454 - acc: 0.8215 Epoch 00101: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.9477 - acc: 0.8220 - val_loss: 1.3088 - val_acc: 0.5845
Epoch 103/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8803 - acc: 0.8203 Epoch 00102: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8798 - acc: 0.8209 - val_loss: 1.9526 - val_acc: 0.4701
Epoch 104/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7544 - acc: 0.8545 Epoch 00103: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7526 - acc: 0.8541 - val_loss: 2.0935 - val_acc: 0.4460
Epoch 105/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8419 - acc: 0.8391 Epoch 00104: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8390 - acc: 0.8398 - val_loss: 1.4574 - val_acc: 0.6213
Epoch 106/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7407 - acc: 0.8686 Epoch 00105: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7475 - acc: 0.8680 - val_loss: 1.0779 - val_acc: 0.6417
Epoch 107/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8541 - acc: 0.8336 Epoch 00106: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8571 - acc: 0.8348 - val_loss: 1.8367 - val_acc: 0.5222
Epoch 108/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7503 - acc: 0.8559 Epoch 00107: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7507 - acc: 0.8549 - val_loss: 0.9508 - val_acc: 0.7154
Epoch 109/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7051 - acc: 0.8533 Epoch 00108: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7184 - acc: 0.8510 - val_loss: 1.2120 - val_acc: 0.6290
Epoch 110/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6525 - acc: 0.8724 Epoch 00109: val_loss improved from 0.66192 to 0.65517, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.109-0.6552.hdf5
28/28 [==============================] - 23s - loss: 0.6465 - acc: 0.8728 - val_loss: 0.6552 - val_acc: 0.7789
Epoch 111/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7108 - acc: 0.8767 Epoch 00110: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7334 - acc: 0.8764 - val_loss: 0.9080 - val_acc: 0.6798
Epoch 112/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7360 - acc: 0.8507 Epoch 00111: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7273 - acc: 0.8521 - val_loss: 1.1396 - val_acc: 0.6823
Epoch 113/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8520 - acc: 0.8403 Epoch 00112: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8488 - acc: 0.8410 - val_loss: 0.8536 - val_acc: 0.7357
Epoch 114/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6906 - acc: 0.8695 Epoch 00113: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6844 - acc: 0.8700 - val_loss: 0.7192 - val_acc: 0.7637
Epoch 115/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6185 - acc: 0.8715 Epoch 00114: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6236 - acc: 0.8725 - val_loss: 1.0778 - val_acc: 0.6709
Epoch 116/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6002 - acc: 0.8834 Epoch 00115: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5921 - acc: 0.8842 - val_loss: 1.5764 - val_acc: 0.5807
Epoch 117/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6738 - acc: 0.8736 Epoch 00116: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6780 - acc: 0.8730 - val_loss: 0.9575 - val_acc: 0.7243
Epoch 118/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5643 - acc: 0.8869 Epoch 00117: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5703 - acc: 0.8856 - val_loss: 0.8683 - val_acc: 0.7306
Epoch 119/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7225 - acc: 0.8657 Epoch 00118: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7322 - acc: 0.8650 - val_loss: 2.3233 - val_acc: 0.4651
Epoch 120/300
27/28 [===========================>..] - ETA: 0s - loss: 1.0647 - acc: 0.8293 Epoch 00119: val_loss did not improve
28/28 [==============================] - 23s - loss: 1.0497 - acc: 0.8295 - val_loss: 1.6952 - val_acc: 0.5680
Epoch 121/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7744 - acc: 0.8652 Epoch 00120: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7675 - acc: 0.8664 - val_loss: 0.9219 - val_acc: 0.7103
Epoch 122/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6914 - acc: 0.8707 Epoch 00121: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6872 - acc: 0.8705 - val_loss: 1.2860 - val_acc: 0.7014
Epoch 123/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5902 - acc: 0.8877 Epoch 00122: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5833 - acc: 0.8878 - val_loss: 0.6605 - val_acc: 0.7624
Epoch 124/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7846 - acc: 0.8683 Epoch 00123: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8199 - acc: 0.8666 - val_loss: 2.5782 - val_acc: 0.5121
Epoch 125/300
27/28 [===========================>..] - ETA: 0s - loss: 0.9152 - acc: 0.8377 Epoch 00124: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.9025 - acc: 0.8373 - val_loss: 1.6835 - val_acc: 0.6086
Epoch 126/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6839 - acc: 0.8736 Epoch 00125: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6978 - acc: 0.8730 - val_loss: 0.8321 - val_acc: 0.7166
Epoch 127/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5859 - acc: 0.8837 Epoch 00126: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5829 - acc: 0.8845 - val_loss: 1.4546 - val_acc: 0.5832
Epoch 128/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6075 - acc: 0.8828 Epoch 00127: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6011 - acc: 0.8842 - val_loss: 0.9088 - val_acc: 0.7192
Epoch 129/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6033 - acc: 0.8898 Epoch 00128: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6169 - acc: 0.8867 - val_loss: 0.7892 - val_acc: 0.7370
Epoch 130/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5271 - acc: 0.8918 Epoch 00129: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5317 - acc: 0.8915 - val_loss: 1.2097 - val_acc: 0.6277
Epoch 131/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5658 - acc: 0.8895 Epoch 00130: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5601 - acc: 0.8898 - val_loss: 1.0033 - val_acc: 0.7103
Epoch 132/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5126 - acc: 0.9051 Epoch 00131: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5155 - acc: 0.9032 - val_loss: 0.8115 - val_acc: 0.7255
Epoch 133/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4958 - acc: 0.9097 Epoch 00132: val_loss improved from 0.65517 to 0.61207, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.132-0.6121.hdf5
28/28 [==============================] - 23s - loss: 0.4962 - acc: 0.9088 - val_loss: 0.6121 - val_acc: 0.7942
Epoch 134/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5724 - acc: 0.8845 Epoch 00133: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5658 - acc: 0.8856 - val_loss: 0.8289 - val_acc: 0.7255
Epoch 135/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6887 - acc: 0.8721 Epoch 00134: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6823 - acc: 0.8733 - val_loss: 1.5117 - val_acc: 0.6290
Epoch 136/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6162 - acc: 0.8944 Epoch 00135: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6114 - acc: 0.8943 - val_loss: 0.8184 - val_acc: 0.7306
Epoch 137/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6586 - acc: 0.8756 Epoch 00136: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6548 - acc: 0.8758 - val_loss: 1.5968 - val_acc: 0.5743
Epoch 138/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5826 - acc: 0.8776 Epoch 00137: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5956 - acc: 0.8767 - val_loss: 1.1762 - val_acc: 0.6213
Epoch 139/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5905 - acc: 0.8837 Epoch 00138: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5950 - acc: 0.8839 - val_loss: 1.1389 - val_acc: 0.6417
Epoch 140/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5822 - acc: 0.8837 Epoch 00139: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5779 - acc: 0.8825 - val_loss: 0.8906 - val_acc: 0.7459
Epoch 141/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5103 - acc: 0.8944 Epoch 00140: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5046 - acc: 0.8945 - val_loss: 0.9173 - val_acc: 0.7294
Epoch 142/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6738 - acc: 0.8825 Epoch 00141: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6686 - acc: 0.8828 - val_loss: 0.9210 - val_acc: 0.7344
Epoch 143/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5953 - acc: 0.8793 Epoch 00142: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5910 - acc: 0.8800 - val_loss: 1.3734 - val_acc: 0.6125
Epoch 144/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5118 - acc: 0.9005 Epoch 00143: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5132 - acc: 0.9007 - val_loss: 1.0884 - val_acc: 0.6226
Epoch 145/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4725 - acc: 0.8996 Epoch 00144: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4656 - acc: 0.9018 - val_loss: 1.2415 - val_acc: 0.6239
Epoch 146/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5218 - acc: 0.9002 Epoch 00145: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5222 - acc: 0.8998 - val_loss: 1.0172 - val_acc: 0.7090
Epoch 147/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6034 - acc: 0.8903 Epoch 00146: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6162 - acc: 0.8890 - val_loss: 0.8309 - val_acc: 0.7281
Epoch 148/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5869 - acc: 0.8958 Epoch 00147: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5854 - acc: 0.8951 - val_loss: 1.1183 - val_acc: 0.6950
Epoch 149/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5673 - acc: 0.8909 Epoch 00148: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5714 - acc: 0.8915 - val_loss: 0.8338 - val_acc: 0.7433
Epoch 150/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4512 - acc: 0.9138 Epoch 00149: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4532 - acc: 0.9132 - val_loss: 0.9194 - val_acc: 0.7090
Epoch 151/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5253 - acc: 0.9045 Epoch 00150: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5198 - acc: 0.9057 - val_loss: 0.9471 - val_acc: 0.7128
Epoch 152/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6348 - acc: 0.8782 Epoch 00151: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6261 - acc: 0.8783 - val_loss: 1.6484 - val_acc: 0.5832
Epoch 153/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6221 - acc: 0.8811 Epoch 00152: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6326 - acc: 0.8817 - val_loss: 1.0757 - val_acc: 0.6391
Epoch 154/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4007 - acc: 0.9181 Epoch 00153: val_loss improved from 0.61207 to 0.55799, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.153-0.5580.hdf5
28/28 [==============================] - 23s - loss: 0.4019 - acc: 0.9166 - val_loss: 0.5580 - val_acc: 0.8030
Epoch 155/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4703 - acc: 0.9149 Epoch 00154: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4652 - acc: 0.9160 - val_loss: 0.7677 - val_acc: 0.7738
Epoch 156/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4256 - acc: 0.9146 Epoch 00155: val_loss improved from 0.55799 to 0.44078, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.155-0.4408.hdf5
28/28 [==============================] - 23s - loss: 0.4224 - acc: 0.9135 - val_loss: 0.4408 - val_acc: 0.8602
Epoch 157/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3207 - acc: 0.9358 Epoch 00156: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3155 - acc: 0.9372 - val_loss: 0.6387 - val_acc: 0.7954
Epoch 158/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3371 - acc: 0.9271 Epoch 00157: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3388 - acc: 0.9266 - val_loss: 0.6150 - val_acc: 0.7814
Epoch 159/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3803 - acc: 0.9268 Epoch 00158: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3764 - acc: 0.9272 - val_loss: 1.1024 - val_acc: 0.6252
Epoch 160/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3664 - acc: 0.9239 Epoch 00159: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3653 - acc: 0.9230 - val_loss: 0.5882 - val_acc: 0.8361
Epoch 161/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4453 - acc: 0.9193 Epoch 00160: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4469 - acc: 0.9188 - val_loss: 0.8662 - val_acc: 0.7408
Epoch 162/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4423 - acc: 0.9155 Epoch 00161: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4432 - acc: 0.9146 - val_loss: 2.2293 - val_acc: 0.4981
Epoch 163/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4153 - acc: 0.9181 Epoch 00162: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4090 - acc: 0.9188 - val_loss: 0.6983 - val_acc: 0.7751
Epoch 164/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4888 - acc: 0.9086 Epoch 00163: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4841 - acc: 0.9079 - val_loss: 0.8761 - val_acc: 0.7357
Epoch 165/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4972 - acc: 0.9010 Epoch 00164: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4985 - acc: 0.9021 - val_loss: 0.9170 - val_acc: 0.7065
Epoch 166/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4790 - acc: 0.9094 Epoch 00165: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4841 - acc: 0.9071 - val_loss: 0.6500 - val_acc: 0.7929
Epoch 167/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5181 - acc: 0.9083 Epoch 00166: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5175 - acc: 0.9079 - val_loss: 0.8835 - val_acc: 0.7535
Epoch 168/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4416 - acc: 0.9094 Epoch 00167: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4380 - acc: 0.9090 - val_loss: 0.8198 - val_acc: 0.7332
Epoch 169/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3668 - acc: 0.9306 Epoch 00168: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3708 - acc: 0.9294 - val_loss: 1.5179 - val_acc: 0.6239
Epoch 170/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3635 - acc: 0.9216 Epoch 00169: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3636 - acc: 0.9222 - val_loss: 1.1159 - val_acc: 0.6912
Epoch 171/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3776 - acc: 0.9227 Epoch 00170: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3898 - acc: 0.9235 - val_loss: 0.5161 - val_acc: 0.8437
Epoch 172/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4381 - acc: 0.9158 Epoch 00171: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4322 - acc: 0.9166 - val_loss: 1.1518 - val_acc: 0.6722
Epoch 173/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4381 - acc: 0.9138 Epoch 00172: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4348 - acc: 0.9143 - val_loss: 0.4741 - val_acc: 0.8501
Epoch 174/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4453 - acc: 0.9201 Epoch 00173: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4377 - acc: 0.9213 - val_loss: 1.3204 - val_acc: 0.6341
Epoch 175/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4286 - acc: 0.9230 Epoch 00174: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4266 - acc: 0.9222 - val_loss: 1.2013 - val_acc: 0.6684
Epoch 176/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3850 - acc: 0.9198 Epoch 00175: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3830 - acc: 0.9205 - val_loss: 0.8682 - val_acc: 0.7459
Epoch 177/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3260 - acc: 0.9424 Epoch 00176: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3229 - acc: 0.9428 - val_loss: 0.8657 - val_acc: 0.7713
Epoch 178/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3697 - acc: 0.9219 Epoch 00177: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3694 - acc: 0.9224 - val_loss: 0.7344 - val_acc: 0.7687
Epoch 179/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3314 - acc: 0.9265 Epoch 00178: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3359 - acc: 0.9272 - val_loss: 1.7811 - val_acc: 0.5997
Epoch 180/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3532 - acc: 0.9326 Epoch 00179: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3510 - acc: 0.9325 - val_loss: 1.1682 - val_acc: 0.7294
Epoch 181/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4460 - acc: 0.9112 Epoch 00180: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4396 - acc: 0.9102 - val_loss: 0.8757 - val_acc: 0.7421
Epoch 182/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2961 - acc: 0.9401 Epoch 00181: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2942 - acc: 0.9386 - val_loss: 0.8589 - val_acc: 0.7789
Epoch 183/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3171 - acc: 0.9413 Epoch 00182: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3179 - acc: 0.9403 - val_loss: 0.6305 - val_acc: 0.8348
Epoch 184/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4257 - acc: 0.9210 Epoch 00183: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4253 - acc: 0.9219 - val_loss: 1.5884 - val_acc: 0.6633
Epoch 185/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4387 - acc: 0.9222 Epoch 00184: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4351 - acc: 0.9219 - val_loss: 0.5337 - val_acc: 0.8259
Epoch 186/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4609 - acc: 0.9178 Epoch 00185: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4649 - acc: 0.9160 - val_loss: 1.8900 - val_acc: 0.5540
Epoch 187/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5073 - acc: 0.8996 Epoch 00186: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5010 - acc: 0.9015 - val_loss: 0.6478 - val_acc: 0.7967
Epoch 188/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4320 - acc: 0.9132 Epoch 00187: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4300 - acc: 0.9135 - val_loss: 0.5070 - val_acc: 0.8437
Epoch 189/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2958 - acc: 0.9410 Epoch 00188: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2947 - acc: 0.9411 - val_loss: 0.6037 - val_acc: 0.8437
Epoch 190/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2967 - acc: 0.9485 Epoch 00189: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2961 - acc: 0.9489 - val_loss: 0.6485 - val_acc: 0.8030
Epoch 191/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3034 - acc: 0.9372 Epoch 00190: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3087 - acc: 0.9367 - val_loss: 0.7308 - val_acc: 0.7738
Epoch 192/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3395 - acc: 0.9303 Epoch 00191: val_loss improved from 0.44078 to 0.43931, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.191-0.4393.hdf5
28/28 [==============================] - 23s - loss: 0.3390 - acc: 0.9314 - val_loss: 0.4393 - val_acc: 0.8729
Epoch 193/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3621 - acc: 0.9294 Epoch 00192: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3588 - acc: 0.9302 - val_loss: 0.6313 - val_acc: 0.8043
Epoch 194/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3437 - acc: 0.9314 Epoch 00193: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3629 - acc: 0.9300 - val_loss: 1.9786 - val_acc: 0.7166
Epoch 195/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4518 - acc: 0.9155 Epoch 00194: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4506 - acc: 0.9149 - val_loss: 0.7588 - val_acc: 0.7675
Epoch 196/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4568 - acc: 0.9175 Epoch 00195: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4574 - acc: 0.9160 - val_loss: 1.3757 - val_acc: 0.6773
Epoch 197/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3211 - acc: 0.9326 Epoch 00196: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3205 - acc: 0.9308 - val_loss: 1.0387 - val_acc: 0.7217
Epoch 198/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2668 - acc: 0.9459 Epoch 00197: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2637 - acc: 0.9453 - val_loss: 0.6052 - val_acc: 0.8234
Epoch 199/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4025 - acc: 0.9311 Epoch 00198: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3972 - acc: 0.9305 - val_loss: 0.7848 - val_acc: 0.7814
Epoch 200/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5106 - acc: 0.9028 Epoch 00199: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5054 - acc: 0.9035 - val_loss: 1.1938 - val_acc: 0.6989
Epoch 201/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5293 - acc: 0.9216 Epoch 00200: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5270 - acc: 0.9191 - val_loss: 0.7259 - val_acc: 0.7954
Epoch 202/300
27/28 [===========================>..] - ETA: 0s - loss: 0.6469 - acc: 0.8935 Epoch 00201: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.6495 - acc: 0.8931 - val_loss: 1.4235 - val_acc: 0.7471
Epoch 203/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7173 - acc: 0.8834 Epoch 00202: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7222 - acc: 0.8820 - val_loss: 1.7609 - val_acc: 0.6290
Epoch 204/300
27/28 [===========================>..] - ETA: 0s - loss: 0.8325 - acc: 0.8698 Epoch 00203: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.8264 - acc: 0.8714 - val_loss: 1.9257 - val_acc: 0.6036
Epoch 205/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5927 - acc: 0.8941 Epoch 00204: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5889 - acc: 0.8929 - val_loss: 0.9834 - val_acc: 0.7598
Epoch 206/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4644 - acc: 0.9057 Epoch 00205: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4605 - acc: 0.9051 - val_loss: 0.7978 - val_acc: 0.7421
Epoch 207/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3700 - acc: 0.9300 Epoch 00206: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3718 - acc: 0.9302 - val_loss: 0.6876 - val_acc: 0.8081
Epoch 208/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2711 - acc: 0.9430 Epoch 00207: val_loss improved from 0.43931 to 0.37999, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.207-0.3800.hdf5
28/28 [==============================] - 23s - loss: 0.2673 - acc: 0.9434 - val_loss: 0.3800 - val_acc: 0.8882
Epoch 209/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3416 - acc: 0.9407 Epoch 00208: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3406 - acc: 0.9397 - val_loss: 0.7661 - val_acc: 0.7891
Epoch 210/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3503 - acc: 0.9288 Epoch 00209: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3497 - acc: 0.9286 - val_loss: 0.8799 - val_acc: 0.7624
Epoch 211/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2868 - acc: 0.9424 Epoch 00210: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2845 - acc: 0.9420 - val_loss: 0.7046 - val_acc: 0.7649
Epoch 212/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2976 - acc: 0.9433 Epoch 00211: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2990 - acc: 0.9425 - val_loss: 1.1582 - val_acc: 0.6861
Epoch 213/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2989 - acc: 0.9407 Epoch 00212: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3000 - acc: 0.9400 - val_loss: 0.5855 - val_acc: 0.8132
Epoch 214/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2524 - acc: 0.9502 Epoch 00213: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2519 - acc: 0.9495 - val_loss: 0.4489 - val_acc: 0.8463
Epoch 215/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2018 - acc: 0.9589 Epoch 00214: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2063 - acc: 0.9590 - val_loss: 0.5509 - val_acc: 0.8310
Epoch 216/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2575 - acc: 0.9482 Epoch 00215: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2531 - acc: 0.9481 - val_loss: 0.5841 - val_acc: 0.8259
Epoch 217/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2511 - acc: 0.9468 Epoch 00216: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2494 - acc: 0.9475 - val_loss: 0.7290 - val_acc: 0.8094
Epoch 218/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2199 - acc: 0.9580 Epoch 00217: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2200 - acc: 0.9565 - val_loss: 0.6837 - val_acc: 0.8183
Epoch 219/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1973 - acc: 0.9606 Epoch 00218: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1988 - acc: 0.9615 - val_loss: 0.4211 - val_acc: 0.8615
Epoch 220/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2436 - acc: 0.9494 Epoch 00219: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2396 - acc: 0.9498 - val_loss: 0.7326 - val_acc: 0.7840
Epoch 221/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2685 - acc: 0.9514 Epoch 00220: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2840 - acc: 0.9506 - val_loss: 2.3757 - val_acc: 0.5705
Epoch 222/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2722 - acc: 0.9444 Epoch 00221: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2717 - acc: 0.9428 - val_loss: 0.6447 - val_acc: 0.8234
Epoch 223/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2820 - acc: 0.9442 Epoch 00222: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2799 - acc: 0.9442 - val_loss: 0.6612 - val_acc: 0.8145
Epoch 224/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2943 - acc: 0.9514 Epoch 00223: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3013 - acc: 0.9503 - val_loss: 2.9995 - val_acc: 0.5146
Epoch 225/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4002 - acc: 0.9282 Epoch 00224: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3970 - acc: 0.9286 - val_loss: 1.6033 - val_acc: 0.6645
Epoch 226/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4005 - acc: 0.9161 Epoch 00225: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3990 - acc: 0.9166 - val_loss: 0.7040 - val_acc: 0.8221
Epoch 227/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3248 - acc: 0.9430 Epoch 00226: val_loss improved from 0.37999 to 0.37676, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.226-0.3768.hdf5
28/28 [==============================] - 23s - loss: 0.3469 - acc: 0.9406 - val_loss: 0.3768 - val_acc: 0.8945
Epoch 228/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4270 - acc: 0.9314 Epoch 00227: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4260 - acc: 0.9314 - val_loss: 1.0267 - val_acc: 0.7052
Epoch 229/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4061 - acc: 0.9277 Epoch 00228: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4031 - acc: 0.9280 - val_loss: 0.7497 - val_acc: 0.7967
Epoch 230/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2715 - acc: 0.9534 Epoch 00229: val_loss improved from 0.37676 to 0.35703, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.229-0.3570.hdf5
28/28 [==============================] - 23s - loss: 0.2743 - acc: 0.9528 - val_loss: 0.3570 - val_acc: 0.8907
Epoch 231/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2473 - acc: 0.9525 Epoch 00230: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2457 - acc: 0.9528 - val_loss: 0.6644 - val_acc: 0.8094
Epoch 232/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2705 - acc: 0.9485 Epoch 00231: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2649 - acc: 0.9501 - val_loss: 0.5071 - val_acc: 0.8412
Epoch 233/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2019 - acc: 0.9583 Epoch 00232: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2031 - acc: 0.9584 - val_loss: 0.4220 - val_acc: 0.8729
Epoch 234/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1989 - acc: 0.9601 Epoch 00233: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1975 - acc: 0.9601 - val_loss: 0.5268 - val_acc: 0.8450
Epoch 235/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1579 - acc: 0.9682 Epoch 00234: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1537 - acc: 0.9690 - val_loss: 0.5550 - val_acc: 0.8310
Epoch 236/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2107 - acc: 0.9572 Epoch 00235: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2065 - acc: 0.9581 - val_loss: 0.8599 - val_acc: 0.7573
Epoch 237/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2565 - acc: 0.9525 Epoch 00236: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2542 - acc: 0.9531 - val_loss: 0.8001 - val_acc: 0.7853
Epoch 238/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2770 - acc: 0.9491 Epoch 00237: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2702 - acc: 0.9509 - val_loss: 0.9636 - val_acc: 0.7268
Epoch 239/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2570 - acc: 0.9418 Epoch 00238: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2581 - acc: 0.9420 - val_loss: 0.6464 - val_acc: 0.8399
Epoch 240/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2188 - acc: 0.9583 Epoch 00239: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2193 - acc: 0.9573 - val_loss: 0.7071 - val_acc: 0.8119
Epoch 241/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2315 - acc: 0.9601 Epoch 00240: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2481 - acc: 0.9581 - val_loss: 0.4701 - val_acc: 0.8704
Epoch 242/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3532 - acc: 0.9340 Epoch 00241: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3507 - acc: 0.9344 - val_loss: 2.9328 - val_acc: 0.4930
Epoch 243/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3483 - acc: 0.9306 Epoch 00242: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3558 - acc: 0.9305 - val_loss: 0.6559 - val_acc: 0.8183
Epoch 244/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3455 - acc: 0.9340 Epoch 00243: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3416 - acc: 0.9347 - val_loss: 1.6057 - val_acc: 0.6429
Epoch 245/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4190 - acc: 0.9271 Epoch 00244: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4200 - acc: 0.9258 - val_loss: 1.3377 - val_acc: 0.6658
Epoch 246/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3676 - acc: 0.9280 Epoch 00245: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3689 - acc: 0.9269 - val_loss: 2.1435 - val_acc: 0.5591
Epoch 247/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2864 - acc: 0.9421 Epoch 00246: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2889 - acc: 0.9425 - val_loss: 0.6362 - val_acc: 0.8107
Epoch 248/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2717 - acc: 0.9450 Epoch 00247: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2692 - acc: 0.9453 - val_loss: 0.3782 - val_acc: 0.8856
Epoch 249/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2739 - acc: 0.9505 Epoch 00248: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2700 - acc: 0.9515 - val_loss: 0.4620 - val_acc: 0.8767
Epoch 250/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2029 - acc: 0.9624 Epoch 00249: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1995 - acc: 0.9632 - val_loss: 0.4549 - val_acc: 0.8653
Epoch 251/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1835 - acc: 0.9630 Epoch 00250: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1845 - acc: 0.9623 - val_loss: 0.4395 - val_acc: 0.8844
Epoch 252/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2249 - acc: 0.9554 Epoch 00251: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2217 - acc: 0.9562 - val_loss: 0.6223 - val_acc: 0.8234
Epoch 253/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2228 - acc: 0.9523 Epoch 00252: val_loss improved from 0.35703 to 0.34743, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.252-0.3474.hdf5
28/28 [==============================] - 23s - loss: 0.2188 - acc: 0.9537 - val_loss: 0.3474 - val_acc: 0.8958
Epoch 254/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2374 - acc: 0.9505 Epoch 00253: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2354 - acc: 0.9512 - val_loss: 0.7312 - val_acc: 0.7764
Epoch 255/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3041 - acc: 0.9436 Epoch 00254: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3033 - acc: 0.9425 - val_loss: 0.7415 - val_acc: 0.8043
Epoch 256/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3065 - acc: 0.9424 Epoch 00255: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2992 - acc: 0.9436 - val_loss: 0.4995 - val_acc: 0.8564
Epoch 257/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2252 - acc: 0.9528 Epoch 00256: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2210 - acc: 0.9531 - val_loss: 0.8089 - val_acc: 0.7814
Epoch 258/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2019 - acc: 0.9656 Epoch 00257: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2014 - acc: 0.9643 - val_loss: 0.6333 - val_acc: 0.8450
Epoch 259/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1895 - acc: 0.9592 Epoch 00258: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1913 - acc: 0.9590 - val_loss: 0.5010 - val_acc: 0.8869
Epoch 260/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2408 - acc: 0.9580 Epoch 00259: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2416 - acc: 0.9581 - val_loss: 0.5553 - val_acc: 0.8412
Epoch 261/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1859 - acc: 0.9601 Epoch 00260: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1867 - acc: 0.9601 - val_loss: 0.6806 - val_acc: 0.8069
Epoch 262/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1872 - acc: 0.9676 Epoch 00261: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1833 - acc: 0.9676 - val_loss: 1.7439 - val_acc: 0.6557
Epoch 263/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3210 - acc: 0.9398 Epoch 00262: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3152 - acc: 0.9403 - val_loss: 4.9993 - val_acc: 0.3850
Epoch 264/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3809 - acc: 0.9288 Epoch 00263: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3795 - acc: 0.9289 - val_loss: 1.8957 - val_acc: 0.6099
Epoch 265/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3955 - acc: 0.9314 Epoch 00264: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.4042 - acc: 0.9314 - val_loss: 4.6546 - val_acc: 0.3443
Epoch 266/300
27/28 [===========================>..] - ETA: 0s - loss: 0.7314 - acc: 0.8776 Epoch 00265: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.7337 - acc: 0.8764 - val_loss: 1.4820 - val_acc: 0.6506
Epoch 267/300
27/28 [===========================>..] - ETA: 0s - loss: 0.5272 - acc: 0.9025 Epoch 00266: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5383 - acc: 0.9037 - val_loss: 1.4502 - val_acc: 0.6366
Epoch 268/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3704 - acc: 0.9225 Epoch 00267: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3625 - acc: 0.9241 - val_loss: 0.7115 - val_acc: 0.7980
Epoch 269/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2627 - acc: 0.9479 Epoch 00268: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2595 - acc: 0.9481 - val_loss: 1.2019 - val_acc: 0.7344
Epoch 270/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2560 - acc: 0.9497 Epoch 00269: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2574 - acc: 0.9495 - val_loss: 1.1952 - val_acc: 0.6912
Epoch 271/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3289 - acc: 0.9436 Epoch 00270: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3294 - acc: 0.9450 - val_loss: 1.2810 - val_acc: 0.7255
Epoch 272/300
27/28 [===========================>..] - ETA: 0s - loss: 0.4946 - acc: 0.9083 Epoch 00271: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.5042 - acc: 0.9090 - val_loss: 0.9876 - val_acc: 0.7637
Epoch 273/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3147 - acc: 0.9366 Epoch 00272: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3103 - acc: 0.9369 - val_loss: 0.5899 - val_acc: 0.8475
Epoch 274/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3105 - acc: 0.9485 Epoch 00273: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3236 - acc: 0.9473 - val_loss: 1.3367 - val_acc: 0.6633
Epoch 275/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2716 - acc: 0.9427 Epoch 00274: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2736 - acc: 0.9428 - val_loss: 1.0207 - val_acc: 0.7370
Epoch 276/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2143 - acc: 0.9601 Epoch 00275: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2162 - acc: 0.9581 - val_loss: 0.6835 - val_acc: 0.7751
Epoch 277/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1964 - acc: 0.9612 Epoch 00276: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1946 - acc: 0.9615 - val_loss: 0.8564 - val_acc: 0.7713
Epoch 278/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1409 - acc: 0.9716 Epoch 00277: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1384 - acc: 0.9721 - val_loss: 0.5213 - val_acc: 0.8374
Epoch 279/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2571 - acc: 0.9656 Epoch 00278: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2596 - acc: 0.9643 - val_loss: 2.9338 - val_acc: 0.4193
Epoch 280/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2753 - acc: 0.9505 Epoch 00279: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2723 - acc: 0.9515 - val_loss: 1.5049 - val_acc: 0.6671
Epoch 281/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2438 - acc: 0.9459 Epoch 00280: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2379 - acc: 0.9473 - val_loss: 0.4287 - val_acc: 0.8780
Epoch 282/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2286 - acc: 0.9569 Epoch 00281: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2357 - acc: 0.9579 - val_loss: 0.9931 - val_acc: 0.7446
Epoch 283/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2529 - acc: 0.9543 Epoch 00282: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2505 - acc: 0.9534 - val_loss: 0.5872 - val_acc: 0.8361
Epoch 284/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1608 - acc: 0.9667 Epoch 00283: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1592 - acc: 0.9671 - val_loss: 0.5000 - val_acc: 0.8729
Epoch 285/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1809 - acc: 0.9679 Epoch 00284: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1796 - acc: 0.9690 - val_loss: 0.5476 - val_acc: 0.8539
Epoch 286/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1201 - acc: 0.9786 Epoch 00285: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1209 - acc: 0.9782 - val_loss: 0.4299 - val_acc: 0.8691
Epoch 287/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1533 - acc: 0.9705 Epoch 00286: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1513 - acc: 0.9707 - val_loss: 0.6901 - val_acc: 0.8437
Epoch 288/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1989 - acc: 0.9699 Epoch 00287: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1984 - acc: 0.9693 - val_loss: 0.5435 - val_acc: 0.8551
Epoch 289/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2100 - acc: 0.9580 Epoch 00288: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2069 - acc: 0.9587 - val_loss: 0.6084 - val_acc: 0.8208
Epoch 290/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2286 - acc: 0.9517 Epoch 00289: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2251 - acc: 0.9531 - val_loss: 0.4760 - val_acc: 0.8640
Epoch 291/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2066 - acc: 0.9609 Epoch 00290: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2078 - acc: 0.9615 - val_loss: 0.5456 - val_acc: 0.8424
Epoch 292/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1810 - acc: 0.9615 Epoch 00291: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1799 - acc: 0.9618 - val_loss: 1.3233 - val_acc: 0.7128
Epoch 293/300
27/28 [===========================>..] - ETA: 0s - loss: 0.3188 - acc: 0.9398 Epoch 00292: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.3175 - acc: 0.9411 - val_loss: 0.3611 - val_acc: 0.8844
Epoch 294/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2233 - acc: 0.9572 Epoch 00293: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2214 - acc: 0.9584 - val_loss: 0.4652 - val_acc: 0.8628
Epoch 295/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2253 - acc: 0.9667 Epoch 00294: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2329 - acc: 0.9651 - val_loss: 2.2157 - val_acc: 0.5743
Epoch 296/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2630 - acc: 0.9444 Epoch 00295: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2681 - acc: 0.9425 - val_loss: 1.4558 - val_acc: 0.6874
Epoch 297/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2652 - acc: 0.9499 Epoch 00296: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2665 - acc: 0.9492 - val_loss: 1.4133 - val_acc: 0.6773
Epoch 298/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1940 - acc: 0.9566 Epoch 00297: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1901 - acc: 0.9573 - val_loss: 0.7431 - val_acc: 0.8056
Epoch 299/300
27/28 [===========================>..] - ETA: 0s - loss: 0.1671 - acc: 0.9653 Epoch 00298: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1678 - acc: 0.9657 - val_loss: 0.3626 - val_acc: 0.9022
Epoch 300/300
27/28 [===========================>..] - ETA: 0s - loss: 0.2314 - acc: 0.9563 Epoch 00299: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.2311 - acc: 0.9562 - val_loss: 1.1030 - val_acc: 0.7510
Out[15]:
<keras.callbacks.History at 0x7fb9b308de90>
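
The checkpoint files named weights.&lt;epoch&gt;-&lt;val_loss&gt;.hdf5 in the log above, and the learning-rate reductions and early stop that show up later in the resumed run, come from the four callbacks passed to fit_generator in the next cell (early_stopping, model_checkpoint, learningrate_schedule, tensorboard). Their exact settings are defined earlier in the notebook; the sketch below is only a plausible reconstruction, with the patience, factor and min_lr values guessed from the printed output rather than taken from the source.

# Illustrative reconstruction of the callbacks (not the notebook's original cell).
# patience / factor / min_lr are assumptions inferred from the log, not the real settings.
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard

model_checkpoint = ModelCheckpoint(CHECKPOINT_DIR + 'weights.{epoch:03d}-{val_loss:.4f}.hdf5',
                                   monitor='val_loss', verbose=1, save_best_only=True)   # save only on improvement
learningrate_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=40,
                                          verbose=1, min_lr=1e-7)                        # 1e-4 -> 1e-5 -> 1e-6 as in the log
early_stopping = EarlyStopping(monitor='val_loss', patience=100, verbose=1)              # "Epoch 00366: early stopping"
tensorboard = TensorBoard(log_dir=LOG_DIR)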

In [17]:
#resume training

# Reload the checkpoint with the lowest validation loss from the run above
# (weights.252-0.3474.hdf5) and continue training it.
model, model_name = get_best_model()
# print('Loading model from weights.004-0.0565.hdf5')
# model = load_model(CHECKPOINT_DIR + 'weights.011-1.7062.hdf5')

# Recompile with a fresh Adam optimizer at the base learning rate.
optimizer = Adam(lr=1e-4)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Resume from epoch 254 (initial_epoch=253) so the epoch counter and checkpoint
# filenames stay consistent with the interrupted run; train up to 600 epochs total.
model.fit_generator(train_generator, steps_per_epoch=steps_per_epoch, epochs=600, verbose=1,
                    callbacks=[early_stopping, model_checkpoint, learningrate_schedule, tensorboard],
                    validation_data=(X_valid, y_valid), class_weight=class_weight,
                    workers=3, pickle_safe=True, initial_epoch=253)


Loading model from checkpoint file ./resnet19ss_Hybrid_woNoF/checkpoint/weights.252-0.3474.hdf5
Loading model Done!
Epoch 254/600
27/28 [===========================>..] - ETA: 0s - loss: 0.1512 - acc: 0.9685  Epoch 00253: val_loss improved from inf to 0.27611, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.253-0.2761.hdf5
28/28 [==============================] - 39s - loss: 0.1480 - acc: 0.9688 - val_loss: 0.2761 - val_acc: 0.9199
Epoch 255/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0983 - acc: 0.9803 Epoch 00254: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0990 - acc: 0.9802 - val_loss: 0.2859 - val_acc: 0.9174
Epoch 256/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0962 - acc: 0.9812 Epoch 00255: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0950 - acc: 0.9816 - val_loss: 0.3126 - val_acc: 0.9136
Epoch 257/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0835 - acc: 0.9861 Epoch 00256: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0876 - acc: 0.9855 - val_loss: 0.3215 - val_acc: 0.9111
Epoch 258/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0954 - acc: 0.9829 Epoch 00257: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0962 - acc: 0.9824 - val_loss: 0.2832 - val_acc: 0.9199
Epoch 259/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0828 - acc: 0.9870 Epoch 00258: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0825 - acc: 0.9866 - val_loss: 0.2838 - val_acc: 0.9174
Epoch 260/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0946 - acc: 0.9841 Epoch 00259: val_loss improved from 0.27611 to 0.26751, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.259-0.2675.hdf5
28/28 [==============================] - 23s - loss: 0.0943 - acc: 0.9841 - val_loss: 0.2675 - val_acc: 0.9199
Epoch 261/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0771 - acc: 0.9867 Epoch 00260: val_loss improved from 0.26751 to 0.25111, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.260-0.2511.hdf5
28/28 [==============================] - 23s - loss: 0.0781 - acc: 0.9860 - val_loss: 0.2511 - val_acc: 0.9212
Epoch 262/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0993 - acc: 0.9864 Epoch 00261: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0977 - acc: 0.9869 - val_loss: 0.3160 - val_acc: 0.9072
Epoch 263/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0906 - acc: 0.9818 Epoch 00262: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0884 - acc: 0.9824 - val_loss: 0.2802 - val_acc: 0.9187
Epoch 264/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0920 - acc: 0.9809 Epoch 00263: val_loss improved from 0.25111 to 0.24231, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.263-0.2423.hdf5
28/28 [==============================] - 23s - loss: 0.0915 - acc: 0.9807 - val_loss: 0.2423 - val_acc: 0.9352
Epoch 265/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0779 - acc: 0.9844 Epoch 00264: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0771 - acc: 0.9844 - val_loss: 0.2606 - val_acc: 0.9339
Epoch 266/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0873 - acc: 0.9864 Epoch 00265: val_loss improved from 0.24231 to 0.22767, saving model to ./resnet19ss_Hybrid_woNoF/checkpoint/weights.265-0.2277.hdf5
28/28 [==============================] - 23s - loss: 0.0857 - acc: 0.9866 - val_loss: 0.2277 - val_acc: 0.9416
Epoch 267/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0855 - acc: 0.9850 Epoch 00266: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0867 - acc: 0.9847 - val_loss: 0.2505 - val_acc: 0.9339
Epoch 268/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0758 - acc: 0.9858 Epoch 00267: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0834 - acc: 0.9849 - val_loss: 0.2467 - val_acc: 0.9377
Epoch 269/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0743 - acc: 0.9847 Epoch 00268: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0756 - acc: 0.9844 - val_loss: 0.2480 - val_acc: 0.9377
Epoch 270/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0696 - acc: 0.9873 Epoch 00269: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0712 - acc: 0.9869 - val_loss: 0.2480 - val_acc: 0.9327
Epoch 271/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0820 - acc: 0.9829 Epoch 00270: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0808 - acc: 0.9830 - val_loss: 0.2454 - val_acc: 0.9339
Epoch 272/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0632 - acc: 0.9899 Epoch 00271: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0638 - acc: 0.9900 - val_loss: 0.2600 - val_acc: 0.9263
Epoch 273/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0675 - acc: 0.9881 Epoch 00272: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0673 - acc: 0.9880 - val_loss: 0.2393 - val_acc: 0.9327
Epoch 274/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0874 - acc: 0.9852 Epoch 00273: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0881 - acc: 0.9852 - val_loss: 0.2774 - val_acc: 0.9339
Epoch 275/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0714 - acc: 0.9864 Epoch 00274: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0699 - acc: 0.9869 - val_loss: 0.3007 - val_acc: 0.9276
Epoch 276/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0585 - acc: 0.9887 Epoch 00275: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0587 - acc: 0.9883 - val_loss: 0.2773 - val_acc: 0.9352
Epoch 277/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0690 - acc: 0.9884 Epoch 00276: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0700 - acc: 0.9883 - val_loss: 0.2640 - val_acc: 0.9390
Epoch 278/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0814 - acc: 0.9832 Epoch 00277: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0819 - acc: 0.9833 - val_loss: 0.2616 - val_acc: 0.9377
Epoch 279/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0776 - acc: 0.9873 Epoch 00278: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0772 - acc: 0.9872 - val_loss: 0.2483 - val_acc: 0.9288
Epoch 280/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0732 - acc: 0.9852 Epoch 00279: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0751 - acc: 0.9849 - val_loss: 0.2461 - val_acc: 0.9301
Epoch 281/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0670 - acc: 0.9902 Epoch 00280: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0662 - acc: 0.9900 - val_loss: 0.2560 - val_acc: 0.9314
Epoch 282/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0947 - acc: 0.9850 Epoch 00281: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0957 - acc: 0.9847 - val_loss: 0.2747 - val_acc: 0.9238
Epoch 283/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0689 - acc: 0.9852 Epoch 00282: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0682 - acc: 0.9852 - val_loss: 0.2557 - val_acc: 0.9339
Epoch 284/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0587 - acc: 0.9893 Epoch 00283: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0647 - acc: 0.9880 - val_loss: 0.2409 - val_acc: 0.9377
Epoch 285/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0727 - acc: 0.9864 Epoch 00284: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0718 - acc: 0.9866 - val_loss: 0.2812 - val_acc: 0.9276
Epoch 286/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0621 - acc: 0.9902 Epoch 00285: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0614 - acc: 0.9902 - val_loss: 0.2497 - val_acc: 0.9377
Epoch 287/600
27/28 [===========================>..] - ETA: 0s - loss: 0.1054 - acc: 0.9835 Epoch 00286: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.1027 - acc: 0.9838 - val_loss: 0.2513 - val_acc: 0.9403
Epoch 288/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0703 - acc: 0.9867 Epoch 00287: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0689 - acc: 0.9869 - val_loss: 0.2597 - val_acc: 0.9352
Epoch 289/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0715 - acc: 0.9867 Epoch 00288: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0753 - acc: 0.9858 - val_loss: 0.2681 - val_acc: 0.9263
Epoch 290/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0942 - acc: 0.9861 Epoch 00289: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0933 - acc: 0.9863 - val_loss: 0.2801 - val_acc: 0.9327
Epoch 291/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0766 - acc: 0.9821 Epoch 00290: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0751 - acc: 0.9827 - val_loss: 0.2728 - val_acc: 0.9314
Epoch 292/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0771 - acc: 0.9852 Epoch 00291: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0761 - acc: 0.9858 - val_loss: 0.2626 - val_acc: 0.9250
Epoch 293/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0638 - acc: 0.9905 Epoch 00292: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0628 - acc: 0.9908 - val_loss: 0.2671 - val_acc: 0.9327
Epoch 294/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0560 - acc: 0.9913 Epoch 00293: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0557 - acc: 0.9914 - val_loss: 0.2521 - val_acc: 0.9390
Epoch 295/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0582 - acc: 0.9870 Epoch 00294: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0586 - acc: 0.9866 - val_loss: 0.2421 - val_acc: 0.9441
Epoch 296/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0655 - acc: 0.9878 Epoch 00295: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0654 - acc: 0.9877 - val_loss: 0.2528 - val_acc: 0.9403
Epoch 297/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0553 - acc: 0.9893 Epoch 00296: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0552 - acc: 0.9897 - val_loss: 0.2767 - val_acc: 0.9339
Epoch 298/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0553 - acc: 0.9905 Epoch 00297: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0572 - acc: 0.9897 - val_loss: 0.2393 - val_acc: 0.9479
Epoch 299/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0548 - acc: 0.9905 Epoch 00298: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0540 - acc: 0.9908 - val_loss: 0.2469 - val_acc: 0.9428
Epoch 300/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0608 - acc: 0.9907 Epoch 00299: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0604 - acc: 0.9908 - val_loss: 0.2477 - val_acc: 0.9416
Epoch 301/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0485 - acc: 0.9896 Epoch 00300: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0499 - acc: 0.9891 - val_loss: 0.2330 - val_acc: 0.9403
Epoch 302/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0768 - acc: 0.9876 Epoch 00301: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0761 - acc: 0.9877 - val_loss: 0.2725 - val_acc: 0.9288
Epoch 303/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0701 - acc: 0.9881 Epoch 00302: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0724 - acc: 0.9874 - val_loss: 0.2622 - val_acc: 0.9352
Epoch 304/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0771 - acc: 0.9864 Epoch 00303: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0764 - acc: 0.9866 - val_loss: 0.2481 - val_acc: 0.9390
Epoch 305/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0700 - acc: 0.9835 Epoch 00304: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0688 - acc: 0.9841 - val_loss: 0.2491 - val_acc: 0.9352
Epoch 306/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0593 - acc: 0.9905 Epoch 00305: val_loss did not improve
28/28 [==============================] - 23s - loss: 0.0592 - acc: 0.9900 - val_loss: 0.2468 - val_acc: 0.9428
Epoch 307/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0629 - acc: 0.9878 Epoch 00306: val_loss did not improve

Epoch 00306: reducing learning rate to 9.99999974738e-06.
28/28 [==============================] - 25s - loss: 0.0637 - acc: 0.9872 - val_loss: 0.2723 - val_acc: 0.9314
Epoch 308/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0682 - acc: 0.9876 Epoch 00307: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0674 - acc: 0.9877 - val_loss: 0.2601 - val_acc: 0.9327
Epoch 309/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0465 - acc: 0.9916 Epoch 00308: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0465 - acc: 0.9916 - val_loss: 0.2554 - val_acc: 0.9365
Epoch 310/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0504 - acc: 0.9905 Epoch 00309: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0502 - acc: 0.9908 - val_loss: 0.2491 - val_acc: 0.9352
Epoch 311/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0453 - acc: 0.9925 Epoch 00310: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0445 - acc: 0.9927 - val_loss: 0.2492 - val_acc: 0.9365
Epoch 312/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0509 - acc: 0.9922 Epoch 00311: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0500 - acc: 0.9925 - val_loss: 0.2460 - val_acc: 0.9416
Epoch 313/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0500 - acc: 0.9893 Epoch 00312: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0487 - acc: 0.9897 - val_loss: 0.2457 - val_acc: 0.9416
Epoch 314/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0612 - acc: 0.9919 Epoch 00313: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0613 - acc: 0.9919 - val_loss: 0.2405 - val_acc: 0.9365
Epoch 315/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0686 - acc: 0.9890 Epoch 00314: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0698 - acc: 0.9877 - val_loss: 0.2398 - val_acc: 0.9352
Epoch 316/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0498 - acc: 0.9905 Epoch 00315: val_loss did not improve
28/28 [==============================] - 20s - loss: 0.0491 - acc: 0.9908 - val_loss: 0.2393 - val_acc: 0.9352
Epoch 317/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0434 - acc: 0.9913 Epoch 00316: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0430 - acc: 0.9914 - val_loss: 0.2387 - val_acc: 0.9352
Epoch 318/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0524 - acc: 0.9931 Epoch 00317: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0511 - acc: 0.9933 - val_loss: 0.2389 - val_acc: 0.9365
Epoch 319/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0598 - acc: 0.9922 Epoch 00318: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0600 - acc: 0.9919 - val_loss: 0.2395 - val_acc: 0.9403
Epoch 320/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0396 - acc: 0.9936 Epoch 00319: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0391 - acc: 0.9939 - val_loss: 0.2402 - val_acc: 0.9365
Epoch 321/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0486 - acc: 0.9916 Epoch 00320: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0494 - acc: 0.9916 - val_loss: 0.2398 - val_acc: 0.9365
Epoch 322/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0454 - acc: 0.9933 Epoch 00321: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0459 - acc: 0.9930 - val_loss: 0.2394 - val_acc: 0.9390
Epoch 323/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0501 - acc: 0.9902 Epoch 00322: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0488 - acc: 0.9905 - val_loss: 0.2377 - val_acc: 0.9339
Epoch 324/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0553 - acc: 0.9933 Epoch 00323: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0546 - acc: 0.9936 - val_loss: 0.2396 - val_acc: 0.9352
Epoch 325/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0733 - acc: 0.9884 Epoch 00324: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0723 - acc: 0.9888 - val_loss: 0.2387 - val_acc: 0.9352
Epoch 326/600
27/28 [===========================>..] - ETA: 0s - loss: 0.1108 - acc: 0.9870 Epoch 00325: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.1090 - acc: 0.9869 - val_loss: 0.2395 - val_acc: 0.9390
Epoch 327/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0597 - acc: 0.9893 Epoch 00326: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0681 - acc: 0.9883 - val_loss: 0.2405 - val_acc: 0.9377
Epoch 328/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0631 - acc: 0.9910 Epoch 00327: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0626 - acc: 0.9914 - val_loss: 0.2393 - val_acc: 0.9352
Epoch 329/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0473 - acc: 0.9939 Epoch 00328: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0466 - acc: 0.9939 - val_loss: 0.2404 - val_acc: 0.9390
Epoch 330/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0520 - acc: 0.9902 Epoch 00329: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0516 - acc: 0.9900 - val_loss: 0.2413 - val_acc: 0.9390
Epoch 331/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0448 - acc: 0.9931 Epoch 00330: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0440 - acc: 0.9933 - val_loss: 0.2395 - val_acc: 0.9377
Epoch 332/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0396 - acc: 0.9936 Epoch 00331: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0395 - acc: 0.9933 - val_loss: 0.2385 - val_acc: 0.9390
Epoch 333/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0493 - acc: 0.9910 Epoch 00332: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0492 - acc: 0.9911 - val_loss: 0.2393 - val_acc: 0.9390
Epoch 334/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0490 - acc: 0.9922 Epoch 00333: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0482 - acc: 0.9922 - val_loss: 0.2422 - val_acc: 0.9377
Epoch 335/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0422 - acc: 0.9910 Epoch 00334: val_loss did not improve
28/28 [==============================] - 20s - loss: 0.0414 - acc: 0.9911 - val_loss: 0.2426 - val_acc: 0.9352
Epoch 336/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0517 - acc: 0.9933 Epoch 00335: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0520 - acc: 0.9936 - val_loss: 0.2421 - val_acc: 0.9365
Epoch 337/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0628 - acc: 0.9916 Epoch 00336: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0631 - acc: 0.9908 - val_loss: 0.2411 - val_acc: 0.9365
Epoch 338/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0548 - acc: 0.9893 Epoch 00337: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0534 - acc: 0.9897 - val_loss: 0.2420 - val_acc: 0.9390
Epoch 339/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0463 - acc: 0.9948 Epoch 00338: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0454 - acc: 0.9950 - val_loss: 0.2447 - val_acc: 0.9365
Epoch 340/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0562 - acc: 0.9884 Epoch 00339: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0567 - acc: 0.9883 - val_loss: 0.2435 - val_acc: 0.9390
Epoch 341/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0439 - acc: 0.9919 Epoch 00340: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0433 - acc: 0.9922 - val_loss: 0.2458 - val_acc: 0.9365
Epoch 342/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0544 - acc: 0.9916 Epoch 00341: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0553 - acc: 0.9908 - val_loss: 0.2436 - val_acc: 0.9390
Epoch 343/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0434 - acc: 0.9922 Epoch 00342: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0454 - acc: 0.9916 - val_loss: 0.2439 - val_acc: 0.9390
Epoch 344/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0470 - acc: 0.9928 Epoch 00343: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0466 - acc: 0.9927 - val_loss: 0.2433 - val_acc: 0.9403
Epoch 345/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0603 - acc: 0.9939 Epoch 00344: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0589 - acc: 0.9941 - val_loss: 0.2441 - val_acc: 0.9377
Epoch 346/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0418 - acc: 0.9942 Epoch 00345: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0419 - acc: 0.9941 - val_loss: 0.2455 - val_acc: 0.9390
Epoch 347/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0504 - acc: 0.9916 Epoch 00346: val_loss did not improve

Epoch 00346: reducing learning rate to 9.99999974738e-07.
28/28 [==============================] - 21s - loss: 0.0494 - acc: 0.9919 - val_loss: 0.2461 - val_acc: 0.9416
Epoch 348/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0472 - acc: 0.9936 Epoch 00347: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0505 - acc: 0.9933 - val_loss: 0.2453 - val_acc: 0.9416
Epoch 349/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0381 - acc: 0.9939 Epoch 00348: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0430 - acc: 0.9930 - val_loss: 0.2447 - val_acc: 0.9416
Epoch 350/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0432 - acc: 0.9939 Epoch 00349: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0435 - acc: 0.9941 - val_loss: 0.2430 - val_acc: 0.9416
Epoch 351/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0419 - acc: 0.9925 Epoch 00350: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0414 - acc: 0.9927 - val_loss: 0.2440 - val_acc: 0.9416
Epoch 352/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0661 - acc: 0.9925 Epoch 00351: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0647 - acc: 0.9925 - val_loss: 0.2433 - val_acc: 0.9428
Epoch 353/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0636 - acc: 0.9905 Epoch 00352: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0627 - acc: 0.9905 - val_loss: 0.2434 - val_acc: 0.9428
Epoch 354/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0504 - acc: 0.9925 Epoch 00353: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0499 - acc: 0.9927 - val_loss: 0.2414 - val_acc: 0.9403
Epoch 355/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0441 - acc: 0.9919 Epoch 00354: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0436 - acc: 0.9919 - val_loss: 0.2426 - val_acc: 0.9403
Epoch 356/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0476 - acc: 0.9922 Epoch 00355: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0473 - acc: 0.9919 - val_loss: 0.2411 - val_acc: 0.9416
Epoch 357/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0438 - acc: 0.9910 Epoch 00356: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0436 - acc: 0.9914 - val_loss: 0.2422 - val_acc: 0.9403
Epoch 358/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0550 - acc: 0.9905 Epoch 00357: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0542 - acc: 0.9902 - val_loss: 0.2432 - val_acc: 0.9403
Epoch 359/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0703 - acc: 0.9925 Epoch 00358: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0692 - acc: 0.9927 - val_loss: 0.2431 - val_acc: 0.9390
Epoch 360/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0502 - acc: 0.9913 Epoch 00359: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0503 - acc: 0.9916 - val_loss: 0.2425 - val_acc: 0.9416
Epoch 361/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0664 - acc: 0.9905 Epoch 00360: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0648 - acc: 0.9908 - val_loss: 0.2429 - val_acc: 0.9403
Epoch 362/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0577 - acc: 0.9919 Epoch 00361: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0573 - acc: 0.9916 - val_loss: 0.2430 - val_acc: 0.9390
Epoch 363/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0473 - acc: 0.9936 Epoch 00362: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0486 - acc: 0.9927 - val_loss: 0.2419 - val_acc: 0.9416
Epoch 364/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0502 - acc: 0.9893 Epoch 00363: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0499 - acc: 0.9897 - val_loss: 0.2427 - val_acc: 0.9403
Epoch 365/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0465 - acc: 0.9910 Epoch 00364: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0462 - acc: 0.9914 - val_loss: 0.2424 - val_acc: 0.9390
Epoch 366/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0552 - acc: 0.9899 Epoch 00365: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0604 - acc: 0.9894 - val_loss: 0.2412 - val_acc: 0.9416
Epoch 367/600
27/28 [===========================>..] - ETA: 0s - loss: 0.0617 - acc: 0.9881 Epoch 00366: val_loss did not improve
28/28 [==============================] - 21s - loss: 0.0606 - acc: 0.9886 - val_loss: 0.2430 - val_acc: 0.9390
Epoch 00366: early stopping
Out[17]:
<keras.callbacks.History at 0x7fb990eb79d0>
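The log above shows ReduceLROnPlateau cutting the learning rate to roughly 1e-6 at epoch 347 and EarlyStopping terminating the run at epoch 367, with the best checkpoint saved back at epoch 265 (val_loss 0.2277). The fit call itself is not shown in this section, so the callback setup below is only a minimal sketch of what would produce this behaviour; the patience, factor and min_lr values are assumptions, not the values actually used.

early_stopping = EarlyStopping(monitor='val_loss', patience=100, verbose=1)                      # patience assumed
model_checkpoint = ModelCheckpoint(CHECKPOINT_DIR+'weights.{epoch:03d}-{val_loss:.4f}.hdf5',
                                   monitor='val_loss', save_best_only=True, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=30, min_lr=1e-7, verbose=1)  # factor/patience/min_lr assumed
# passed as callbacks=[early_stopping, model_checkpoint, reduce_lr] to the fit_generator call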

In [6]:
#test prepare

test_model, test_model_name = get_best_model()
# print('Loading model from weights.004-0.0565.hdf5')
# test_model = load_model('./checkpoints/checkpoint2/weights.004-0.0565.hdf5')

def test_generator(df, mean, datagen = None, batch_size = BATCHSIZE):
    n = df.shape[0]
    batch_index = 0
    while 1:
        current_index = batch_index * batch_size
        if n >= current_index + batch_size:
            current_batch_size = batch_size
            batch_index += 1    
        else:
            current_batch_size = n - current_index
            batch_index = 0        
        batch_df = df[current_index:current_index+current_batch_size]
        batch_x = np.zeros((batch_df.shape[0], ROWS, COLS, 3), dtype=K.floatx())
        i = 0
        for index,row in batch_df.iterrows():
            image_file = row['image_file']
            bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
            cropped = load_img(TEST_DIR+image_file,bbox,target_size=(ROWS,COLS))
            x = np.asarray(cropped, dtype=K.floatx())
            x /= 255.
            if datagen is not None: x = datagen.random_transform(x)            
            x = preprocess_input(x, mean)
            batch_x[i] = x
            i += 1
        if batch_index%50 == 0: print('batch_index', batch_index)
        yield(batch_x)
        
test_aug_datagen = ImageDataGenerator(
    rotation_range=180,
    shear_range=0.2,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    vertical_flip=True)


Loading model from checkpoint file ./resnet19ss_Hybrid_woNoF/checkpoint/weights.265-0.2277.hdf5
Loading model Done!

In [ ]:
train_mean = [0.37698776,  0.41491762,  0.38681713]
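train_mean is the per-channel mean of the training crops after rescaling to [0, 1]; test_generator divides each crop by 255 and then hands it to preprocess_input, which subtracts this mean channel by channel. For a single crop the pipeline reduces to the sketch below (the file name and bounding box are hypothetical, for illustration only):

x = np.asarray(load_img(TEST_DIR + 'img_00001.jpg', [0, 0, 224, 224], target_size=(ROWS, COLS)),
               dtype=K.floatx())      # hypothetical image and bbox
x /= 255.                             # rescale to [0, 1]
x = preprocess_input(x, train_mean)   # subtract per-channel train mean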

In [8]:
#validation data fish logloss
 
valid_pred = test_model.predict(X_valid, batch_size=BATCHSIZE, verbose=1)
# valid_pred = test_model.predict_generator(test_generator(df=valid_df, mean=valid_mean),
#                                           val_samples=valid_df.shape[0], nb_worker=1, pickle_safe=False)
valid_logloss_df = pd.DataFrame(columns=['logloss','class'])
for i in range(y_valid.shape[0]):
    index = np.argmax(y_valid[i,:])
    fish = CROP_CLASSES[index]
    logloss = -math.log(valid_pred[i,index])
    valid_logloss_df.loc[len(valid_logloss_df)]=[logloss,fish]                                       
print('valid loss:', valid_logloss_df['logloss'].mean())
print(valid_logloss_df.groupby(['class'])['logloss'].mean())

train_pred = test_model.predict(X_train, batch_size=BATCHSIZE, verbose=1)
# train_pred = test_model.predict_generator(test_generator(df=train_df, ),
#                                           val_samples=train_df.shape[0], nb_worker=1, pickle_safe=False)
train_logloss_df = pd.DataFrame(columns=['logloss','class'])
for i in range(y_train.shape[0]):
    index = np.argmax(y_train[i,:])
    fish = CROP_CLASSES[index]
    logloss = -math.log(train_pred[i,index])
    train_logloss_df.loc[len(train_logloss_df)]=[logloss,fish]                                       
print('train loss:', train_logloss_df['logloss'].mean())
print(train_logloss_df.groupby(['class'])['logloss'].mean())


787/787 [==============================] - 1s     
valid loss: 0.229860134341
class
ALB      0.167267
BET      0.692852
DOL      0.750891
LAG      0.208375
OTHER    0.096605
SHARK    0.266442
YFT      0.215996
Name: logloss, dtype: float64
3584/3584 [==============================] - 7s     
train loss: 0.0194088987134
class
ALB      0.026230
BET      0.003189
DOL      0.000596
LAG      0.000126
OTHER    0.001473
SHARK    0.000130
YFT      0.021688
Name: logloss, dtype: float64
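The per-sample figure used here is the multi-class log loss evaluated at the true class, -log(p_true); the class tables above are just its mean within each fish class. As a sanity check, the overall validation figure should agree (up to sklearn's internal probability clipping) with log_loss, which is already imported at the top of the notebook:

# should print approximately 0.2299, matching valid_logloss_df['logloss'].mean()
print(log_loss(y_valid, valid_pred))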

In [ ]:
#GTbbox_CROPpred_df = ['image_file','crop_index','crop_class','xmin','ymin','xmax','ymax',
#                      'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT', 'logloss']

file_name = 'GTbbox_CROPpred_df_'+test_model_name+'_.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    GTbbox_CROPpred_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name) 
    nb_augmentation = 1
    if nb_augmentation ==1:
        test_preds = test_model.predict_generator(test_generator(df=GTbbox_df, mean=train_mean), 
                                                  val_samples=GTbbox_df.shape[0], nb_worker=1, pickle_safe=False)
    else:
        test_preds = np.zeros((GTbbox_df.shape[0], len(FISH_CLASSES)), dtype=K.floatx())
        for idx in range(nb_augmentation):
            print('{}th augmentation for testing ...'.format(idx+1))
            test_preds += test_model.predict_generator(test_generator(df=GTbbox_df, mean=train_mean, datagen=test_aug_datagen), 
                                                       val_samples=GTbbox_df.shape[0], nb_worker=1, pickle_safe=False)
        test_preds /= nb_augmentation

    CROPpred_df = pd.DataFrame(test_preds, columns=['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT'])
    GTbbox_CROPpred_df = pd.concat([GTbbox_df,CROPpred_df], axis=1)
    GTbbox_CROPpred_df['logloss'] = GTbbox_CROPpred_df.apply(lambda row: -math.log(row[row['crop_class']]), axis=1)
    GTbbox_CROPpred_df.to_pickle(OUTPUT_DIR+file_name) 

#logloss of every fish class
print(GTbbox_CROPpred_df.groupby(['crop_class'])['logloss'].mean())
print(GTbbox_CROPpred_df['logloss'].mean())

In [ ]:
# RFCNbbox_RFCNpred_df = ['image_class','image_file','crop_index','xmin','ymin','xmax','ymax',
#                          'NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN',
#                          'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN']
# select fish_conf >= CONF_THRESH

file_name = 'RFCNbbox_RFCNpred_df_conf{:.2f}.pickle'.format(CONF_THRESH)
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    RFCNbbox_RFCNpred_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)        
    RFCNbbox_RFCNpred_df = pd.DataFrame(columns=['image_class','image_file','crop_index','xmin','ymin','xmax','ymax',
                                                  'NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN',
                                                  'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN']) 

    with open('../data/RFCN_detections/detections_full_AGNOSTICnms_'+RFCN_MODEL+'.pkl','rb') as f:
        detections_full_AGNOSTICnms = pickle.load(f, encoding='latin1') 
    with open("../RFCN/ImageSets/Main/test.txt","r") as f:
        test_files = f.readlines()
    with open("../RFCN/ImageSets/Main/train_test.txt","r") as f:
        train_file_labels = f.readlines()
    assert len(detections_full_AGNOSTICnms) == len(test_files)
    
    count = np.zeros(len(detections_full_AGNOSTICnms))
    
    for im in range(len(detections_full_AGNOSTICnms)):
        if im%1000 == 0: print(im)
        basename = test_files[im][:9]
        if im<1000:
            image_class = '--'
        else:
            for i in range(len(train_file_labels)):
                if train_file_labels[i][:9] == basename:
                    image_class = train_file_labels[i][10:-1]
                    break
        image = Image.open(TEST_DIR+'/'+basename+'.jpg')
        width_image, height_image = image.size
        
        bboxes = []
        detects_im = detections_full_AGNOSTICnms[im]
        for i in range(len(detects_im)):
#             if np.sum(detects_im[i,5:]) >= CONF_THRESH:
            if np.max(detects_im[i,5:]) >= CONF_THRESH:
                bboxes.append(detects_im[i,:]) 
        count[im] = len(bboxes)
        if len(bboxes) == 0:
            ind = np.argmax(np.sum(detects_im[:,5:], axis=1))
            bboxes.append(detects_im[ind,:])
        bboxes = np.asarray(bboxes)

        for j in range(len(bboxes)):    
            bbox = bboxes[j]
            xmin = bbox[0]
            ymin = bbox[1]
            xmax = bbox[2]
            ymax = bbox[3]
            assert max(xmin,0)<min(xmax,width_image)
            assert max(ymin,0)<min(ymax,height_image)
            RFCNbbox_RFCNpred_df.loc[len(RFCNbbox_RFCNpred_df)]=[image_class,basename+'.jpg',j,max(xmin,0),max(ymin,0),
                                                                   min(xmax,width_image),min(ymax,height_image),
                                                                   bbox[4],bbox[5],bbox[6],bbox[7],bbox[8],bbox[9],bbox[10],bbox[11]]   
    
    RFCNbbox_RFCNpred_df.to_pickle(OUTPUT_DIR+file_name)
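The cell above keeps every detection whose best fish score reaches CONF_THRESH and, if none qualifies for an image, falls back to the single detection with the largest summed fish score. The sketch below replays that rule on a dummy two-detection array following the same column layout (four box coordinates, then NoF, then the seven fish scores); only the second row clears the threshold, and the fallback branch is included for completeness:

# dummy detections, for illustration only
dummy = np.array([
    [10, 10,  50, 50, 0.90, 0.02, 0.01, 0.01, 0.01, 0.02, 0.02, 0.01],   # mostly NoF
    [60, 20, 120, 90, 0.10, 0.85, 0.01, 0.01, 0.01, 0.01, 0.00, 0.01],   # confident ALB
])
keep = [d for d in dummy if np.max(d[5:]) >= CONF_THRESH]
if not keep:   # no detection above threshold: keep the box with the largest total fish score
    keep = [dummy[np.argmax(np.sum(dummy[:, 5:], axis=1))]]
print(len(keep))   # 1 -- only the second detection survives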

In [ ]:
GTbbox_CROPpred_df.loc[GTbbox_CROPpred_df['crop_class']!='NoF']
file_name = 'data_test_Crop_{}_{}.pickle'.format(ROWS, COLS)
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    with open(OUTPUT_DIR+file_name, 'rb') as f:
        data_test = pickle.load(f)
    X_test_crop = data_test['X_test_crop']
else:
    print ('Generating file '+file_name)
    X_test_crop = np.ndarray((RFCNbbox_RFCNpred_df.shape[0], ROWS, COLS, 3), dtype=np.uint8)
    i = 0
    for index,row in RFCNbbox_RFCNpred_df.iterrows():
        image_file = row['image_file']
        bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
        cropped = load_img(TEST_DIR+image_file,bbox,target_size=(ROWS,COLS))
        X_test_crop[i] = np.asarray(cropped)
        i += 1

    #save data to file
    data_test = {'X_test_crop': X_test_crop}
    with open(OUTPUT_DIR+file_name, 'wb') as f:
        pickle.dump(data_test, f)

print('Loading data done.')
X_test_crop = X_test_crop.astype(np.float32)
print('Convert to float32 done.')
X_test_crop /= 255.
print('Rescale by 255 done.')

In [ ]:
file_name = 'data_trainfish_Crop_{}_{}.pickle'.format(ROWS, COLS)
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    with open(OUTPUT_DIR+file_name, 'rb') as f:
        data_trainfish = pickle.load(f)
    X_trainfish_crop = data_trainfish['X_trainfish_crop']
else:
    print ('Generating file '+file_name)

    GTbbox_CROPpred_fish_df = GTbbox_CROPpred_df.loc[GTbbox_CROPpred_df['crop_class']!='NoF']
    X_trainfish_crop = np.ndarray((GTbbox_CROPpred_fish_df.shape[0], ROWS, COLS, 3), dtype=np.uint8)
    i = 0
    for index,row in GTbbox_CROPpred_fish_df.iterrows():
        image_file = row['image_file']
        bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
        cropped = load_img(TEST_DIR+image_file,bbox,target_size=(ROWS,COLS))
        X_trainfish_crop[i] = np.asarray(cropped)
        i += 1
   
    #save data to file
    data_trainfish = {'X_trainfish_crop': X_trainfish_crop}
    with open(OUTPUT_DIR+file_name, 'wb') as f:
        pickle.dump(data_trainfish, f)
        
print('Loading data done.')
X_trainfish_crop = X_trainfish_crop.astype(np.float32)
print('Convert to float32 done.')
X_trainfish_crop /= 255.
print('Rescale by 255 done.')

In [ ]:
mean(X_trainfish_crop)

In [ ]:
mean(X_test_crop[1251:])

In [ ]:
# test_mean = [0.41019869,  0.43978861,  0.39873621]
test_mean = [0.37698776,  0.41491762,  0.38681713]

In [ ]:
# RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df = ['image_class', 'image_file','crop_index','xmin','ymin','xmax','ymax',
#                                    'NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN',
#                                    'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN',
#                                    'NoF_CROP', 'ALB_CROP', 'BET_CROP', 'DOL_CROP',
#                                    'LAG_CROP', 'OTHER_CROP', 'SHARK_CROP', 'YFT_CROP',
#                                    'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']

file_name = 'RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df_'+test_model_name+'_.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)  
    nb_augmentation = 1
    if nb_augmentation ==1:
        test_preds = test_model.predict_generator(test_generator(df=RFCNbbox_RFCNpred_df, mean=test_mean), 
                                                  val_samples=RFCNbbox_RFCNpred_df.shape[0], nb_worker=1, pickle_safe=False)
    else:
        test_preds = np.zeros((RFCNbbox_RFCNpred_df.shape[0], len(FISH_CLASSES)), dtype=K.floatx())
        for idx in range(nb_augmentation):
            print('{}th augmentation for testing ...'.format(idx+1))
            test_preds += test_model.predict_generator(test_generator(df=RFCNbbox_RFCNpred_df, mean=test_mean, datagen=test_aug_datagen), 
                                                       val_samples=RFCNbbox_RFCNpred_df.shape[0], nb_worker=1, pickle_safe=False)
        test_preds /= nb_augmentation

    CROPpred_df = pd.DataFrame(test_preds, columns=['ALB_CROP', 'BET_CROP', 'DOL_CROP', 'LAG_CROP', 'NoF_CROP', 'OTHER_CROP', 'SHARK_CROP', 'YFT_CROP'])
    RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df = pd.concat([RFCNbbox_RFCNpred_df,CROPpred_df], axis=1)
    
    RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df['NoF'] = RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df['NoF_RFCN']
    for fish in ['ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']:
        RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df[fish] = RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df.apply(lambda row: (1-row['NoF_RFCN'])*row[fish+'_CROP']/(1-row['NoF_CROP']) if row['NoF_CROP']!=1 else 0, axis=1)
#     for fish in FISH_CLASSES:
#         RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df[fish] = RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df[fish+'_CROP']

    RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df.to_pickle(OUTPUT_DIR+file_name)
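The hybrid rule keeps the RFCN NoF probability and redistributes the remaining probability mass over the seven fish classes in proportion to the crop classifier's fish scores: fish = (1 - NoF_RFCN) * fish_CROP / (1 - NoF_CROP). With illustrative values:

# illustrative values only
NoF_RFCN, NoF_CROP, ALB_CROP = 0.2, 0.1, 0.72
ALB_hybrid = (1 - NoF_RFCN) * ALB_CROP / (1 - NoF_CROP)   # 0.8 * 0.72 / 0.9 = 0.64

Because the crop scores for the seven fish classes sum to 1 - NoF_CROP, the rescaled fish scores plus NoF_RFCN again sum to 1.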

In [ ]:
# clsMaxAve and hybrid RFCNpred&CROPpred such that RFCNpred for NoF and CROPpred for fish
# test_pred_df = ['logloss','image_class','image_file','NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']
# RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df = ['image_class', 'image_file','crop_index','xmin','ymin','xmax','ymax',
#                                    'NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN',
#                                    'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN',
#                                    'ALB_CROP', 'BET_CROP', 'DOL_CROP',
#                                    'LAG_CROP', 'OTHER_CROP', 'SHARK_CROP', 'YFT_CROP',
#                                    'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']

file_name = 'test_pred_df_Hybrid_'+test_model_name+'_.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    test_pred_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)  
    with open("../RFCN/ImageSets/Main/test.txt","r") as f:
        test_files = f.readlines()
    
    test_pred_df = pd.DataFrame(columns=['logloss','image_class','image_file','NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT'])  
    for j in range(len(test_files)): 
        image_file = test_files[j][:-1]+'.jpg'
        test_pred_im_df = RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df.loc[RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df['image_file'] == image_file,
                                                                       ['image_class', 'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']]
        image_class = test_pred_im_df.iloc[0]['image_class']
        test_pred_im_df.drop('image_class', axis=1, inplace=True)
        max_score = test_pred_im_df.max(axis=1)
        max_cls = test_pred_im_df.idxmax(axis=1)
        test_pred_im_df['max_score'] = max_score
        test_pred_im_df['max_cls'] = max_cls
        test_pred_im_df['Count'] = test_pred_im_df.groupby(['max_cls'])['max_cls'].transform('count')
        idx = test_pred_im_df.groupby(['max_cls'])['max_score'].transform(max) == test_pred_im_df['max_score']
        test_pred_im_clsMax_df = test_pred_im_df.loc[idx,['NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT', 'Count']]
        test_pred_im_clsMax_array = test_pred_im_clsMax_df.values
        pred = np.average(test_pred_im_clsMax_array[:,:-1], axis=0, weights=test_pred_im_clsMax_array[:,-1], returned=False).tolist()
        if image_class!='--':
            ind = FISH_CLASSES.index(image_class)
            logloss = -math.log(pred[ind]) 
        else:
            logloss = np.nan
        test_pred_im_clsMaxAve = [logloss,image_class,image_file]
        test_pred_im_clsMaxAve.extend(pred)
        test_pred_df.loc[len(test_pred_df)]=test_pred_im_clsMaxAve

    test_pred_df.to_pickle(OUTPUT_DIR+file_name)
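The clsMaxAve aggregation above works per test image: each detection votes for its argmax class, only the highest-scoring detection per class is kept as that class's representative, and the representatives are averaged with weights equal to the number of votes their class received. For example, if two boxes vote ALB and one votes SHARK, the image-level prediction is a 2:1 weighted mean of the best ALB box and the best SHARK box (values below are illustrative only):

# class representatives in FISH_CLASSES order: NoF, ALB, BET, DOL, LAG, OTHER, SHARK, YFT
reps = np.array([[0.05, 0.80, 0.02, 0.01, 0.01, 0.05, 0.03, 0.03],   # best ALB box (2 votes)
                 [0.10, 0.10, 0.05, 0.05, 0.05, 0.05, 0.55, 0.05]])  # best SHARK box (1 vote)
weights = np.array([2, 1])
image_pred = np.average(reps, axis=0, weights=weights)   # weighted mean, as in the cell above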

In [ ]:
#### visualization
# RFCNbbox_RFCNpred_CROPpred_df = ['image_class', 'image_file','crop_index','xmin','ymin','xmax','ymax',
#                                    'NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN',
#                                    'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN'
#                                    'NoF_CROP', 'ALB_CROP', 'BET_CROP', 'DOL_CROP',
#                                    'LAG_CROP', 'OTHER_CROP', 'SHARK_CROP', 'YFT_CROP']
#GTbbox_CROPpred_df = ['image_file','crop_index','crop_class','xmin','ymin','xmax','ymax',
#                      'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT', 'logloss']
# test_pred_df = ['logloss','image_class','image_file','NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']

for j in range(test_pred_df.shape[0]):
    image_logloss = test_pred_df.iat[j,0]
    image_class = test_pred_df.iat[j,1]
    image_file = test_pred_df.iat[j,2]
    if not (j < 1000 and j % 30 == 0):
        continue
    im = Image.open('../RFCN/JPEGImages/'+image_file)
    im = np.asarray(im)
    fig, ax = plt.subplots(figsize=(10, 8))
    ax.imshow(im, aspect='equal')
    RFCN_dets = RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df.loc[RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df['image_file']==image_file]
    for index,row in RFCN_dets.iterrows():
        bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
        RFCN = [row['NoF_RFCN'],row['ALB_RFCN'],row['BET_RFCN'],row['DOL_RFCN'],row['LAG_RFCN'],row['OTHER_RFCN'],row['SHARK_RFCN'],row['YFT_RFCN']]
        CROP = [row['NoF'],row['ALB'],row['BET'],row['DOL'],row['LAG'],row['OTHER'],row['SHARK'],row['YFT']]
        score_RFCN = max(RFCN)
        score_CROP = max(CROP)
        index_RFCN = RFCN.index(score_RFCN)
        index_CROP = CROP.index(score_CROP)
        class_RFCN = FISH_CLASSES[index_RFCN]
        class_CROP = FISH_CLASSES[index_CROP]
        ax.add_patch(plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='red', linewidth=2))
        ax.text(bbox[0], bbox[1] - 2, 'RFCN_{:s} {:.3f} \nHYBRID_{:s} {:.3f}'.format(class_RFCN, score_RFCN, class_CROP, score_CROP), bbox=dict(facecolor='red', alpha=0.5), fontsize=8, color='white')       
    GT_dets = GTbbox_CROPpred_df.loc[GTbbox_CROPpred_df['image_file']==image_file]
    for index,row in GT_dets.iterrows():
        bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
        CROP = [row['NoF'],row['ALB'],row['BET'],row['DOL'],row['LAG'],row['OTHER'],row['SHARK'],row['YFT']]
        score_CROP = max(CROP)
        index_CROP = CROP.index(score_CROP)
        class_CROP = FISH_CLASSES[index_CROP]
        ax.add_patch(plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='green', linewidth=2))
        ax.text(bbox[0], bbox[3] + 40, 'GT_{:s} \nCROP_{:s} {:.3f}'.format(row['crop_class'], class_CROP, score_CROP), bbox=dict(facecolor='green', alpha=0.5), fontsize=8, color='white')
    ax.set_title(('Image {:s}    FISH {:s}    logloss {}').format(image_file, image_class, image_logloss), fontsize=10) 
    plt.axis('off')
    plt.tight_layout()
    plt.draw()

In [ ]:
#temperature
T = 1
test_pred_array = test_pred_df[FISH_CLASSES].values
test_pred_T_array = np.exp(np.log(test_pred_array)/T)
test_pred_T_array = test_pred_T_array/np.sum(test_pred_T_array, axis=1, keepdims=True)
test_pred_T_df = pd.DataFrame(test_pred_T_array, columns=FISH_CLASSES)
test_pred_T_df = pd.concat([test_pred_df[['image_class','image_file']],test_pred_T_df], axis=1)

#add logloss
test_pred_T_df['logloss'] = test_pred_T_df.apply(lambda row: -math.log(row[row['image_class']]) if row['image_class']!='--' else np.nan, axis=1)

#calculate train logloss
print(test_pred_T_df.groupby(['image_class'])['logloss'].mean())
train_logloss = test_pred_T_df['logloss'].mean()
print('logloss of train is', train_logloss )
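Temperature scaling raises every probability to the power 1/T and renormalises: p_i' = p_i**(1/T) / sum_j p_j**(1/T). With T = 1 (as set above) the predictions are unchanged; T > 1 flattens them towards uniform, which hedges over-confident predictions and can limit the log-loss penalty on mis-classified test images, while T < 1 sharpens them. For instance, [0.9, 0.1] becomes roughly [0.75, 0.25] at T = 2, since 0.9**0.5 / (0.9**0.5 + 0.1**0.5) ≈ 0.75.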

In [ ]:
#test submission
submission = test_pred_T_df.loc[:999,['image_file','NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']]
submission.rename(columns={'image_file':'image'}, inplace=True)
sub_file = 'RFCN_AGNOSTICnms_'+RFCN_MODEL+'_'+CROP_MODEL+'_clsMaxAve_conf{:.2f}_T{}_'.format(CONF_THRESH, T)+'{:.4f}'.format(train_logloss)+'.csv'
submission.to_csv(sub_file, index=False)
print('Done!'+sub_file)

In [34]:
def create_model_resnet25ss():
    
    img_input = Input(shape=(ROWS, COLS, 3))
    
    x = Conv2D(16, (3, 3), strides=(2, 2), name='conv1')(img_input)
    x = BatchNormalization(name='bn_conv1')(x)
    x = Activation('relu')(x)

    x = conv_block(x, 3, 16, stage=2, block='a')
    x = identity_block(x, 3, 16, stage=2, block='b')
    x = identity_block(x, 3, 16, stage=2, block='c')

    x = conv_block(x, 3, 32, stage=3, block='a')
    x = identity_block(x, 3, 32, stage=3, block='b')
    x = identity_block(x, 3, 32, stage=3, block='c')

    x = conv_block(x, 3, 64, stage=4, block='a')
    x = identity_block(x, 3, 64, stage=4, block='b')
    x = identity_block(x, 3, 64, stage=4, block='c')

    x = conv_block(x, 3, 128, stage=5, block='a')
    x = identity_block(x, 3, 128, stage=5, block='b')
    x = identity_block(x, 3, 128, stage=5, block='c')

    x = GlobalAveragePooling2D()(x)
#     model.add(Dropout(0.8))
    x = Dense(len(CROP_CLASSES), activation='softmax')(x)

    model = Model(img_input, x)
    return model
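conv_block and identity_block are helper functions defined in an earlier cell of this notebook and are not reproduced in this section. For readers without that cell, the sketch below shows the usual shape of such residual blocks (two 3x3 conv/BN/ReLU stages plus a shortcut, with a strided 1x1 projection in the conv block); it is a hypothetical stand-in, and the notebook's own blocks may differ in depth, stride placement and layer naming.

# Hypothetical sketches only -- not the notebook's actual conv_block / identity_block.
def identity_block_sketch(input_tensor, kernel_size, filters, stage, block):
    prefix = 'res' + str(stage) + block
    x = Conv2D(filters, (kernel_size, kernel_size), padding='same', name=prefix + '_2a')(input_tensor)
    x = BatchNormalization(name='bn_' + prefix + '_2a')(x)
    x = Activation('relu')(x)
    x = Conv2D(filters, (kernel_size, kernel_size), padding='same', name=prefix + '_2b')(x)
    x = BatchNormalization(name='bn_' + prefix + '_2b')(x)
    x = layers.add([x, input_tensor])   # identity shortcut: input and output shapes match
    return Activation('relu')(x)

def conv_block_sketch(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    prefix = 'res' + str(stage) + block
    x = Conv2D(filters, (kernel_size, kernel_size), strides=strides, padding='same', name=prefix + '_2a')(input_tensor)
    x = BatchNormalization(name='bn_' + prefix + '_2a')(x)
    x = Activation('relu')(x)
    x = Conv2D(filters, (kernel_size, kernel_size), padding='same', name=prefix + '_2b')(x)
    x = BatchNormalization(name='bn_' + prefix + '_2b')(x)
    shortcut = Conv2D(filters, (1, 1), strides=strides, name=prefix + '_1')(input_tensor)   # projection shortcut
    shortcut = BatchNormalization(name='bn_' + prefix + '_1')(shortcut)
    x = layers.add([x, shortcut])
    return Activation('relu')(x)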