Best checkpoint: weights.212-0.5219.hdf5 (loss: 1.1229 - acc: 0.7779 - val_loss: 0.5219 - val_acc: 0.8030)

In [1]:
from __future__ import division
from __future__ import print_function  # __future__ imports must come before any other statement

import os, random, glob, pickle, collections, math, json
import numpy as np
import pandas as pd
# import ujson as json
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder

import matplotlib.pyplot as plt
%matplotlib inline 

from keras.models import Sequential, Model, load_model, model_from_json
from keras.layers import GlobalAveragePooling2D, Flatten, Dropout, Dense, LeakyReLU, Conv2D, Input, BatchNormalization
from keras.optimizers import Adam, RMSprop
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.preprocessing import image
from keras import backend as K
K.set_image_dim_ordering('tf')


Using TensorFlow backend.

In [10]:
TRAIN_DIR = '../data/train/'
TEST_DIR = '../RFCN/JPEGImages/'
TRAIN_CROP_DIR = '../data/train_crop/'
TEST_CROP_DIR = '../data/test_stg1_crop/'
RFCN_MODEL = 'resnet101_rfcn_ohem_iter_30000'
CROP_MODEL = 'VGG13_Hybrid_woNoF'
CHECKPOINT_DIR = './' + CROP_MODEL + '/checkpoint/'
LOG_DIR = './' + CROP_MODEL + '/log/'
OUTPUT_DIR = './' + CROP_MODEL + '/output/'
# create the model, checkpoint, log, and output directories if missing
for d in ['./' + CROP_MODEL, CHECKPOINT_DIR, LOG_DIR, OUTPUT_DIR]:
    if not os.path.exists(d):
        os.mkdir(d)
FISH_CLASSES = ['NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']
CROP_CLASSES = FISH_CLASSES[:]
CROP_CLASSES.remove('NoF')
CONF_THRESH = 0.8
ROWS = 224
COLS = 224
BATCHSIZE = 32
LEARNINGRATE = 1e-4
def featurewise_center(x):
    # subtract the per-channel mean computed over the whole batch
    mean = np.mean(x, axis=0, keepdims=True)
    mean = np.mean(mean, axis=(1,2), keepdims=True)
    x_centered = x - mean
    return x_centered

def mean(x):
    # per-channel mean of a batch of HxWx3 images, returned as a 3-vector
    mean = np.mean(x, axis=0)
    mean = np.mean(mean, axis=(0,1))
    return mean

def load_img(path, bbox, target_size=None):
    img = Image.open(path)
#     img = img.convert('RGB')
    cropped = img.crop((bbox[0],bbox[1],bbox[2],bbox[3]))
    width_cropped, height_cropped = cropped.size
    # rotate portrait crops to landscape so all fish share one orientation
    if height_cropped > width_cropped:
        cropped = cropped.transpose(method=Image.ROTATE_90)
    if target_size:
        cropped = cropped.resize((target_size[1], target_size[0]), Image.BILINEAR)
    return cropped

def preprocess_input(x, mean):
    # resnet50-style image preprocessing: subtract the per-channel mean
#     'RGB'->'BGR'
#     x = x[:, :, ::-1]
#     x /= 255.
    x[:, :, 0] -= mean[0]
    x[:, :, 1] -= mean[1]
    x[:, :, 2] -= mean[2]
    return x

def get_best_model(checkpoint_dir=CHECKPOINT_DIR):
    files = glob.glob(checkpoint_dir+'*')
    # filenames follow 'weights.{epoch:03d}-{val_loss:.4f}.hdf5';
    # strip the '.hdf5' suffix to recover each checkpoint's validation loss
    val_losses = [float(f.split('-')[-1][:-5]) for f in files]
    index = val_losses.index(min(val_losses))
    print('Loading model from checkpoint file ' + files[index])
    model = load_model(files[index])
    model_name = files[index].split('/')[-1]
    print('Loading model Done!')
    return (model, model_name)
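
The checkpoint filenames written by ModelCheckpoint below embed the validation loss, which is exactly what get_best_model parses back out. A minimal sketch of that convention (the sample path is illustrative):

In [ ]:
sample = './VGG13_Hybrid_woNoF/checkpoint/weights.212-0.5219.hdf5'
print(float(sample.split('-')[-1][:-5]))  # 0.5219; [:-5] strips the '.hdf5' suffix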

In [3]:
# GTbbox_df columns: ['image_file','crop_index','crop_class','xmin','ymin','xmax','ymax']

file_name = 'GTbbox_df.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    GTbbox_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)       
    GTbbox_df = pd.DataFrame(columns=['image_file','crop_index','crop_class','xmin','ymin','xmax','ymax'])  
    
    for c in CROP_CLASSES:
        print(c)
        j = json.load(open('../data/BBannotations/{}.json'.format(c), 'r'))
        for l in j: 
            filename = l["filename"]
            head, image_file = os.path.split(filename)
            basename, file_extension = os.path.splitext(image_file) 
            img = Image.open(TEST_DIR+image_file)  # renamed to avoid shadowing keras.preprocessing.image
            width_image, height_image = img.size
            for i in range(len(l["annotations"])):
                a = l["annotations"][i]
                xmin = (a["x"])
                ymin = (a["y"])
                width = (a["width"])
                height = (a["height"])
                xmax = xmin + width
                ymax = ymin + height
                assert max(xmin,0)<min(xmax,width_image)
                assert max(ymin,0)<min(ymax,height_image)
                GTbbox_df.loc[len(GTbbox_df)]=[image_file,i,a["class"],max(xmin,0),max(ymin,0),min(xmax,width_image),min(ymax,height_image)]
                if a["class"] != c: print(GTbbox_df.tail(1))  
    
    test_size = GTbbox_df.shape[0]-int(math.ceil(GTbbox_df.shape[0]*0.8/BATCHSIZE)*BATCHSIZE)
    train_ind, valid_ind = train_test_split(range(GTbbox_df.shape[0]), test_size=test_size, random_state=1986, stratify=GTbbox_df['crop_class'])
    GTbbox_df['split'] = ['train' if i in train_ind else 'valid' for i in range(GTbbox_df.shape[0])]
    GTbbox_df.to_pickle(OUTPUT_DIR+file_name)


Loading from file GTbbox_df.pickle
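
The split above sizes the validation set so that the training partition is an exact multiple of BATCHSIZE, which the assert before steps_per_epoch later relies on. A worked sketch of the arithmetic with a hypothetical crop count:

In [ ]:
n = 4480                                    # hypothetical total number of crops
n_train = int(math.ceil(n * 0.8 / BATCHSIZE) * BATCHSIZE)
print(n_train, n - n_train)                 # 3584 896: train is exactly 112 batches of 32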

In [4]:
#Load data

def data_from_df(df):
    X = np.ndarray((df.shape[0], ROWS, COLS, 3), dtype=np.uint8)
    y = np.zeros((df.shape[0], len(CROP_CLASSES)), dtype=K.floatx())
    i = 0
    for index,row in df.iterrows():
        image_file = row['image_file']
        fish = row['crop_class']
        bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
        cropped = load_img(TEST_DIR+image_file,bbox,target_size=(ROWS,COLS))
        X[i] = np.asarray(cropped)
        y[i,CROP_CLASSES.index(fish)] = 1
        i += 1
    return (X, y)

def data_load(name):
    file_name = 'data_'+name+'_{}_{}.pickle'.format(ROWS, COLS)
    if os.path.exists(OUTPUT_DIR+file_name):
        print ('Loading from file '+file_name)
        with open(OUTPUT_DIR+file_name, 'rb') as f:
            data = pickle.load(f)
        X = data['X']
        y = data['y']
    else:
        print ('Generating file '+file_name)
        
        if name=='train' or name=='valid': 
            df = GTbbox_df[GTbbox_df['split']==name]
        elif name=='all':
            df = GTbbox_df
        else:
            raise ValueError('Invalid name '+name)  # df would otherwise be undefined below
    
        X, y = data_from_df(df)

        data = {'X': X,'y': y}
        with open(OUTPUT_DIR+file_name, 'wb') as f:
            pickle.dump(data, f)
    return (X, y)
X_train, y_train = data_load('train')
X_valid, y_valid = data_load('valid')
       
print('Loading data done.')
print('train sample ', X_train.shape[0])
print('valid sample ', X_valid.shape[0])
X_train = X_train.astype(np.float32)
X_valid = X_valid.astype(np.float32)
print('Convert to float32 done.')
X_train /= 255.
X_valid /= 255.
print('Rescale by 255 done.')
X_train_centered = featurewise_center(X_train)
print('mean of X_train is ', mean(X_train))
X_valid_centered = featurewise_center(X_valid)
print('mean of X_valid is ', mean(X_valid))
print('Featurewise centered done.')


Loading from file data_train_224_224.pickle
Loading from file data_valid_224_224.pickle
Loading data done.
train sample  3584
valid sample  787
Convert to float32 done.
Rescale by 255 done.
mean of X_train is  [ 0.40704539  0.43806663  0.39486334]
mean of X_valid is  [ 0.4065561   0.43584293  0.39404479]
Featurewise centered done.
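
As an illustrative check (not part of the original run), the centered copies should have per-channel means of roughly zero; the values printed above are the means of the uncentered, rescaled arrays:

In [ ]:
print(mean(X_train_centered))  # expected: approximately [0. 0. 0.]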

In [5]:
# #class weight = n_samples / (n_classes * np.bincount(y))
# class_weight_fish = dict(GTbbox_df.groupby('crop_class').size())
# class_weight = {}
# n_samples = GTbbox_df.shape[0]
# for key,value in class_weight_fish.items():
#         class_weight[CROP_CLASSES.index(key)] = n_samples / (len(CROP_CLASSES)*value)
# class_weight

class_weight_fish = dict(GTbbox_df.groupby('crop_class').size())
class_weight = {}
ref = max(class_weight_fish.values())
for key,value in class_weight_fish.items():
    class_weight[CROP_CLASSES.index(key)] = ref/value
class_weight


Out[5]:
{0: 1.0,
 1: 8.212418300653594,
 2: 19.944444444444443,
 3: 23.933333333333334,
 4: 7.5465465465465469,
 5: 13.296296296296296,
 6: 3.1451814768460578}
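
The rule above anchors the most frequent class at weight 1.0 and up-weights rarer classes in proportion to how much smaller they are. A worked example with hypothetical counts:

In [ ]:
counts = {'ALB': 1530, 'DOL': 105}               # hypothetical per-class crop counts
ref = max(counts.values())
print({k: ref / v for k, v in counts.items()})   # {'ALB': 1.0, 'DOL': ~14.57}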

In [11]:
#data preprocessing

train_datagen = ImageDataGenerator(
    rotation_range=180,
    shear_range=0.2,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    vertical_flip=True)
train_generator = train_datagen.flow(X_train, y_train, batch_size=BATCHSIZE, shuffle=True, seed=None)
assert X_train.shape[0]%BATCHSIZE==0
steps_per_epoch = int(X_train.shape[0]/BATCHSIZE)
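
A quick visual sanity check of the augmentation (illustrative; assumes the inline matplotlib backend set up earlier):

In [ ]:
X_batch, y_batch = next(train_generator)     # one augmented batch of 32 crops
fig, axes = plt.subplots(1, 4, figsize=(12, 3))
for ax, crop in zip(axes, X_batch[:4]):
    ax.imshow(np.clip(crop, 0., 1.))         # inputs were rescaled to [0, 1] above
    ax.axis('off')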

In [12]:
#callbacks

early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='auto')        

model_checkpoint = ModelCheckpoint(filepath=CHECKPOINT_DIR+'weights.{epoch:03d}-{val_loss:.4f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
        
learningrate_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, mode='auto', epsilon=0.001, cooldown=0, min_lr=0)

tensorboard = TensorBoard(log_dir=LOG_DIR, histogram_freq=0, write_graph=False, write_images=True)
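
Note that only model_checkpoint and tensorboard are passed to the fit calls below; to also use the early stopping and learning-rate schedule defined here, the callbacks list would simply be extended, e.g.:

In [ ]:
# hypothetical: enable all four callbacks in the fit_generator calls below
callbacks_all = [model_checkpoint, tensorboard, early_stopping, learningrate_schedule]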

In [13]:
def create_model_VGG13():
    model = Sequential()

#     model.add(Input(shape=(ROWS, COLS, 3)))
    model.add(Conv2D(64, (3, 3), padding='same', name='block1_conv1', input_shape=(ROWS, COLS, 3)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    model.add(Conv2D(64, (3, 3), strides=(2, 2), padding='same', name='block1_conv2'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    
    model.add(Conv2D(128, (3, 3), padding='same', name='block2_conv1'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same', name='block2_conv2'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))

    model.add(Conv2D(256, (3, 3), padding='same', name='block3_conv1'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    model.add(Conv2D(256, (3, 3), padding='same', name='block3_conv2'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    model.add(Conv2D(256, (3, 3), strides=(2, 2), padding='same', name='block3_conv3'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    
    model.add(Conv2D(512, (3, 3), padding='same', name='block4_conv1'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    model.add(Conv2D(512, (3, 3), padding='same', name='block4_conv2'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    model.add(Conv2D(512, (3, 3), strides=(2, 2), padding='same', name='block4_conv3'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    
    model.add(Conv2D(512, (3, 3), padding='same', name='block5_conv1'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    model.add(Conv2D(512, (3, 3), padding='same', name='block5_conv2'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    model.add(Conv2D(512, (3, 3), strides=(2, 2), padding='same', name='block5_conv3'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.33))
    
    model.add(GlobalAveragePooling2D())
#     model.add(Dropout(0.8))
    model.add(Dense(len(CROP_CLASSES), activation='softmax'))

    return model
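
Each block ends in a stride-2 convolution that stands in for VGG's max pooling, so 224x224 inputs are halved five times to 7x7 before the global average pool. A quick way to confirm the shapes (illustrative):

In [ ]:
m = create_model_VGG13()
m.summary()  # the last conv block should output 7x7x512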

In [14]:
#train from scratch

model = create_model_VGG13()

# compile the model; training is from scratch, so all layers are trainable
optimizer = Adam(lr=LEARNINGRATE)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# train for up to 100 epochs (interrupted manually below);
# workers/pickle_safe are the Keras 2.0.x multiprocessing flags
model.fit_generator(train_generator, steps_per_epoch=steps_per_epoch, epochs=100, verbose=1, 
                    callbacks=[model_checkpoint, tensorboard], 
                    validation_data=(X_valid,y_valid), class_weight=class_weight, workers=3, pickle_safe=True)


Epoch 1/100
111/112 [============================>.] - ETA: 1s - loss: 6.0961 - acc: 0.2066   Epoch 00000: val_loss improved from inf to 2.14524, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.000-2.1452.hdf5
112/112 [==============================] - 228s - loss: 6.0776 - acc: 0.2070 - val_loss: 2.1452 - val_acc: 0.0839
Epoch 2/100
111/112 [============================>.] - ETA: 1s - loss: 5.2523 - acc: 0.2441  Epoch 00001: val_loss improved from 2.14524 to 2.03555, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.001-2.0355.hdf5
112/112 [==============================] - 219s - loss: 5.2586 - acc: 0.2453 - val_loss: 2.0355 - val_acc: 0.1804
Epoch 3/100
111/112 [============================>.] - ETA: 1s - loss: 4.7283 - acc: 0.2917  Epoch 00002: val_loss did not improve
112/112 [==============================] - 219s - loss: 4.7313 - acc: 0.2907 - val_loss: 4.0712 - val_acc: 0.0267
Epoch 4/100
111/112 [============================>.] - ETA: 1s - loss: 4.7677 - acc: 0.3190  Epoch 00003: val_loss did not improve
112/112 [==============================] - 218s - loss: 4.7691 - acc: 0.3186 - val_loss: 3.2098 - val_acc: 0.1881
Epoch 5/100
111/112 [============================>.] - ETA: 1s - loss: 4.6109 - acc: 0.3331  Epoch 00004: val_loss improved from 2.03555 to 1.97199, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.004-1.9720.hdf5
112/112 [==============================] - 219s - loss: 4.6020 - acc: 0.3320 - val_loss: 1.9720 - val_acc: 0.1970
Epoch 6/100
111/112 [============================>.] - ETA: 1s - loss: 4.6062 - acc: 0.3221  Epoch 00005: val_loss improved from 1.97199 to 1.95914, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.005-1.9591.hdf5
112/112 [==============================] - 219s - loss: 4.6056 - acc: 0.3220 - val_loss: 1.9591 - val_acc: 0.2554
Epoch 7/100
111/112 [============================>.] - ETA: 1s - loss: 4.2123 - acc: 0.3404  Epoch 00006: val_loss improved from 1.95914 to 1.80081, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.006-1.8008.hdf5
112/112 [==============================] - 219s - loss: 4.2043 - acc: 0.3410 - val_loss: 1.8008 - val_acc: 0.3304
Epoch 8/100
111/112 [============================>.] - ETA: 1s - loss: 4.2009 - acc: 0.3477  Epoch 00007: val_loss did not improve
112/112 [==============================] - 219s - loss: 4.1988 - acc: 0.3502 - val_loss: 2.6175 - val_acc: 0.2643
Epoch 9/100
111/112 [============================>.] - ETA: 1s - loss: 4.2762 - acc: 0.3764  Epoch 00008: val_loss did not improve
112/112 [==============================] - 218s - loss: 4.2657 - acc: 0.3775 - val_loss: 2.3096 - val_acc: 0.2402
Epoch 10/100
111/112 [============================>.] - ETA: 1s - loss: 4.1545 - acc: 0.3820  Epoch 00009: val_loss did not improve
112/112 [==============================] - 218s - loss: 4.1508 - acc: 0.3820 - val_loss: 2.2760 - val_acc: 0.2173
Epoch 11/100
111/112 [============================>.] - ETA: 1s - loss: 3.9678 - acc: 0.3711  Epoch 00010: val_loss did not improve
112/112 [==============================] - 218s - loss: 3.9599 - acc: 0.3705 - val_loss: 2.9965 - val_acc: 0.2516
Epoch 12/100
111/112 [============================>.] - ETA: 1s - loss: 4.0232 - acc: 0.4023  Epoch 00011: val_loss improved from 1.80081 to 1.70622, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.011-1.7062.hdf5
112/112 [==============================] - 218s - loss: 4.0245 - acc: 0.4015 - val_loss: 1.7062 - val_acc: 0.2973
Epoch 13/100
111/112 [============================>.] - ETA: 1s - loss: 3.8091 - acc: 0.3927  Epoch 00012: val_loss did not improve
112/112 [==============================] - 219s - loss: 3.8121 - acc: 0.3926 - val_loss: 2.0726 - val_acc: 0.2236
Epoch 14/100
111/112 [============================>.] - ETA: 1s - loss: 4.0086 - acc: 0.3767  Epoch 00013: val_loss did not improve
112/112 [==============================] - 219s - loss: 3.9960 - acc: 0.3770 - val_loss: 1.7478 - val_acc: 0.4066
Epoch 15/100
111/112 [============================>.] - ETA: 1s - loss: 3.8921 - acc: 0.4122  Epoch 00014: val_loss did not improve
112/112 [==============================] - 218s - loss: 3.8867 - acc: 0.4107 - val_loss: 2.8827 - val_acc: 0.2135
Epoch 16/100
 12/112 [==>...........................] - ETA: 181s - loss: 3.5375 - acc: 0.3724
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-14-741946b6bbd4> in <module>()
     11 model.fit_generator(train_generator, steps_per_epoch=steps_per_epoch, epochs=100, verbose=1, 
     12                     callbacks=[model_checkpoint, tensorboard],
---> 13                     validation_data=(X_valid,y_valid), class_weight=class_weight, workers=3, pickle_safe=True)

[... stack frames through keras fit_generator/train_on_batch and the TensorFlow session run elided; training was interrupted manually ...]

KeyboardInterrupt: 

In [21]:
#resume training

model, model_name = get_best_model()
# print('Loading model from weights.004-0.0565.hdf5')
# model = load_model(CHECKPOINT_DIR + 'weights.011-1.7062.hdf5')

model.fit_generator(train_generator, steps_per_epoch=steps_per_epoch, epochs=250, verbose=1, 
                    callbacks=[model_checkpoint, tensorboard], 
                    validation_data=(X_valid,y_valid), class_weight=class_weight, 
                    workers=3, pickle_safe=True, initial_epoch=82)


Loading model from checkpoint file ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.081-0.8621.hdf5
Loading model Done!
Epoch 83/250
111/112 [============================>.] - ETA: 1s - loss: 2.0954 - acc: 0.6318  Epoch 00082: val_loss did not improve
112/112 [==============================] - 220s - loss: 2.0904 - acc: 0.6334 - val_loss: 1.0413 - val_acc: 0.6226
Epoch 84/250
111/112 [============================>.] - ETA: 1s - loss: 2.1778 - acc: 0.6343  Epoch 00083: val_loss did not improve
112/112 [==============================] - 218s - loss: 2.1745 - acc: 0.6339 - val_loss: 1.3510 - val_acc: 0.4663
Epoch 85/250
111/112 [============================>.] - ETA: 1s - loss: 2.2228 - acc: 0.6349  Epoch 00084: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.2158 - acc: 0.6362 - val_loss: 1.0983 - val_acc: 0.5388
Epoch 86/250
111/112 [============================>.] - ETA: 1s - loss: 2.0699 - acc: 0.6377  Epoch 00085: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0646 - acc: 0.6376 - val_loss: 0.9702 - val_acc: 0.6379
Epoch 87/250
111/112 [============================>.] - ETA: 1s - loss: 2.0465 - acc: 0.6236  Epoch 00086: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0450 - acc: 0.6236 - val_loss: 1.0705 - val_acc: 0.5591
Epoch 88/250
111/112 [============================>.] - ETA: 1s - loss: 2.0990 - acc: 0.6501  Epoch 00087: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0959 - acc: 0.6504 - val_loss: 0.9654 - val_acc: 0.6036
Epoch 89/250
111/112 [============================>.] - ETA: 1s - loss: 2.0922 - acc: 0.6402  Epoch 00088: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0894 - acc: 0.6409 - val_loss: 0.8910 - val_acc: 0.6429
Epoch 90/250
111/112 [============================>.] - ETA: 1s - loss: 2.1845 - acc: 0.6439  Epoch 00089: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.1807 - acc: 0.6448 - val_loss: 1.0705 - val_acc: 0.5997
Epoch 91/250
111/112 [============================>.] - ETA: 1s - loss: 2.1418 - acc: 0.6526  Epoch 00090: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.1472 - acc: 0.6518 - val_loss: 1.2531 - val_acc: 0.5578
Epoch 92/250
111/112 [============================>.] - ETA: 1s - loss: 2.1244 - acc: 0.6543  Epoch 00091: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.1139 - acc: 0.6546 - val_loss: 0.9383 - val_acc: 0.6595
Epoch 93/250
111/112 [============================>.] - ETA: 1s - loss: 2.1761 - acc: 0.6439  Epoch 00092: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.1687 - acc: 0.6440 - val_loss: 0.9861 - val_acc: 0.6569
Epoch 94/250
111/112 [============================>.] - ETA: 1s - loss: 2.2073 - acc: 0.6374  Epoch 00093: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.2073 - acc: 0.6381 - val_loss: 1.0020 - val_acc: 0.6607
Epoch 95/250
111/112 [============================>.] - ETA: 1s - loss: 2.0347 - acc: 0.6582  Epoch 00094: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0270 - acc: 0.6590 - val_loss: 0.9173 - val_acc: 0.6175
Epoch 96/250
111/112 [============================>.] - ETA: 1s - loss: 1.9868 - acc: 0.6532  Epoch 00095: val_loss improved from 0.86211 to 0.76306, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.095-0.7631.hdf5
112/112 [==============================] - 224s - loss: 1.9853 - acc: 0.6543 - val_loss: 0.7631 - val_acc: 0.6976
Epoch 97/250
111/112 [============================>.] - ETA: 1s - loss: 1.9209 - acc: 0.6695  Epoch 00096: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9212 - acc: 0.6688 - val_loss: 1.0255 - val_acc: 0.5883
Epoch 98/250
111/112 [============================>.] - ETA: 1s - loss: 2.0235 - acc: 0.6458  Epoch 00097: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0219 - acc: 0.6462 - val_loss: 0.9241 - val_acc: 0.6480
Epoch 99/250
111/112 [============================>.] - ETA: 1s - loss: 2.0741 - acc: 0.6577  Epoch 00098: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0631 - acc: 0.6588 - val_loss: 1.1789 - val_acc: 0.5578
Epoch 100/250
111/112 [============================>.] - ETA: 1s - loss: 2.0195 - acc: 0.6622  Epoch 00099: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0130 - acc: 0.6621 - val_loss: 1.4059 - val_acc: 0.5438
Epoch 101/250
111/112 [============================>.] - ETA: 1s - loss: 1.9436 - acc: 0.6475  Epoch 00100: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9361 - acc: 0.6468 - val_loss: 1.2172 - val_acc: 0.5400
Epoch 102/250
111/112 [============================>.] - ETA: 1s - loss: 1.9877 - acc: 0.6734  Epoch 00101: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9828 - acc: 0.6735 - val_loss: 1.0104 - val_acc: 0.5883
Epoch 103/250
111/112 [============================>.] - ETA: 1s - loss: 1.9960 - acc: 0.6624  Epoch 00102: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9888 - acc: 0.6629 - val_loss: 1.3340 - val_acc: 0.5019
Epoch 104/250
111/112 [============================>.] - ETA: 1s - loss: 1.9088 - acc: 0.6740  Epoch 00103: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9098 - acc: 0.6738 - val_loss: 1.1978 - val_acc: 0.5578
Epoch 105/250
111/112 [============================>.] - ETA: 1s - loss: 1.9156 - acc: 0.6723  Epoch 00104: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9168 - acc: 0.6722 - val_loss: 0.8788 - val_acc: 0.6658
Epoch 106/250
111/112 [============================>.] - ETA: 1s - loss: 2.0870 - acc: 0.6492  Epoch 00105: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0941 - acc: 0.6487 - val_loss: 1.0782 - val_acc: 0.5451
Epoch 107/250
111/112 [============================>.] - ETA: 1s - loss: 1.9389 - acc: 0.6678  Epoch 00106: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9358 - acc: 0.6685 - val_loss: 2.2778 - val_acc: 0.4956
Epoch 108/250
111/112 [============================>.] - ETA: 1s - loss: 2.0552 - acc: 0.6486  Epoch 00107: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0484 - acc: 0.6493 - val_loss: 1.4237 - val_acc: 0.4968
Epoch 109/250
111/112 [============================>.] - ETA: 1s - loss: 2.1154 - acc: 0.6602  Epoch 00108: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.1186 - acc: 0.6599 - val_loss: 1.2923 - val_acc: 0.5845
Epoch 110/250
111/112 [============================>.] - ETA: 1s - loss: 1.8602 - acc: 0.6684  Epoch 00109: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8606 - acc: 0.6680 - val_loss: 1.1352 - val_acc: 0.6239
Epoch 111/250
111/112 [============================>.] - ETA: 1s - loss: 1.7135 - acc: 0.6886  Epoch 00110: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.7051 - acc: 0.6895 - val_loss: 0.9576 - val_acc: 0.6239
Epoch 112/250
111/112 [============================>.] - ETA: 1s - loss: 1.8703 - acc: 0.6726  Epoch 00111: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8689 - acc: 0.6716 - val_loss: 1.1247 - val_acc: 0.6226
Epoch 113/250
111/112 [============================>.] - ETA: 1s - loss: 1.9447 - acc: 0.6906  Epoch 00112: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9410 - acc: 0.6903 - val_loss: 1.1477 - val_acc: 0.5591
Epoch 114/250
111/112 [============================>.] - ETA: 1s - loss: 1.6606 - acc: 0.6954  Epoch 00113: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.6537 - acc: 0.6964 - val_loss: 0.8279 - val_acc: 0.6823
Epoch 115/250
111/112 [============================>.] - ETA: 1s - loss: 1.8565 - acc: 0.6830  Epoch 00114: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8596 - acc: 0.6830 - val_loss: 1.0498 - val_acc: 0.6480
Epoch 116/250
111/112 [============================>.] - ETA: 1s - loss: 1.8546 - acc: 0.6807  Epoch 00115: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8547 - acc: 0.6802 - val_loss: 0.8452 - val_acc: 0.6785
Epoch 117/250
111/112 [============================>.] - ETA: 1s - loss: 1.9140 - acc: 0.6771  Epoch 00116: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9045 - acc: 0.6766 - val_loss: 1.0092 - val_acc: 0.6099
Epoch 118/250
111/112 [============================>.] - ETA: 1s - loss: 1.8893 - acc: 0.6791  Epoch 00117: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8875 - acc: 0.6780 - val_loss: 1.1356 - val_acc: 0.5489
Epoch 119/250
111/112 [============================>.] - ETA: 1s - loss: 1.9173 - acc: 0.6774  Epoch 00118: val_loss improved from 0.76306 to 0.73469, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.118-0.7347.hdf5
112/112 [==============================] - 219s - loss: 1.9187 - acc: 0.6777 - val_loss: 0.7347 - val_acc: 0.7192
Epoch 120/250
111/112 [============================>.] - ETA: 1s - loss: 1.8965 - acc: 0.6776  Epoch 00119: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8957 - acc: 0.6769 - val_loss: 0.8096 - val_acc: 0.6849
Epoch 121/250
111/112 [============================>.] - ETA: 1s - loss: 1.8054 - acc: 0.6945  Epoch 00120: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8139 - acc: 0.6934 - val_loss: 1.1386 - val_acc: 0.5718
Epoch 122/250
111/112 [============================>.] - ETA: 1s - loss: 1.8218 - acc: 0.6830  Epoch 00121: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8166 - acc: 0.6825 - val_loss: 1.2010 - val_acc: 0.5807
Epoch 123/250
111/112 [============================>.] - ETA: 1s - loss: 1.9506 - acc: 0.6712  Epoch 00122: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9522 - acc: 0.6705 - val_loss: 1.5390 - val_acc: 0.4803
Epoch 124/250
111/112 [============================>.] - ETA: 1s - loss: 1.6657 - acc: 0.7002  Epoch 00123: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.6657 - acc: 0.7003 - val_loss: 1.0925 - val_acc: 0.5273
Epoch 125/250
111/112 [============================>.] - ETA: 1s - loss: 1.7354 - acc: 0.6847  Epoch 00124: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.7453 - acc: 0.6850 - val_loss: 1.2556 - val_acc: 0.5057
Epoch 126/250
111/112 [============================>.] - ETA: 1s - loss: 1.6693 - acc: 0.7086  Epoch 00125: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.6659 - acc: 0.7079 - val_loss: 1.0777 - val_acc: 0.5769
Epoch 127/250
111/112 [============================>.] - ETA: 1s - loss: 1.7098 - acc: 0.7109  Epoch 00126: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.7159 - acc: 0.7118 - val_loss: 1.3301 - val_acc: 0.5858
Epoch 128/250
111/112 [============================>.] - ETA: 1s - loss: 1.8599 - acc: 0.6807  Epoch 00127: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8553 - acc: 0.6811 - val_loss: 1.5954 - val_acc: 0.4625
Epoch 129/250
111/112 [============================>.] - ETA: 1s - loss: 1.9475 - acc: 0.6686  Epoch 00128: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9425 - acc: 0.6682 - val_loss: 1.2139 - val_acc: 0.5299
Epoch 130/250
111/112 [============================>.] - ETA: 1s - loss: 1.6868 - acc: 0.7019  Epoch 00129: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.6813 - acc: 0.7020 - val_loss: 0.8059 - val_acc: 0.6696
Epoch 131/250
111/112 [============================>.] - ETA: 1s - loss: 1.7228 - acc: 0.7005  Epoch 00130: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.7274 - acc: 0.7006 - val_loss: 0.9243 - val_acc: 0.6849
Epoch 132/250
111/112 [============================>.] - ETA: 1s - loss: 1.6357 - acc: 0.7038  Epoch 00131: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.6300 - acc: 0.7048 - val_loss: 1.0106 - val_acc: 0.6048
Epoch 133/250
111/112 [============================>.] - ETA: 1s - loss: 1.7474 - acc: 0.7044  Epoch 00132: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.7496 - acc: 0.7042 - val_loss: 2.2493 - val_acc: 0.3888
Epoch 134/250
111/112 [============================>.] - ETA: 1s - loss: 1.6162 - acc: 0.7106  Epoch 00133: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.6125 - acc: 0.7112 - val_loss: 0.8734 - val_acc: 0.6811
Epoch 135/250
111/112 [============================>.] - ETA: 1s - loss: 1.7012 - acc: 0.6906  Epoch 00134: val_loss improved from 0.73469 to 0.68849, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.134-0.6885.hdf5
112/112 [==============================] - 219s - loss: 1.6947 - acc: 0.6914 - val_loss: 0.6885 - val_acc: 0.7535
Epoch 136/250
111/112 [============================>.] - ETA: 1s - loss: 1.5660 - acc: 0.7238  Epoch 00135: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5627 - acc: 0.7235 - val_loss: 0.7422 - val_acc: 0.7205
Epoch 137/250
111/112 [============================>.] - ETA: 1s - loss: 1.8009 - acc: 0.6858  Epoch 00136: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.7978 - acc: 0.6861 - val_loss: 0.9843 - val_acc: 0.6849
Epoch 138/250
111/112 [============================>.] - ETA: 1s - loss: 1.9401 - acc: 0.6850  Epoch 00137: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9382 - acc: 0.6847 - val_loss: 1.1789 - val_acc: 0.5985
Epoch 139/250
111/112 [============================>.] - ETA: 1s - loss: 1.7198 - acc: 0.7019  Epoch 00138: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.7154 - acc: 0.7028 - val_loss: 0.8633 - val_acc: 0.6417
Epoch 140/250
111/112 [============================>.] - ETA: 1s - loss: 1.7091 - acc: 0.7027  Epoch 00139: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.7107 - acc: 0.7034 - val_loss: 1.5054 - val_acc: 0.6099
Epoch 141/250
111/112 [============================>.] - ETA: 1s - loss: 1.5778 - acc: 0.7075  Epoch 00140: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5759 - acc: 0.7081 - val_loss: 0.9114 - val_acc: 0.6582
Epoch 142/250
111/112 [============================>.] - ETA: 1s - loss: 1.5990 - acc: 0.6948  Epoch 00141: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5952 - acc: 0.6950 - val_loss: 1.0628 - val_acc: 0.5553
Epoch 143/250
111/112 [============================>.] - ETA: 1s - loss: 1.5853 - acc: 0.7148  Epoch 00142: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5800 - acc: 0.7151 - val_loss: 1.1911 - val_acc: 0.5426
Epoch 144/250
111/112 [============================>.] - ETA: 1s - loss: 1.5782 - acc: 0.7188  Epoch 00143: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5772 - acc: 0.7182 - val_loss: 0.8856 - val_acc: 0.6760
Epoch 145/250
111/112 [============================>.] - ETA: 1s - loss: 1.6291 - acc: 0.7117  Epoch 00144: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.6306 - acc: 0.7109 - val_loss: 0.7832 - val_acc: 0.7078
Epoch 146/250
111/112 [============================>.] - ETA: 1s - loss: 1.9242 - acc: 0.7024  Epoch 00145: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.9179 - acc: 0.7026 - val_loss: 0.9460 - val_acc: 0.6455
Epoch 147/250
111/112 [============================>.] - ETA: 1s - loss: 1.6581 - acc: 0.7086  Epoch 00146: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.6658 - acc: 0.7081 - val_loss: 1.1069 - val_acc: 0.5769
Epoch 148/250
111/112 [============================>.] - ETA: 1s - loss: 1.5759 - acc: 0.7109  Epoch 00147: val_loss improved from 0.68849 to 0.67531, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.147-0.6753.hdf5
112/112 [==============================] - 219s - loss: 1.5707 - acc: 0.7121 - val_loss: 0.6753 - val_acc: 0.7484
Epoch 149/250
111/112 [============================>.] - ETA: 1s - loss: 1.5735 - acc: 0.7323  Epoch 00148: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5807 - acc: 0.7321 - val_loss: 0.7496 - val_acc: 0.7027
Epoch 150/250
111/112 [============================>.] - ETA: 1s - loss: 1.8512 - acc: 0.6906  Epoch 00149: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8589 - acc: 0.6906 - val_loss: 1.2356 - val_acc: 0.5197
Epoch 151/250
111/112 [============================>.] - ETA: 1s - loss: 1.5415 - acc: 0.7401  Epoch 00150: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5477 - acc: 0.7411 - val_loss: 1.6909 - val_acc: 0.5705
Epoch 152/250
111/112 [============================>.] - ETA: 1s - loss: 1.8076 - acc: 0.6836  Epoch 00151: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8055 - acc: 0.6844 - val_loss: 1.5123 - val_acc: 0.4727
Epoch 153/250
111/112 [============================>.] - ETA: 1s - loss: 1.7179 - acc: 0.7157  Epoch 00152: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.7104 - acc: 0.7165 - val_loss: 1.5137 - val_acc: 0.4790
Epoch 154/250
111/112 [============================>.] - ETA: 1s - loss: 1.4185 - acc: 0.7435  Epoch 00153: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4170 - acc: 0.7436 - val_loss: 0.7903 - val_acc: 0.6861
Epoch 155/250
111/112 [============================>.] - ETA: 1s - loss: 1.7005 - acc: 0.7066  Epoch 00154: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.6986 - acc: 0.7065 - val_loss: 1.1226 - val_acc: 0.5959
Epoch 156/250
111/112 [============================>.] - ETA: 1s - loss: 1.5174 - acc: 0.7416  Epoch 00155: val_loss improved from 0.67531 to 0.61403, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.155-0.6140.hdf5
112/112 [==============================] - 219s - loss: 1.5139 - acc: 0.7422 - val_loss: 0.6140 - val_acc: 0.7624
Epoch 157/250
111/112 [============================>.] - ETA: 1s - loss: 1.4839 - acc: 0.7328  Epoch 00156: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4809 - acc: 0.7330 - val_loss: 1.0154 - val_acc: 0.6798
Epoch 158/250
111/112 [============================>.] - ETA: 1s - loss: 1.5435 - acc: 0.7275  Epoch 00157: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5457 - acc: 0.7274 - val_loss: 0.7447 - val_acc: 0.7166
Epoch 159/250
111/112 [============================>.] - ETA: 1s - loss: 1.4415 - acc: 0.7416  Epoch 00158: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4405 - acc: 0.7416 - val_loss: 1.1074 - val_acc: 0.6125
Epoch 160/250
111/112 [============================>.] - ETA: 1s - loss: 1.3909 - acc: 0.7373  Epoch 00159: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3855 - acc: 0.7366 - val_loss: 0.8826 - val_acc: 0.6455
Epoch 161/250
111/112 [============================>.] - ETA: 1s - loss: 1.4169 - acc: 0.7261  Epoch 00160: val_loss did not improve
112/112 [==============================] - 217s - loss: 1.4245 - acc: 0.7257 - val_loss: 1.2572 - val_acc: 0.5413
Epoch 162/250
111/112 [============================>.] - ETA: 1s - loss: 1.6517 - acc: 0.7221  Epoch 00161: val_loss did not improve
112/112 [==============================] - 218s - loss: 1.6432 - acc: 0.7238 - val_loss: 1.2696 - val_acc: 0.6950
Epoch 163/250
111/112 [============================>.] - ETA: 1s - loss: 1.3976 - acc: 0.7356  Epoch 00162: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3946 - acc: 0.7360 - val_loss: 0.7895 - val_acc: 0.7027
Epoch 164/250
111/112 [============================>.] - ETA: 1s - loss: 1.8156 - acc: 0.7103  Epoch 00163: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.8429 - acc: 0.7098 - val_loss: 2.0184 - val_acc: 0.4307
Epoch 165/250
111/112 [============================>.] - ETA: 1s - loss: 2.0675 - acc: 0.6669  Epoch 00164: val_loss did not improve
112/112 [==============================] - 219s - loss: 2.0636 - acc: 0.6674 - val_loss: 0.8347 - val_acc: 0.7116
Epoch 166/250
111/112 [============================>.] - ETA: 1s - loss: 1.4627 - acc: 0.7370  Epoch 00165: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4632 - acc: 0.7366 - val_loss: 0.7450 - val_acc: 0.7014
Epoch 167/250
111/112 [============================>.] - ETA: 1s - loss: 1.4943 - acc: 0.7247  Epoch 00166: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4864 - acc: 0.7263 - val_loss: 0.8642 - val_acc: 0.6773
Epoch 168/250
111/112 [============================>.] - ETA: 1s - loss: 1.5291 - acc: 0.7261  Epoch 00167: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5249 - acc: 0.7249 - val_loss: 1.2215 - val_acc: 0.5565
Epoch 169/250
111/112 [============================>.] - ETA: 1s - loss: 1.3851 - acc: 0.7520  Epoch 00168: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3819 - acc: 0.7508 - val_loss: 0.8552 - val_acc: 0.6455
Epoch 170/250
111/112 [============================>.] - ETA: 1s - loss: 1.5248 - acc: 0.7328  Epoch 00169: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5222 - acc: 0.7327 - val_loss: 1.9701 - val_acc: 0.4587
Epoch 171/250
111/112 [============================>.] - ETA: 1s - loss: 1.3837 - acc: 0.7458  Epoch 00170: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3780 - acc: 0.7464 - val_loss: 0.6362 - val_acc: 0.7560
Epoch 172/250
111/112 [============================>.] - ETA: 1s - loss: 1.4745 - acc: 0.7489  Epoch 00171: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4777 - acc: 0.7486 - val_loss: 0.6777 - val_acc: 0.7344
Epoch 173/250
111/112 [============================>.] - ETA: 1s - loss: 1.4214 - acc: 0.7548  Epoch 00172: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4157 - acc: 0.7553 - val_loss: 0.8675 - val_acc: 0.7306
Epoch 174/250
111/112 [============================>.] - ETA: 1s - loss: 1.3698 - acc: 0.7379  Epoch 00173: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3671 - acc: 0.7377 - val_loss: 1.2278 - val_acc: 0.6112
Epoch 175/250
111/112 [============================>.] - ETA: 1s - loss: 1.2687 - acc: 0.7669  Epoch 00174: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2744 - acc: 0.7670 - val_loss: 0.8008 - val_acc: 0.6722
Epoch 176/250
111/112 [============================>.] - ETA: 1s - loss: 1.3924 - acc: 0.7514  Epoch 00175: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3914 - acc: 0.7506 - val_loss: 1.1924 - val_acc: 0.7078
Epoch 177/250
111/112 [============================>.] - ETA: 1s - loss: 1.5709 - acc: 0.7396  Epoch 00176: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5651 - acc: 0.7397 - val_loss: 1.9948 - val_acc: 0.5438
Epoch 178/250
111/112 [============================>.] - ETA: 1s - loss: 1.4789 - acc: 0.7306  Epoch 00177: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4794 - acc: 0.7296 - val_loss: 2.0963 - val_acc: 0.3774
Epoch 179/250
111/112 [============================>.] - ETA: 1s - loss: 1.2685 - acc: 0.7666  Epoch 00178: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2730 - acc: 0.7667 - val_loss: 0.7733 - val_acc: 0.6950
Epoch 180/250
111/112 [============================>.] - ETA: 1s - loss: 1.1998 - acc: 0.7683  Epoch 00179: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1971 - acc: 0.7684 - val_loss: 0.8404 - val_acc: 0.7001
Epoch 181/250
111/112 [============================>.] - ETA: 1s - loss: 1.4514 - acc: 0.7511  Epoch 00180: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4486 - acc: 0.7520 - val_loss: 1.0528 - val_acc: 0.5794
Epoch 182/250
111/112 [============================>.] - ETA: 1s - loss: 1.6511 - acc: 0.7264  Epoch 00181: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.6437 - acc: 0.7274 - val_loss: 2.3101 - val_acc: 0.4130
Epoch 183/250
111/112 [============================>.] - ETA: 1s - loss: 1.5653 - acc: 0.7157  Epoch 00182: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5583 - acc: 0.7154 - val_loss: 0.6261 - val_acc: 0.7586
Epoch 184/250
111/112 [============================>.] - ETA: 1s - loss: 1.2528 - acc: 0.7700  Epoch 00183: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2488 - acc: 0.7706 - val_loss: 0.6918 - val_acc: 0.7522
Epoch 185/250
111/112 [============================>.] - ETA: 1s - loss: 1.3076 - acc: 0.7582  Epoch 00184: val_loss improved from 0.61403 to 0.60621, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.184-0.6062.hdf5
112/112 [==============================] - 219s - loss: 1.3084 - acc: 0.7589 - val_loss: 0.6062 - val_acc: 0.7840
Epoch 186/250
111/112 [============================>.] - ETA: 1s - loss: 1.2397 - acc: 0.7568  Epoch 00185: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2421 - acc: 0.7567 - val_loss: 0.9970 - val_acc: 0.7243
Epoch 187/250
111/112 [============================>.] - ETA: 1s - loss: 1.3154 - acc: 0.7627  Epoch 00186: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3164 - acc: 0.7631 - val_loss: 0.9363 - val_acc: 0.7052
Epoch 188/250
111/112 [============================>.] - ETA: 1s - loss: 1.3450 - acc: 0.7615  Epoch 00187: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3406 - acc: 0.7628 - val_loss: 0.6852 - val_acc: 0.7370
Epoch 189/250
111/112 [============================>.] - ETA: 1s - loss: 1.5245 - acc: 0.7483  Epoch 00188: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5169 - acc: 0.7480 - val_loss: 0.7682 - val_acc: 0.6963
Epoch 190/250
111/112 [============================>.] - ETA: 1s - loss: 1.2264 - acc: 0.7767  Epoch 00189: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2270 - acc: 0.7768 - val_loss: 0.7027 - val_acc: 0.7294
Epoch 191/250
111/112 [============================>.] - ETA: 1s - loss: 1.4338 - acc: 0.7638  Epoch 00190: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4393 - acc: 0.7626 - val_loss: 0.7412 - val_acc: 0.7395
Epoch 192/250
111/112 [============================>.] - ETA: 1s - loss: 1.4274 - acc: 0.7399  Epoch 00191: val_loss improved from 0.60621 to 0.60382, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.191-0.6038.hdf5
112/112 [==============================] - 219s - loss: 1.4208 - acc: 0.7400 - val_loss: 0.6038 - val_acc: 0.7802
Epoch 193/250
111/112 [============================>.] - ETA: 1s - loss: 1.3455 - acc: 0.7722  Epoch 00192: val_loss did not improve
112/112 [==============================] - 220s - loss: 1.3465 - acc: 0.7723 - val_loss: 0.8770 - val_acc: 0.7471
Epoch 194/250
111/112 [============================>.] - ETA: 1s - loss: 1.4439 - acc: 0.7351  Epoch 00193: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4362 - acc: 0.7366 - val_loss: 0.8023 - val_acc: 0.7078
Epoch 195/250
111/112 [============================>.] - ETA: 1s - loss: 1.1722 - acc: 0.7739  Epoch 00194: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1673 - acc: 0.7748 - val_loss: 0.9171 - val_acc: 0.6429
Epoch 196/250
111/112 [============================>.] - ETA: 1s - loss: 1.2461 - acc: 0.7703  Epoch 00195: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2438 - acc: 0.7709 - val_loss: 0.8899 - val_acc: 0.7154
Epoch 197/250
111/112 [============================>.] - ETA: 1s - loss: 1.5179 - acc: 0.7562  Epoch 00196: val_loss did not improve
112/112 [==============================] - 220s - loss: 1.5211 - acc: 0.7545 - val_loss: 1.8723 - val_acc: 0.4409
Epoch 198/250
111/112 [============================>.] - ETA: 1s - loss: 1.5371 - acc: 0.7351  Epoch 00197: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5411 - acc: 0.7358 - val_loss: 0.8442 - val_acc: 0.6836
Epoch 199/250
111/112 [============================>.] - ETA: 1s - loss: 1.3280 - acc: 0.7601  Epoch 00198: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3246 - acc: 0.7600 - val_loss: 0.6637 - val_acc: 0.7586
Epoch 200/250
111/112 [============================>.] - ETA: 1s - loss: 1.1819 - acc: 0.7790  Epoch 00199: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1796 - acc: 0.7782 - val_loss: 0.6584 - val_acc: 0.7598
Epoch 201/250
111/112 [============================>.] - ETA: 1s - loss: 1.1419 - acc: 0.7787  Epoch 00200: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1449 - acc: 0.7782 - val_loss: 0.9113 - val_acc: 0.6569
Epoch 202/250
111/112 [============================>.] - ETA: 1s - loss: 1.2297 - acc: 0.7773  Epoch 00201: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2248 - acc: 0.7771 - val_loss: 1.3020 - val_acc: 0.5578
Epoch 203/250
111/112 [============================>.] - ETA: 1s - loss: 1.1146 - acc: 0.7908  Epoch 00202: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1197 - acc: 0.7905 - val_loss: 0.7101 - val_acc: 0.7306
Epoch 204/250
111/112 [============================>.] - ETA: 1s - loss: 1.2523 - acc: 0.7753  Epoch 00203: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2510 - acc: 0.7737 - val_loss: 1.5887 - val_acc: 0.4981
Epoch 205/250
111/112 [============================>.] - ETA: 1s - loss: 1.1662 - acc: 0.7762  Epoch 00204: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1613 - acc: 0.7773 - val_loss: 1.1494 - val_acc: 0.6226
Epoch 206/250
111/112 [============================>.] - ETA: 1s - loss: 1.2119 - acc: 0.7756  Epoch 00205: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2110 - acc: 0.7768 - val_loss: 1.1239 - val_acc: 0.6557
Epoch 207/250
111/112 [============================>.] - ETA: 1s - loss: 1.3935 - acc: 0.7480  Epoch 00206: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4021 - acc: 0.7480 - val_loss: 0.8649 - val_acc: 0.7103
Epoch 208/250
111/112 [============================>.] - ETA: 1s - loss: 1.4416 - acc: 0.7646  Epoch 00207: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4360 - acc: 0.7645 - val_loss: 1.0123 - val_acc: 0.6531
Epoch 209/250
111/112 [============================>.] - ETA: 1s - loss: 1.1708 - acc: 0.7810  Epoch 00208: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1678 - acc: 0.7807 - val_loss: 1.0251 - val_acc: 0.6086
Epoch 210/250
111/112 [============================>.] - ETA: 1s - loss: 1.4116 - acc: 0.7556  Epoch 00209: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.4078 - acc: 0.7556 - val_loss: 1.3701 - val_acc: 0.5375
Epoch 211/250
111/112 [============================>.] - ETA: 1s - loss: 1.2544 - acc: 0.7807  Epoch 00210: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2503 - acc: 0.7807 - val_loss: 0.8284 - val_acc: 0.7001
Epoch 212/250
111/112 [============================>.] - ETA: 1s - loss: 1.1310 - acc: 0.7846  Epoch 00211: val_loss improved from 0.60382 to 0.58072, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.211-0.5807.hdf5
112/112 [==============================] - 219s - loss: 1.1314 - acc: 0.7849 - val_loss: 0.5807 - val_acc: 0.7992
Epoch 213/250
111/112 [============================>.] - ETA: 1s - loss: 1.1294 - acc: 0.7773  Epoch 00212: val_loss improved from 0.58072 to 0.52190, saving model to ./resnet50_FT38_Hybrid_woNoF/checkpoint/weights.212-0.5219.hdf5
112/112 [==============================] - 219s - loss: 1.1229 - acc: 0.7779 - val_loss: 0.5219 - val_acc: 0.8030
Epoch 214/250
111/112 [============================>.] - ETA: 1s - loss: 1.1142 - acc: 0.7942  Epoch 00213: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1133 - acc: 0.7930 - val_loss: 0.5829 - val_acc: 0.7929
Epoch 215/250
111/112 [============================>.] - ETA: 1s - loss: 1.1875 - acc: 0.7782  Epoch 00214: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1839 - acc: 0.7782 - val_loss: 1.0412 - val_acc: 0.6290
Epoch 216/250
111/112 [============================>.] - ETA: 1s - loss: 1.2350 - acc: 0.7776  Epoch 00215: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2360 - acc: 0.7779 - val_loss: 1.0053 - val_acc: 0.5972
Epoch 217/250
111/112 [============================>.] - ETA: 1s - loss: 1.5253 - acc: 0.7435  Epoch 00216: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.5298 - acc: 0.7439 - val_loss: 1.2127 - val_acc: 0.5997
Epoch 218/250
111/112 [============================>.] - ETA: 1s - loss: 1.1318 - acc: 0.7863  Epoch 00217: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1378 - acc: 0.7857 - val_loss: 0.6076 - val_acc: 0.7776
Epoch 219/250
111/112 [============================>.] - ETA: 1s - loss: 1.0593 - acc: 0.7942  Epoch 00218: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0652 - acc: 0.7935 - val_loss: 0.7826 - val_acc: 0.7624
Epoch 220/250
111/112 [============================>.] - ETA: 1s - loss: 1.2589 - acc: 0.7711  Epoch 00219: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2560 - acc: 0.7701 - val_loss: 1.3187 - val_acc: 0.6290
Epoch 221/250
111/112 [============================>.] - ETA: 1s - loss: 1.1681 - acc: 0.7903  Epoch 00220: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1654 - acc: 0.7910 - val_loss: 0.9117 - val_acc: 0.6518
Epoch 222/250
111/112 [============================>.] - ETA: 1s - loss: 1.0790 - acc: 0.7962  Epoch 00221: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0847 - acc: 0.7952 - val_loss: 1.3618 - val_acc: 0.5222
Epoch 223/250
111/112 [============================>.] - ETA: 1s - loss: 1.2108 - acc: 0.7748  Epoch 00222: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2128 - acc: 0.7748 - val_loss: 0.9291 - val_acc: 0.6709
Epoch 224/250
111/112 [============================>.] - ETA: 1s - loss: 1.3338 - acc: 0.7948  Epoch 00223: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3488 - acc: 0.7924 - val_loss: 1.1443 - val_acc: 0.6201
Epoch 225/250
111/112 [============================>.] - ETA: 1s - loss: 1.3746 - acc: 0.7494  Epoch 00224: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3718 - acc: 0.7503 - val_loss: 1.2752 - val_acc: 0.5604
Epoch 226/250
111/112 [============================>.] - ETA: 1s - loss: 1.1704 - acc: 0.7838  Epoch 00225: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1716 - acc: 0.7835 - val_loss: 0.8504 - val_acc: 0.6582
Epoch 227/250
111/112 [============================>.] - ETA: 1s - loss: 1.1497 - acc: 0.7782  Epoch 00226: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1462 - acc: 0.7782 - val_loss: 0.6910 - val_acc: 0.7776
Epoch 228/250
111/112 [============================>.] - ETA: 1s - loss: 1.3292 - acc: 0.7658  Epoch 00227: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3224 - acc: 0.7667 - val_loss: 0.8591 - val_acc: 0.6696
Epoch 229/250
111/112 [============================>.] - ETA: 1s - loss: 1.0814 - acc: 0.8057  Epoch 00228: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0772 - acc: 0.8052 - val_loss: 0.8309 - val_acc: 0.6709
Epoch 230/250
111/112 [============================>.] - ETA: 1s - loss: 1.0440 - acc: 0.8060  Epoch 00229: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0446 - acc: 0.8050 - val_loss: 0.8207 - val_acc: 0.7116
Epoch 231/250
111/112 [============================>.] - ETA: 1s - loss: 1.0252 - acc: 0.8018  Epoch 00230: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0223 - acc: 0.8025 - val_loss: 0.8430 - val_acc: 0.7052
Epoch 232/250
111/112 [============================>.] - ETA: 1s - loss: 1.1105 - acc: 0.8035  Epoch 00231: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1121 - acc: 0.8033 - val_loss: 1.3607 - val_acc: 0.6036
Epoch 233/250
111/112 [============================>.] - ETA: 1s - loss: 1.2683 - acc: 0.7841  Epoch 00232: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2739 - acc: 0.7829 - val_loss: 1.1158 - val_acc: 0.6417
Epoch 234/250
111/112 [============================>.] - ETA: 1s - loss: 1.1996 - acc: 0.7942  Epoch 00233: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1992 - acc: 0.7944 - val_loss: 0.9902 - val_acc: 0.7154
Epoch 235/250
111/112 [============================>.] - ETA: 1s - loss: 1.0857 - acc: 0.8024  Epoch 00234: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0847 - acc: 0.8033 - val_loss: 0.6850 - val_acc: 0.7548
Epoch 236/250
111/112 [============================>.] - ETA: 1s - loss: 1.0734 - acc: 0.8049  Epoch 00235: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0706 - acc: 0.8050 - val_loss: 1.2539 - val_acc: 0.6099
Epoch 237/250
111/112 [============================>.] - ETA: 1s - loss: 0.9880 - acc: 0.8004  Epoch 00236: val_loss did not improve
112/112 [==============================] - 219s - loss: 0.9851 - acc: 0.8005 - val_loss: 1.1447 - val_acc: 0.5832
Epoch 238/250
111/112 [============================>.] - ETA: 1s - loss: 1.0025 - acc: 0.8038  Epoch 00237: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0030 - acc: 0.8039 - val_loss: 0.5408 - val_acc: 0.7865
Epoch 239/250
111/112 [============================>.] - ETA: 1s - loss: 1.0493 - acc: 0.8119  Epoch 00238: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0430 - acc: 0.8125 - val_loss: 0.6843 - val_acc: 0.7471
Epoch 240/250
111/112 [============================>.] - ETA: 1s - loss: 1.2548 - acc: 0.7852  Epoch 00239: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.2589 - acc: 0.7840 - val_loss: 0.6809 - val_acc: 0.7344
Epoch 241/250
111/112 [============================>.] - ETA: 1s - loss: 1.0332 - acc: 0.8052  Epoch 00240: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0365 - acc: 0.8052 - val_loss: 1.0085 - val_acc: 0.6518
Epoch 242/250
111/112 [============================>.] - ETA: 1s - loss: 1.0840 - acc: 0.8012  Epoch 00241: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0855 - acc: 0.8008 - val_loss: 1.0615 - val_acc: 0.7395
Epoch 243/250
111/112 [============================>.] - ETA: 1s - loss: 1.3424 - acc: 0.7793  Epoch 00242: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.3378 - acc: 0.7799 - val_loss: 0.9536 - val_acc: 0.6671
Epoch 244/250
111/112 [============================>.] - ETA: 1s - loss: 1.0994 - acc: 0.7953  Epoch 00243: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1010 - acc: 0.7955 - val_loss: 0.7293 - val_acc: 0.7446
Epoch 245/250
111/112 [============================>.] - ETA: 1s - loss: 1.1643 - acc: 0.8026  Epoch 00244: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1617 - acc: 0.8019 - val_loss: 1.2409 - val_acc: 0.5502
Epoch 246/250
111/112 [============================>.] - ETA: 1s - loss: 0.9755 - acc: 0.8083  Epoch 00245: val_loss did not improve
112/112 [==============================] - 219s - loss: 0.9731 - acc: 0.8089 - val_loss: 0.5934 - val_acc: 0.7764
Epoch 247/250
111/112 [============================>.] - ETA: 1s - loss: 0.9843 - acc: 0.8181  Epoch 00246: val_loss did not improve
112/112 [==============================] - 219s - loss: 0.9854 - acc: 0.8164 - val_loss: 0.9219 - val_acc: 0.6645
Epoch 248/250
111/112 [============================>.] - ETA: 1s - loss: 1.1411 - acc: 0.7919  Epoch 00247: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.1368 - acc: 0.7924 - val_loss: 0.7670 - val_acc: 0.7217
Epoch 249/250
111/112 [============================>.] - ETA: 1s - loss: 0.8999 - acc: 0.8266  Epoch 00248: val_loss did not improve
112/112 [==============================] - 219s - loss: 0.8983 - acc: 0.8265 - val_loss: 0.5511 - val_acc: 0.7929
Epoch 250/250
111/112 [============================>.] - ETA: 1s - loss: 1.0848 - acc: 0.7976  Epoch 00249: val_loss did not improve
112/112 [==============================] - 219s - loss: 1.0813 - acc: 0.7974 - val_loss: 1.2105 - val_acc: 0.6061
Out[21]:
<keras.callbacks.History at 0x7f2981998fd0>

In [4]:
#test prepare

test_model, test_model_name = get_best_model(checkpoint_dir='./resnet50_FT38_CW_STGTrain/checkpoint/')
# print('Loading model from weights.004-0.0565.hdf5')
# test_model = load_model('./checkpoints/checkpoint2/weights.004-0.0565.hdf5')

def test_generator(df, mean, datagen = None, batch_size = BATCHSIZE):
    # infinite generator over df: yields full batches, then one partial
    # batch (n - current_index) at the end of a pass, then wraps to 0
    n = df.shape[0]
    batch_index = 0
    while 1:
        current_index = batch_index * batch_size
        if n >= current_index + batch_size:
            current_batch_size = batch_size
            batch_index += 1    
        else:
            current_batch_size = n - current_index
            batch_index = 0        
        batch_df = df[current_index:current_index+current_batch_size]
        batch_x = np.zeros((batch_df.shape[0], ROWS, COLS, 3), dtype=K.floatx())
        i = 0
        for index,row in batch_df.iterrows():
            image_file = row['image_file']
            bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
            cropped = load_img(TEST_DIR+image_file,bbox,target_size=(ROWS,COLS))
            x = np.asarray(cropped, dtype=K.floatx())
            x /= 255.
            if datagen is not None: x = datagen.random_transform(x)  # optional test-time augmentation
            x = preprocess_input(x, mean)
            batch_x[i] = x
            i += 1
        if batch_index%50 == 0: print('batch_index', batch_index)
        yield(batch_x)
        
test_aug_datagen = ImageDataGenerator(
    rotation_range=180,
    shear_range=0.2,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    vertical_flip=True)


Loading model from checkpoint file ./resnet50_FT38_CW_STGTrain/checkpoint/weights.000-0.0327.hdf5
Loading model Done!
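
Because test_generator loops forever and wraps around after one partial batch, the caller has to request exactly df.shape[0] samples (the val_samples argument used further down). A pure-Python sketch of just the index arithmetic on toy numbers:

In [ ]:
# Batching logic of test_generator in isolation (a sketch): with n=5 rows
# and batch_size=2 the yielded batch sizes cycle as 2, 2, 1, 2, 2, 1, ...
def batch_sizes(n, batch_size, n_batches):
    sizes, batch_index = [], 0
    for _ in range(n_batches):
        current_index = batch_index * batch_size
        if n >= current_index + batch_size:
            sizes.append(batch_size)         # full batch
            batch_index += 1
        else:
            sizes.append(n - current_index)  # final partial batch
            batch_index = 0                  # wrap around
    return sizes

print(batch_sizes(5, 2, 6))  # [2, 2, 1, 2, 2, 1]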

In [5]:
train_mean = [0.37698776,  0.41491762,  0.38681713]

In [ ]:
train_mean = train_datagen.mean
valid_mean = valid_datagen.mean
X_train_centered = featurewise_center(X_train)
X_valid_centered = featurewise_center(X_valid)
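
A quick sanity check of the centering helpers on random data (a sketch; it assumes only the featurewise_center and mean helpers defined earlier, not the real X_train):

In [ ]:
# After featurewise_center, the per-channel mean over the whole batch
# should be ~0 for each of the 3 channels.
X_demo = np.random.rand(8, ROWS, COLS, 3).astype(np.float32)
print(mean(featurewise_center(X_demo)))  # ~[0. 0. 0.]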

In [ ]:
#validation data fish logloss
 
valid_pred = test_model.predict(X_valid_centered, batch_size=BATCHSIZE, verbose=1)
# valid_pred = test_model.predict_generator(test_generator(df=valid_df, mean=valid_mean),
#                                           val_samples=valid_df.shape[0], nb_worker=1, pickle_safe=False)
valid_logloss_df = pd.DataFrame(columns=['logloss','class'])
for i in range(y_valid.shape[0]):
    index = np.argmax(y_valid[i,:])
    fish = FISH_CLASSES[index]
    logloss = -math.log(valid_pred[i,index])
    valid_logloss_df.loc[len(valid_logloss_df)]=[logloss,fish]                                       
print(valid_logloss_df.groupby(['class'])['logloss'].mean())
print(valid_logloss_df['logloss'].mean())

train_pred = test_model.predict(X_train_centered, batch_size=BATCHSIZE, verbose=1)
# train_pred = test_model.predict_generator(test_generator(df=train_df, ),
#                                           val_samples=train_df.shape[0], nb_worker=1, pickle_safe=False)
train_logloss_df = pd.DataFrame(columns=['logloss','class'])
for i in range(y_train.shape[0]):
    index = np.argmax(y_train[i,:])
    fish = FISH_CLASSES[index]
    logloss = -math.log(train_pred[i,index])
    train_logloss_df.loc[len(train_logloss_df)]=[logloss,fish]                                       
print(train_logloss_df.groupby(['class'])['logloss'].mean())
print(train_logloss_df['logloss'].mean())
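
The per-sample logloss above is just -log of the probability assigned to the true class, so its mean should agree with sklearn's log_loss. A toy cross-check (a sketch with made-up numbers):

In [ ]:
# -log(p_true) averaged over samples == sklearn.metrics.log_loss
y_demo = np.array([[1, 0, 0], [0, 1, 0]])              # one-hot labels
p_demo = np.array([[0.8, 0.1, 0.1], [0.2, 0.7, 0.1]])  # predicted probs
manual = np.mean([-math.log(p_demo[i, np.argmax(y_demo[i])]) for i in range(2)])
print(manual, log_loss(y_demo, p_demo))  # both ~0.2899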

In [8]:
#GTbbox_CROPpred_df = ['image_file','crop_index','crop_class','xmin','ymin','xmax','ymax',
#                      'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT', 'logloss']

file_name = 'GTbbox_CROPpred_df_'+test_model_name+'_.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    GTbbox_CROPpred_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name) 
    nb_augmentation = 1
    if nb_augmentation ==1:
        test_preds = test_model.predict_generator(test_generator(df=GTbbox_df, mean=train_mean), 
                                                  val_samples=GTbbox_df.shape[0], nb_worker=1, pickle_safe=False)
    else:
        test_preds = np.zeros((GTbbox_df.shape[0], len(FISH_CLASSES)), dtype=K.floatx())
        for idx in range(nb_augmentation):
            print('{}th augmentation for testing ...'.format(idx+1))
            test_preds += test_model.predict_generator(test_generator(df=GTbbox_df, mean=train_mean, datagen=test_aug_datagen), 
                                                       val_samples=GTbbox_df.shape[0], nb_worker=1, pickle_safe=False)
        test_preds /= nb_augmentation

    CROPpred_df = pd.DataFrame(test_preds, columns=['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT'])
    GTbbox_CROPpred_df = pd.concat([GTbbox_df,CROPpred_df], axis=1)
    GTbbox_CROPpred_df['logloss'] = GTbbox_CROPpred_df.apply(lambda row: -math.log(row[row['crop_class']]), axis=1)
    GTbbox_CROPpred_df.to_pickle(OUTPUT_DIR+file_name) 

#logloss of every fish class
print(GTbbox_CROPpred_df.groupby(['crop_class'])['logloss'].mean())
print(GTbbox_CROPpred_df['logloss'].mean())


Loading from file GTbbox_CROPpred_df_weights.000-0.0327.hdf5_.pickle
crop_class
ALB      0.076577
BET      0.139025
DOL      0.126520
LAG      0.000761
NoF      0.051943
OTHER    0.133949
SHARK    0.018328
YFT      0.090739
Name: logloss, dtype: float64
0.05936252677814113

In [9]:
# RFCNbbox_RFCNpred_df = ['image_class','image_file','crop_index','xmin','ymin','xmax','ymax',
#                          'NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN',
#                          'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN']
# select fish_conf >= CONF_THRESH

file_name = 'RFCNbbox_RFCNpred_df_conf{:.2f}.pickle'.format(CONF_THRESH)
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    RFCNbbox_RFCNpred_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)        
    RFCNbbox_RFCNpred_df = pd.DataFrame(columns=['image_class','image_file','crop_index','xmin','ymin','xmax','ymax',
                                                  'NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN',
                                                  'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN']) 

    with open('../data/RFCN_detections/detections_full_AGNOSTICnms_'+RFCN_MODEL+'.pkl','rb') as f:
        detections_full_AGNOSTICnms = pickle.load(f, encoding='latin1') 
    with open("../RFCN/ImageSets/Main/test.txt","r") as f:
        test_files = f.readlines()
    with open("../RFCN/ImageSets/Main/train_test.txt","r") as f:
        train_file_labels = f.readlines()
    assert len(detections_full_AGNOSTICnms) == len(test_files)
    
    count = np.zeros(len(detections_full_AGNOSTICnms))
    
    for im in range(len(detections_full_AGNOSTICnms)):
        if im%1000 == 0: print(im)
        basename = test_files[im][:9]
        if im<1000:
            # the first 1000 entries of test.txt are stage-1 test images with no label
            image_class = '--'
        else:
            # the remaining entries are training images; look up their class label
            for i in range(len(train_file_labels)):
                if train_file_labels[i][:9] == basename:
                    image_class = train_file_labels[i][10:-1]
                    break
        image = Image.open(TEST_DIR+'/'+basename+'.jpg')
        width_image, height_image = image.size
        
        bboxes = []
        detects_im = detections_full_AGNOSTICnms[im]
        for i in range(len(detects_im)):
#             if np.sum(detects_im[i,5:]) >= CONF_THRESH:
            if np.max(detects_im[i,5:]) >= CONF_THRESH:
                bboxes.append(detects_im[i,:]) 
        count[im] = len(bboxes)
        if len(bboxes) == 0:
            ind = np.argmax(np.sum(detects_im[:,5:], axis=1))
            bboxes.append(detects_im[ind,:])
        bboxes = np.asarray(bboxes)

        for j in range(len(bboxes)):    
            bbox = bboxes[j]
            xmin = bbox[0]
            ymin = bbox[1]
            xmax = bbox[2]
            ymax = bbox[3]
            assert max(xmin,0)<min(xmax,width_image)
            assert max(ymin,0)<min(ymax,height_image)
            RFCNbbox_RFCNpred_df.loc[len(RFCNbbox_RFCNpred_df)]=[image_class,basename+'.jpg',j,max(xmin,0),max(ymin,0),
                                                                   min(xmax,width_image),min(ymax,height_image),
                                                                   bbox[4],bbox[5],bbox[6],bbox[7],bbox[8],bbox[9],bbox[10],bbox[11]]   
    
    RFCNbbox_RFCNpred_df.to_pickle(OUTPUT_DIR+file_name)


Loading from file RFCNbbox_RFCNpred_df_conf0.80.pickle
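
The bbox selection above keeps every detection whose best fish score clears CONF_THRESH and, if none does, falls back to the single detection with the largest total fish probability, so each image contributes at least one crop. The rule on a toy array (a sketch; columns here are [xmin, ymin, xmax, ymax, NoF, fish...], with only 3 fish classes for brevity):

In [ ]:
CONF_DEMO = 0.8
detects_demo = np.array([
    [0., 0., 10., 10., 0.10, 0.85, 0.02, 0.03],  # max fish conf 0.85 -> kept
    [5., 5., 20., 20., 0.60, 0.30, 0.05, 0.05],  # max fish conf 0.30 -> dropped
])
bboxes_demo = [d for d in detects_demo if np.max(d[5:]) >= CONF_DEMO]
if len(bboxes_demo) == 0:  # fallback: detection with largest total fish probability
    bboxes_demo.append(detects_demo[np.argmax(np.sum(detects_demo[:, 5:], axis=1))])
print(len(bboxes_demo))  # 1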

In [31]:
GTbbox_CROPpred_df.loc[GTbbox_CROPpred_df['crop_class']!='NoF']


Out[31]:
image_file crop_index crop_class xmin ymin xmax ymax ALB BET DOL LAG NoF OTHER SHARK YFT logloss
0 img_00003.jpg 0.0 ALB 377.000000 66.000000 730.000000 173.000000 9.999806e-01 1.153381e-06 6.197113e-10 1.895012e-10 3.613221e-06 1.272149e-05 5.510684e-09 2.063266e-06 1.943130e-05
1 img_00003.jpg 1.0 ALB 670.000000 95.000000 1008.000000 219.000000 9.999139e-01 3.154530e-08 1.504183e-09 1.004528e-09 1.135657e-06 8.434706e-05 8.872343e-12 5.066360e-07 8.607281e-05
2 img_00003.jpg 2.0 ALB 820.000000 328.000000 1123.000000 485.000000 9.999986e-01 3.259925e-07 2.411850e-12 2.287089e-07 1.287768e-07 6.981027e-07 5.469245e-12 6.689053e-09 1.430512e-06
3 img_00003.jpg 3.0 ALB 291.000000 122.000000 643.000000 407.000000 9.998715e-01 7.074304e-07 1.498078e-13 5.582359e-11 1.212325e-04 6.655146e-08 2.109827e-15 6.572765e-06 1.285159e-04
4 img_00010.jpg 0.0 ALB 651.000000 422.000000 746.000000 612.000000 9.999821e-01 1.612292e-05 1.606051e-09 8.399101e-09 5.414718e-07 1.055654e-06 8.472713e-09 1.611917e-07 1.788155e-05
5 img_00010.jpg 1.0 ALB 831.000000 305.000000 943.000000 418.000000 9.967868e-01 5.258065e-05 2.600038e-07 1.262084e-08 1.224622e-03 1.223431e-03 1.717334e-07 7.120565e-04 3.218340e-03
6 img_00012.jpg 0.0 ALB 471.000000 513.000000 627.000000 703.000000 9.999943e-01 3.739872e-08 1.850646e-06 1.132046e-08 1.897254e-06 6.289488e-09 9.704899e-11 1.929350e-06 5.722062e-06
7 img_00015.jpg 0.0 ALB 233.000000 341.000000 444.000000 435.000000 9.743453e-01 1.293397e-03 1.068411e-06 3.617434e-07 2.392042e-02 2.577601e-04 8.125739e-07 1.809537e-04 2.598955e-02
8 img_00019.jpg 0.0 ALB 155.000000 393.000000 293.000000 468.000000 9.727021e-01 9.626866e-10 6.829089e-13 5.678039e-09 2.318743e-02 6.181457e-08 3.705326e-10 4.110374e-03 2.767742e-02
9 img_00020.jpg 0.0 ALB 586.000000 537.000000 710.000000 719.000000 9.955834e-01 3.461484e-03 5.535413e-09 3.351136e-06 3.796670e-07 8.154724e-05 1.728447e-06 8.681710e-04 4.426427e-03
10 img_00020.jpg 1.0 ALB 690.000000 454.000000 847.000000 687.000000 9.998964e-01 2.346682e-05 2.276121e-12 6.263109e-10 5.679273e-08 7.904898e-05 3.889341e-11 1.027095e-06 1.035982e-04
11 img_00020.jpg 2.0 ALB 614.000000 377.000000 756.000000 484.000000 9.999816e-01 6.440972e-07 7.907461e-08 1.131726e-11 9.268691e-06 1.255067e-06 5.675534e-06 1.460077e-06 1.835840e-05
12 img_00020.jpg 3.0 ALB 724.000000 360.000000 816.000000 537.000000 9.999635e-01 7.285932e-06 1.114230e-08 3.857879e-09 1.466452e-05 1.255418e-05 8.424390e-09 2.041774e-06 3.647871e-05
13 img_00020.jpg 4.0 ALB 630.000000 327.000000 756.000000 438.000000 9.998150e-01 2.325187e-06 5.186432e-07 1.736279e-11 4.357657e-05 5.044311e-08 1.080510e-06 1.373883e-04 1.849703e-04
14 img_00029.jpg 0.0 ALB 607.000000 343.000000 706.000000 524.000000 9.999987e-01 3.594790e-07 1.605573e-08 6.174470e-09 2.296128e-07 5.097974e-07 3.114929e-11 2.182999e-07 1.311303e-06
15 img_00029.jpg 1.0 ALB 693.000000 331.000000 773.000000 485.000000 9.990757e-01 1.419920e-05 8.545166e-06 5.261857e-05 7.995616e-04 4.140537e-05 3.945559e-09 7.982659e-06 9.247763e-04
16 img_00032.jpg 0.0 ALB 520.000000 513.000000 627.000000 568.000000 9.148430e-01 1.440050e-04 6.734094e-06 2.674154e-07 8.218089e-02 1.380336e-06 6.068142e-08 2.823662e-03 8.900279e-02
17 img_00037.jpg 0.0 ALB 291.000000 278.000000 383.000000 436.000000 9.988926e-01 7.055594e-05 3.687090e-05 2.988373e-07 7.559397e-05 6.331778e-06 1.440459e-08 9.177037e-04 1.108008e-03
18 img_00038.jpg 0.0 ALB 745.000000 254.000000 831.000000 401.000000 9.678084e-01 3.162137e-02 7.209297e-08 6.797074e-07 4.583938e-04 2.350453e-05 1.698614e-05 7.062314e-05 3.272112e-02
19 img_00039.jpg 0.0 ALB 393.000000 115.000000 653.000000 220.000000 9.999824e-01 8.426422e-07 2.922295e-09 5.091283e-09 8.092207e-08 4.532214e-08 1.761962e-07 1.647602e-05 1.764313e-05
20 img_00041.jpg 0.0 ALB 774.000000 188.000000 1074.000000 372.000000 9.973723e-01 1.446992e-04 1.484089e-06 1.172362e-03 1.302605e-03 9.352129e-08 1.967828e-08 6.406744e-06 2.631189e-03
21 img_00041.jpg 1.0 ALB 562.000000 84.000000 890.000000 193.000000 9.963587e-01 1.568967e-03 1.909340e-08 9.761413e-08 2.041460e-03 2.864800e-06 8.028692e-08 2.767326e-05 3.647953e-03
22 img_00043.jpg 0.0 ALB 556.000000 373.000000 612.000000 520.000000 9.999943e-01 1.869347e-08 6.470538e-09 3.601910e-10 3.813006e-06 1.243006e-06 3.678304e-08 5.485905e-07 5.722062e-06
23 img_00045.jpg 0.0 ALB 725.000000 432.000000 838.000000 525.000000 9.997253e-01 1.044408e-06 4.624509e-11 1.380859e-12 2.997921e-08 9.887623e-07 3.264254e-11 2.726966e-04 2.747555e-04
24 img_00055.jpg 0.0 ALB 189.000000 382.000000 375.000000 469.000000 9.991695e-01 2.037858e-04 3.110706e-05 3.521669e-07 2.683227e-06 4.896912e-07 3.195880e-07 5.917859e-04 8.308762e-04
25 img_00057.jpg 0.0 ALB 456.000000 509.000000 550.000000 666.000000 9.803254e-01 5.703667e-05 8.106243e-05 6.375744e-08 4.867250e-03 1.483713e-04 7.475342e-06 1.451331e-02 1.987072e-02
26 img_00074.jpg 0.0 ALB 415.000000 142.000000 594.000000 225.000000 9.999074e-01 8.572769e-06 5.425484e-05 2.907593e-09 8.140572e-06 3.617875e-07 8.698826e-07 2.055169e-05 9.262991e-05
27 img_00085.jpg 0.0 ALB 528.000000 145.000000 904.000000 277.000000 9.999944e-01 4.824967e-06 2.861454e-13 1.500103e-10 5.636679e-09 1.107719e-07 8.330427e-12 7.240401e-07 5.602852e-06
28 img_00090.jpg 0.0 ALB 373.000000 69.000000 736.000000 176.000000 9.999552e-01 1.398818e-07 3.503078e-11 5.925299e-11 1.950427e-07 4.438670e-05 8.072463e-10 4.472725e-08 4.482370e-05
29 img_00090.jpg 1.0 ALB 664.000000 90.000000 1011.000000 217.000000 9.990360e-01 3.220962e-08 1.046357e-09 4.747873e-10 9.323478e-06 9.547356e-04 2.068044e-11 1.235434e-08 9.645105e-04
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4341 img_07624.jpg 0.0 YFT 40.425532 313.475177 427.659574 521.276596 2.164462e-07 8.217400e-08 1.176050e-10 9.168304e-13 3.708680e-08 7.436453e-13 8.649925e-11 9.999996e-01 3.576279e-07
4342 img_07633.jpg 0.0 YFT 449.645390 404.255319 776.595745 734.042553 1.944061e-07 1.345823e-10 2.600791e-09 1.223723e-13 5.715361e-04 1.499527e-11 7.718879e-11 9.994282e-01 5.719509e-04
4343 img_07644.jpg 0.0 YFT 31.205674 284.397163 441.134752 561.702128 4.146482e-08 7.857637e-14 2.391757e-12 4.072475e-16 1.115367e-05 6.872426e-15 5.370510e-13 9.999888e-01 1.120574e-05
4344 img_07648.jpg 0.0 YFT 496.453901 458.865248 863.829787 715.602837 1.056789e-06 8.189900e-12 2.191024e-08 3.227101e-14 2.480658e-01 4.621349e-10 2.824466e-11 7.519331e-01 2.851079e-01
4345 img_07649.jpg 0.0 YFT 395.744681 338.297872 539.007092 399.290780 9.800946e-01 2.885848e-05 6.444725e-10 2.656619e-10 5.918504e-06 4.666089e-10 2.298762e-10 1.987064e-02 3.918512e+00
4346 img_07653.jpg 0.0 YFT 392.907801 339.716312 492.907801 527.659574 2.288577e-04 5.256919e-09 1.510330e-12 2.889810e-13 9.491819e-09 3.025020e-09 1.956473e-10 9.997712e-01 2.288484e-04
4347 img_07655.jpg 0.0 YFT 355.319149 412.765957 508.510638 492.198582 1.258860e-02 9.553523e-07 2.006075e-09 8.764372e-11 5.649769e-07 1.484699e-08 1.848887e-08 9.874098e-01 1.267016e-02
4348 img_07665.jpg 0.0 YFT 386.524823 397.163121 615.602837 529.787234 7.730698e-08 9.188389e-07 7.162744e-13 4.260208e-13 1.254958e-10 1.479765e-11 2.911281e-11 9.999990e-01 9.536748e-07
4349 img_07706.jpg 0.0 YFT 739.716312 119.858156 1107.092199 265.957447 6.675404e-03 5.656304e-03 7.849219e-04 5.090093e-06 1.270020e-03 2.034237e-06 4.498025e-07 9.856058e-01 1.449883e-02
4350 img_07712.jpg 0.0 YFT 384.397163 395.035461 621.985816 527.659574 9.987452e-08 6.990318e-07 3.480624e-13 1.654098e-13 5.825423e-10 2.003397e-11 4.399122e-10 9.999992e-01 8.344654e-07
4351 img_07714.jpg 0.0 YFT 518.439716 499.290780 965.957447 751.063830 4.431536e-04 5.329949e-03 5.520845e-04 3.762749e-08 1.303821e-02 1.918395e-06 1.929933e-06 9.806327e-01 1.955728e-02
4352 img_07731.jpg 0.0 YFT 0.709220 285.106383 407.092199 492.198582 2.290689e-05 1.586421e-05 9.612826e-06 8.063775e-10 2.111648e-04 6.028992e-10 5.919560e-09 9.997404e-01 2.596119e-04
4353 img_07742.jpg 0.0 YFT 392.907801 119.858156 604.964539 216.312057 7.540400e-04 5.190315e-07 1.048923e-06 2.994529e-12 5.577776e-07 3.216247e-08 4.288129e-09 9.992439e-01 7.564305e-04
4354 img_07747.jpg 0.0 YFT 396.453901 36.170213 573.758865 139.007092 4.107560e-02 1.682030e-06 1.991541e-06 3.832559e-10 7.867641e-05 2.695598e-08 5.084877e-09 9.588420e-01 4.202893e-02
4355 img_07750.jpg 0.0 YFT 382.269504 400.709220 618.439716 525.531915 1.154416e-07 3.628240e-07 7.613718e-13 1.997993e-13 3.715605e-11 6.477192e-12 5.375092e-11 9.999995e-01 4.768373e-07
4356 img_07752.jpg 0.0 YFT 516.312057 100.709220 1063.120567 470.921986 2.969756e-06 3.421748e-11 4.237403e-10 1.173359e-13 2.443959e-05 4.258927e-12 1.328220e-07 9.999725e-01 2.753773e-05
4357 img_07759.jpg 0.0 YFT 372.340426 34.751773 563.829787 142.553191 7.952327e-04 3.425041e-08 2.242637e-06 1.119358e-11 6.487022e-06 2.139202e-07 9.518618e-09 9.991958e-01 8.045691e-04
4358 img_07761.jpg 0.0 YFT 378.723404 327.659574 623.404255 600.000000 2.354784e-02 1.261270e-04 2.567733e-06 2.596141e-08 3.898747e-02 1.521842e-03 5.412410e-03 9.304017e-01 7.213880e-02
4359 img_07765.jpg 0.0 YFT 240.425532 397.163121 436.170213 506.382979 1.646531e-04 7.871562e-07 1.974605e-12 9.064095e-11 1.574225e-05 5.046791e-09 5.182184e-09 9.998189e-01 1.811549e-04
4360 img_07775.jpg 0.0 YFT 358.865248 383.687943 544.680851 507.801418 1.533139e-04 6.141526e-09 1.837603e-12 1.696163e-12 2.080864e-08 9.277617e-08 1.598433e-08 9.998466e-01 1.534341e-04
4361 img_07782.jpg 0.0 YFT 719.148936 495.744681 1050.354610 770.212766 3.887371e-04 2.830485e-04 9.643040e-07 5.700894e-10 6.147433e-05 5.678395e-07 2.822596e-07 9.992649e-01 7.353744e-04
4362 img_07828.jpg 0.0 YFT 545.390071 437.588652 944.680851 599.290780 4.730524e-05 4.999148e-08 2.016533e-07 3.433206e-13 4.449790e-07 5.377348e-11 6.311234e-10 9.999520e-01 4.804250e-05
4363 img_07849.jpg 0.0 YFT 409.219858 329.787234 502.127660 533.333333 5.100721e-02 2.538231e-06 3.884194e-10 2.187348e-09 3.148336e-07 8.814292e-06 3.255179e-06 9.489779e-01 5.236972e-02
4364 img_07852.jpg 0.0 YFT 246.808511 399.290780 431.914894 506.382979 2.338640e-01 5.934151e-06 1.941234e-10 2.950447e-09 1.761298e-04 7.111951e-09 1.075934e-08 7.659540e-01 2.666332e-01
4365 img_07853.jpg 0.0 YFT 367.375887 420.567376 534.751773 507.092199 4.134394e-03 5.552590e-06 3.968423e-10 6.416629e-10 1.457083e-07 2.094415e-08 5.564397e-11 9.958599e-01 4.148673e-03
4366 img_07854.jpg 0.0 YFT 297.163121 402.836879 504.255319 531.205674 9.831114e-03 1.452477e-05 4.623424e-09 4.330351e-09 1.166227e-06 6.302580e-06 1.310425e-07 9.901467e-01 9.902168e-03
4367 img_07891.jpg 0.0 YFT 403.546099 504.964539 1150.354610 743.971631 2.132043e-02 5.357747e-04 1.069194e-08 6.377197e-08 1.312335e-03 3.185128e-07 9.797323e-06 9.768212e-01 2.345167e-02
4368 img_07901.jpg 0.0 YFT 104.255319 348.936170 414.184397 512.765957 2.305388e-03 1.219822e-08 8.169907e-08 1.499224e-10 4.947213e-04 7.767779e-10 1.084323e-09 9.971998e-01 2.804154e-03
4369 img_07911.jpg 0.0 YFT 190.780142 53.900709 562.411348 219.858156 5.444570e-04 1.197029e-09 2.904432e-11 1.601409e-07 3.330995e-04 2.670902e-10 2.680843e-11 9.991223e-01 8.780638e-04
4370 img_07911.jpg 1.0 YFT 756.028369 112.765957 1136.879433 329.787234 5.873860e-03 4.908578e-06 1.231487e-07 4.685965e-06 1.182474e-03 1.162095e-06 1.040281e-07 9.929327e-01 7.092415e-03

4371 rows × 16 columns

In [ ]:
file_name = 'data_test_Crop_{}_{}.pickle'.format(ROWS, COLS)
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    with open(OUTPUT_DIR+file_name, 'rb') as f:
        data_test = pickle.load(f)
    X_test_crop = data_test['X_test_crop']
else:
    print ('Generating file '+file_name)
    X_test_crop = np.ndarray((RFCNbbox_RFCNpred_df.shape[0], ROWS, COLS, 3), dtype=np.uint8)
    i = 0
    for index,row in RFCNbbox_RFCNpred_df.iterrows():
        image_file = row['image_file']
        bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
        cropped = load_img(TEST_DIR+image_file,bbox,target_size=(ROWS,COLS))
        X_test_crop[i] = np.asarray(cropped)
        i += 1

    #save data to file
    data_test = {'X_test_crop': X_test_crop}
    with open(OUTPUT_DIR+file_name, 'wb') as f:
        pickle.dump(data_test, f)

print('Loading data done.')
X_test_crop = X_test_crop.astype(np.float32)
print('Convert to float32 done.')
X_test_crop /= 255.
print('Rescale by 255 done.')

In [32]:
file_name = 'data_trainfish_Crop_{}_{}.pickle'.format(ROWS, COLS)
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    with open(OUTPUT_DIR+file_name, 'rb') as f:
        data_trainfish = pickle.load(f)
    X_trainfish_crop = data_trainfish['X_trainfish_crop']
else:
    print ('Generating file '+file_name)

    GTbbox_CROPpred_fish_df = GTbbox_CROPpred_df.loc[GTbbox_CROPpred_df['crop_class']!='NoF']
    X_trainfish_crop = np.ndarray((GTbbox_CROPpred_fish_df.shape[0], ROWS, COLS, 3), dtype=np.uint8)
    i = 0
    for index,row in GTbbox_CROPpred_fish_df.iterrows():
        image_file = row['image_file']
        bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
        cropped = load_img(TEST_DIR+image_file,bbox,target_size=(ROWS,COLS))
        X_trainfish_crop[i] = np.asarray(cropped)
        i += 1
   
    #save data to file
    data_trainfish = {'X_trainfish_crop': X_trainfish_crop}
    with open(OUTPUT_DIR+file_name, 'wb') as f:
        pickle.dump(data_trainfish, f)
        
print('Loading data done.')
X_trainfish_crop = X_trainfish_crop.astype(np.float32)
print('Convert to float32 done.')
X_trainfish_crop /= 255.
print('Rescale by 255 done.')


Generating file data_trainfish_Crop_224_224.pickle
Loading data done.
Convert to float32 done.
Rescale by 255 done.

In [33]:
mean(X_trainfish_crop)


Out[33]:
array([ 0.40706199,  0.4373979 ,  0.39489502], dtype=float32)

In [28]:
mean(X_test_crop[1251:])


Out[28]:
array([ 0.41078389,  0.43895897,  0.39912957], dtype=float32)

In [35]:
# test_mean = [0.41019869,  0.43978861,  0.39873621]
test_mean = [0.37698776,  0.41491762,  0.38681713]

In [55]:
# RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df = ['image_class', 'image_file','crop_index','xmin','ymin','xmax','ymax',
#                                    'NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN',
#                                    'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN',
#                                    'NoF_CROP', 'ALB_CROP', 'BET_CROP', 'DOL_CROP',
#                                    'LAG_CROP', 'OTHER_CROP', 'SHARK_CROP', 'YFT_CROP',
#                                    'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']

file_name = 'RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df_'+test_model_name+'_.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)  
    nb_augmentation = 1
    if nb_augmentation ==1:
        test_preds = test_model.predict_generator(test_generator(df=RFCNbbox_RFCNpred_df, mean=test_mean), 
                                                  val_samples=RFCNbbox_RFCNpred_df.shape[0], nb_worker=1, pickle_safe=False)
    else:
        test_preds = np.zeros((RFCNbbox_RFCNpred_df.shape[0], len(FISH_CLASSES)), dtype=K.floatx())
        for idx in range(nb_augmentation):
            print('{}th augmentation for testing ...'.format(idx+1))
            test_preds += test_model.predict_generator(test_generator(df=RFCNbbox_RFCNpred_df, mean=test_mean, datagen=test_aug_datagen), 
                                                       val_samples=RFCNbbox_RFCNpred_df.shape[0], nb_worker=1, pickle_safe=False)
        test_preds /= nb_augmentation

    CROPpred_df = pd.DataFrame(test_preds, columns=['ALB_CROP', 'BET_CROP', 'DOL_CROP', 'LAG_CROP', 'NoF_CROP', 'OTHER_CROP', 'SHARK_CROP', 'YFT_CROP'])
    RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df = pd.concat([RFCNbbox_RFCNpred_df,CROPpred_df], axis=1)
    
    RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df['NoF'] = RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df['NoF_RFCN']
    for fish in ['ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']:
        RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df[fish] = RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df.apply(lambda row: (1-row['NoF_RFCN'])*row[fish+'_CROP']/(1-row['NoF_CROP']) if row['NoF_CROP']!=1 else 0, axis=1)
#     for fish in FISH_CLASSES:
#         RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df[fish] = RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df[fish+'_CROP']

    RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df.to_pickle(OUTPUT_DIR+file_name)


Generating file RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df_weights.000-0.0327.hdf5_.pickle
batch_index 0
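
The hybrid rule keeps NoF from the RFCN detector and redistributes the remaining mass (1 - NoF_RFCN) over the fish classes in proportion to the crop classifier's conditional fish distribution, fish = (1 - NoF_RFCN) * fish_CROP / (1 - NoF_CROP), so the outputs still sum to 1. Toy numbers (a sketch with a made-up 3-class crop output):

In [ ]:
NoF_RFCN = 0.3
crop = {'NoF': 0.1, 'ALB': 0.6, 'YFT': 0.3}
hybrid = {'NoF': NoF_RFCN}
for fish in ('ALB', 'YFT'):
    hybrid[fish] = (1 - NoF_RFCN) * crop[fish] / (1 - crop['NoF'])
print(hybrid, sum(hybrid.values()))  # NoF 0.3, ALB ~0.467, YFT ~0.233; sums to 1.0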

In [56]:
# clsMaxAve aggregation of the hybrid RFCNpred & CROPpred scores (RFCNpred supplies NoF, CROPpred supplies the fish classes)
# test_pred_df = ['logloss','image_class','image_file','NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']
# RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df = ['image_class', 'image_file','crop_index','xmin','ymin','xmax','ymax',
#                                    'NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN',
#                                    'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN',
#                                    'ALB_CROP', 'BET_CROP', 'DOL_CROP',
#                                    'LAG_CROP', 'OTHER_CROP', 'SHARK_CROP', 'YFT_CROP',
#                                    'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']

file_name = 'test_pred_df_Hybrid_'+test_model_name+'_.pickle'
if os.path.exists(OUTPUT_DIR+file_name):
    print ('Loading from file '+file_name)
    test_pred_df = pd.read_pickle(OUTPUT_DIR+file_name)
else:
    print ('Generating file '+file_name)  
    with open("../RFCN/ImageSets/Main/test.txt","r") as f:
        test_files = f.readlines()
    
    test_pred_df = pd.DataFrame(columns=['logloss','image_class','image_file','NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT'])  
    for j in range(len(test_files)): 
        image_file = test_files[j][:-1]+'.jpg'
        test_pred_im_df = RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df.loc[RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df['image_file'] == image_file,
                                                                       ['image_class', 'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']]
        image_class = test_pred_im_df.iloc[0]['image_class']
        test_pred_im_df.drop('image_class', axis=1, inplace=True)
        max_score = test_pred_im_df.max(axis=1)
        max_cls = test_pred_im_df.idxmax(axis=1)
        test_pred_im_df['max_score'] = max_score
        test_pred_im_df['max_cls'] = max_cls
        test_pred_im_df['Count'] = test_pred_im_df.groupby(['max_cls'])['max_cls'].transform('count')
        idx = test_pred_im_df.groupby(['max_cls'])['max_score'].transform(max) == test_pred_im_df['max_score']
        test_pred_im_clsMax_df = test_pred_im_df.loc[idx,['NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT', 'Count']]
        test_pred_im_clsMax_array = test_pred_im_clsMax_df.values
        pred = np.average(test_pred_im_clsMax_array[:,:-1], axis=0, weights=test_pred_im_clsMax_array[:,-1], returned=False).tolist()
        if image_class!='--':
            ind = FISH_CLASSES.index(image_class)
            logloss = -math.log(pred[ind]) 
        else:
            logloss = np.nan
        test_pred_im_clsMaxAve = [logloss,image_class,image_file]
        test_pred_im_clsMaxAve.extend(pred)
        test_pred_df.loc[len(test_pred_df)]=test_pred_im_clsMaxAve

    test_pred_df.to_pickle(OUTPUT_DIR+file_name)


Generating file test_pred_df_Hybrid_weights.000-0.0327.hdf5_.pickle
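
clsMaxAve in the loop above works per image: every bbox votes for its argmax class, for each voted class only the highest-scoring bbox survives, and the surviving rows are averaged weighted by their vote counts. The same steps on a toy image with 3 boxes and 2 classes (a sketch):

In [ ]:
boxes = pd.DataFrame({'NoF': [0.1, 0.2, 0.7], 'ALB': [0.9, 0.8, 0.3]})
max_score = boxes.max(axis=1)   # each box's best score
max_cls = boxes.idxmax(axis=1)  # ...and the class it votes for
boxes['max_score'], boxes['max_cls'] = max_score, max_cls
boxes['Count'] = boxes.groupby('max_cls')['max_cls'].transform('count')
idx = boxes.groupby('max_cls')['max_score'].transform('max') == boxes['max_score']
winners = boxes.loc[idx, ['NoF', 'ALB', 'Count']].values
# count-weighted average: ALB voted by 2 boxes, NoF by 1
print(np.average(winners[:, :-1], axis=0, weights=winners[:, -1]))  # [0.3 0.7]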

In [ ]:
#### visualization
# RFCNbbox_RFCNpred_CROPpred_df = ['image_class', 'image_file','crop_index','xmin','ymin','xmax','ymax',
#                                    'NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN',
#                                    'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN',
#                                    'NoF_CROP', 'ALB_CROP', 'BET_CROP', 'DOL_CROP',
#                                    'LAG_CROP', 'OTHER_CROP', 'SHARK_CROP', 'YFT_CROP']
#GTbbox_CROPpred_df = ['image_file','crop_index','crop_class','xmin','ymin','xmax','ymax',
#                      'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT', 'logloss']
# test_pred_df = ['logloss','image_class','image_file','NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']

for j in range(test_pred_df.shape[0]):
    image_logloss = test_pred_df.iat[j,0]
    image_class = test_pred_df.iat[j,1]
    image_file = test_pred_df.iat[j,2]
    if not (j < 1000 and j % 30 == 0):
        continue
    im = Image.open('../RFCN/JPEGImages/'+image_file)
    im = np.asarray(im)
    fig, ax = plt.subplots(figsize=(10, 8))
    ax.imshow(im, aspect='equal')
    RFCN_dets = RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df.loc[RFCNbbox_RFCNpred_CROPpred_HYBRIDpred_df['image_file']==image_file]
    for index,row in RFCN_dets.iterrows():
        bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
        RFCN = [row['NoF_RFCN'],row['ALB_RFCN'],row['BET_RFCN'],row['DOL_RFCN'],row['LAG_RFCN'],row['OTHER_RFCN'],row['SHARK_RFCN'],row['YFT_RFCN']]
        CROP = [row['NoF'],row['ALB'],row['BET'],row['DOL'],row['LAG'],row['OTHER'],row['SHARK'],row['YFT']]
        score_RFCN = max(RFCN)
        score_CROP = max(CROP)
        index_RFCN = RFCN.index(score_RFCN)
        index_CROP = CROP.index(score_CROP)
        class_RFCN = FISH_CLASSES[index_RFCN]
        class_CROP = FISH_CLASSES[index_CROP]
        ax.add_patch(plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='red', linewidth=2))
        ax.text(bbox[0], bbox[1] - 2, 'RFCN_{:s} {:.3f} \nHYBRID_{:s} {:.3f}'.format(class_RFCN, score_RFCN, class_CROP, score_CROP), bbox=dict(facecolor='red', alpha=0.5), fontsize=8, color='white')       
    GT_dets = GTbbox_CROPpred_df.loc[GTbbox_CROPpred_df['image_file']==image_file]
    for index,row in GT_dets.iterrows():
        bbox = [row['xmin'],row['ymin'],row['xmax'],row['ymax']]
        CROP = [row['NoF'],row['ALB'],row['BET'],row['DOL'],row['LAG'],row['OTHER'],row['SHARK'],row['YFT']]
        score_CROP = max(CROP)
        index_CROP = CROP.index(score_CROP)
        class_CROP = FISH_CLASSES[index_CROP]
        ax.add_patch(plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='green', linewidth=2))
        ax.text(bbox[0], bbox[3] + 40, 'GT_{:s} \nCROP_{:s} {:.3f}'.format(row['crop_class'], class_CROP, score_CROP), bbox=dict(facecolor='green', alpha=0.5), fontsize=8, color='white')
    ax.set_title(('Image {:s}    FISH {:s}    logloss {}').format(image_file, image_class, image_logloss), fontsize=10) 
    plt.axis('off')
    plt.tight_layout()
    plt.draw()

In [58]:
#temperature
T = 1
test_pred_array = test_pred_df[FISH_CLASSES].values
test_pred_T_array = np.exp(np.log(test_pred_array)/T)
test_pred_T_array = test_pred_T_array/np.sum(test_pred_T_array, axis=1, keepdims=True)
test_pred_T_df = pd.DataFrame(test_pred_T_array, columns=FISH_CLASSES)
test_pred_T_df = pd.concat([test_pred_df[['image_class','image_file']],test_pred_T_df], axis=1)

#add logloss
test_pred_T_df['logloss'] = test_pred_T_df.apply(lambda row: -math.log(row[row['image_class']]) if row['image_class']!='--' else np.nan, axis=1)

#calculate train logloss
print(test_pred_T_df.groupby(['image_class'])['logloss'].mean())
train_logloss = test_pred_T_df['logloss'].mean()
print('logloss of train is', train_logloss)


image_class
--            NaN
ALB      0.043386
BET      0.234966
DOL      0.140754
LAG      0.000835
NoF      0.126442
OTHER    0.105386
SHARK    0.147953
YFT      0.080953
Name: logloss, dtype: float64
logloss of train is 0.08309862497761385
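
T controls how sharp the submitted probabilities are: p_T is p**(1/T) renormalized, so T = 1 is the identity and T > 1 pulls predictions toward uniform, a hedge against the competition logloss punishing confident mistakes. A sketch on one toy prediction:

In [ ]:
p = np.array([[0.9, 0.05, 0.05]])
for T_demo in (1, 2.5):
    p_T = np.exp(np.log(p) / T_demo)
    p_T /= np.sum(p_T, axis=1, keepdims=True)
    print(T_demo, p_T.round(3))
# 1    [[0.9   0.05  0.05 ]]
# 2.5  [[0.614 0.193 0.193]]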

In [62]:
#test submission
submission = test_pred_T_df.loc[:999,['image_file','NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']]
submission.rename(columns={'image_file':'image'}, inplace=True)
sub_file = 'RFCN_AGONOSTICnms_'+RFCN_MODEL+'_'+CROP_MODEL+'_clsMaxAve_conf{:.2f}_T{}_'.format(CONF_THRESH, T)+'{:.4f}'.format(train_logloss)+'.csv'
submission.to_csv(sub_file, index=False)
print('Done!'+sub_file)


Done!RFCN_AGONOSTICnms_resnet101_rfcn_ohem_iter_30000_resnet50_FT38_Classifier_Rep_clsMaxAve_conf0.80_T2.5_0.1780.csv