In [1]:
import os, random, glob, pickle, collections
import numpy as np
import pandas as pd
import ujson as json
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder

import matplotlib.pyplot as plt
from matplotlib import ticker
import seaborn as sns
%matplotlib inline 

from keras.models import Sequential, Model, load_model, model_from_json
from keras.layers import GlobalAveragePooling2D, Flatten, Dropout, Dense
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras import backend as K
K.set_image_dim_ordering('tf')

from skimage.io import imread, imshow, imsave
import cv2
from skimage.util import crop
from skimage.transform import rotate
from skimage.transform import resize
import math


Using TensorFlow backend.

In [2]:
TRAIN_DIR = '../data/train/'
TEST_DIR = '../RFCN/JPEGImages/'
TRAIN_CROP_DIR = '../data/train_crop/'
TEST_CROP_DIR = '../data/test_stg1_crop/'
CHECKPOINT_DIR = './checkpoints/checkpoint3/'
FISH_CLASSES = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
CONF_THRESH = 0.8
ROWS = 224
COLS = 224
BatchSize = 128
LearningRate = 1e-4
le = LabelEncoder()
le.fit(FISH_CLASSES)
print(le.transform(FISH_CLASSES))
def featurewise_center(x):
    mean = np.mean(x, axis=0, keepdims=True)
    mean = np.mean(mean, axis=(1,2), keepdims=True)
    x_centered = x - mean
    return x_centered


[0 1 2 3 4 5 6 7]
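
As a quick sanity check (illustrative only, not part of the original run), featurewise_center removes the per-channel mean computed over the whole batch:

# Illustrative sketch: featurewise_center subtracts the per-channel mean
# taken over the batch and spatial axes; the array shape is unchanged.
dummy = np.random.rand(4, ROWS, COLS, 3).astype(np.float32)
dummy_centered = featurewise_center(dummy)
print(dummy_centered.shape)                  # (4, 224, 224, 3)
print(dummy_centered.mean(axis=(0, 1, 2)))   # per-channel means ~0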

In [31]:
#crop and cache to TRAIN_CROP_DIR by BBannotations
if not os.path.exists(TRAIN_CROP_DIR):
    os.mkdir(TRAIN_CROP_DIR)

for c in FISH_CLASSES:
    TRAIN_CROP_DIR_c = TRAIN_CROP_DIR + '{}/'.format(c)
    if not os.path.exists(TRAIN_CROP_DIR_c):
        os.mkdir(TRAIN_CROP_DIR_c)
    files = glob.glob(TRAIN_CROP_DIR_c+'*')
    for f in files:
        os.remove(f)
        
GT_crop_bboxs_df = pd.DataFrame(columns=['GT_crop_files', 'xmin', 'ymin', 'xmax', 'ymax'])  

crop_classes=FISH_CLASSES[:]
crop_classes.remove('NoF')
count = {}

for c in crop_classes:
    j = json.load(open('../data/BBannotations/{}.json'.format(c), 'r'))
    for l in j: 
        filename = l["filename"]
        head, tail = os.path.split(filename)
        basename, file_extension = os.path.splitext(tail) 
        image = Image.open(TRAIN_DIR+c+'/'+tail)
        for i in range(len(l["annotations"])):
            a = l["annotations"][i]
            file_crop = TRAIN_CROP_DIR + '{}/'.format(a["class"])+c+'_'+basename+'_{}_'.format(i)+a["class"]+'.jpg'
            xmin = (a["x"])
            ymin = (a["y"])
            width = (a["width"])
            height = (a["height"])
            xmax = xmin + width
            ymax = ymin + height
            #save cropped img; rotate portrait crops (PIL transpose method 2 = ROTATE_90) so width >= height
            cropped = image.crop((max(xmin,0), max(ymin,0), xmax, ymax))
            width_cropped, height_cropped = cropped.size
            if height_cropped > width_cropped: cropped = cropped.transpose(method=2)
            cropped.save(file_crop)
            if a["class"] != c: print(file_crop)
            GT_crop_bboxs_df.loc[len(GT_crop_bboxs_df)]=[file_crop.split('/')[-1],max(xmin,0),max(ymin,0),xmax,ymax]
    count[c] = len(os.listdir(TRAIN_CROP_DIR+c))

num_NoF = sum(count.values())*3  #target roughly 3x as many NoF crops as fish crops

#crop and cache to TRAIN_CROP_DIR/NoF by RFCN
#crop images by detections_full_AGNOSTICnms.pkl

RFCN_MODEL = 'resnet101_rfcn_ohem_iter_30000'

with open('../data/RFCN_detections/detections_full_AGNOSTICnms_'+RFCN_MODEL+'.pkl','rb') as f:
    detections_full_AGNOSTICnms = pickle.load(f, encoding='latin1') 

train_detections_full_AGNOSTICnms = detections_full_AGNOSTICnms[1000:]
num_NoF_perIm = math.ceil(num_NoF / len(train_detections_full_AGNOSTICnms))

outputs = []
for im in range(len(train_detections_full_AGNOSTICnms)):
#for im in range(1):
    outputs_im = []
    detects_im = train_detections_full_AGNOSTICnms[im]
    for i in range(len(detects_im)):
        #columns 0-3 are the bbox (xmin, ymin, xmax, ymax); the remaining columns are confidence scores
        if detects_im[i,4] >= 0.999:
            outputs_im.append(detects_im[i,:]) 
    outputs_im = np.asarray(outputs_im)
    outputs_im = outputs_im[np.random.choice(outputs_im.shape[0], num_NoF_perIm, replace=False), :]
    outputs.append(outputs_im)
train_outputs = outputs

with open("../RFCN/ImageSets/Main/train_test.txt","r") as f:
    train_files = f.readlines()

for i in range(len(train_outputs)):
    basename = train_files[i][:9]
    bboxes = train_outputs[i] 
    image = Image.open(TEST_DIR+basename+'.jpg')
    for j in range(len(bboxes)):
        bbox = bboxes[j]
        xmin = bbox[0]
        ymin = bbox[1]
        xmax = bbox[2]
        ymax = bbox[3]
        file_crop = TRAIN_CROP_DIR+'NoF/'+train_files[i][10:-1]+'_'+basename+'_{}_NoF'.format(j)+'.jpg'
        cropped = image.crop((xmin, ymin, xmax, ymax))
        width_cropped, height_cropped = cropped.size
        if height_cropped > width_cropped: cropped = cropped.transpose(method=2)
        cropped.save(file_crop)
        GT_crop_bboxs_df.loc[len(GT_crop_bboxs_df)]=[file_crop.split('/')[-1],xmin,ymin,xmax,ymax]

GT_crop_bboxs_df.to_pickle('../data/train_crop/GT_crop_files_BBox.pickle') 
    
count['NoF'] = len(os.listdir(TRAIN_CROP_DIR+'NoF'))
print(count)


../data/train_crop/LAG/ALB_img_01800_6_LAG.jpg
../data/train_crop/LAG/ALB_img_03397_1_LAG.jpg
../data/train_crop/LAG/ALB_img_03451_1_LAG.jpg
../data/train_crop/LAG/ALB_img_03748_4_LAG.jpg
../data/train_crop/ALB/DOL_img_07212_1_ALB.jpg
../data/train_crop/ALB/DOL_img_07212_2_ALB.jpg
../data/train_crop/LAG/SHARK_img_06082_0_LAG.jpg
../data/train_crop/ALB/SHARK_img_06082_1_ALB.jpg
../data/train_crop/ALB/SHARK_img_06082_2_ALB.jpg
{'ALB': 2509, 'BET': 306, 'DOL': 126, 'NoF': 15108, 'YFT': 799, 'SHARK': 189, 'LAG': 104, 'OTHER': 333}
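
For reference, the crop loop above assumes each ../data/BBannotations/{CLASS}.json entry has roughly this shape (only the keys the code reads; the values here are invented for illustration):

# Hypothetical annotation entry (made-up values, schema keys as used above):
example_entry = {
    "filename": "some/path/img_00001.jpg",
    "annotations": [
        {"class": "ALB", "x": 120.0, "y": 80.0, "width": 250.0, "height": 130.0},
    ],
}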
#visualize
FISH_CLASSES = ['NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']
with open("../RFCN/ImageSets/Main/test.txt","r") as f:
    ims = f.readlines()
train_files = [im[:-1]+'.jpg' for im in ims][1000:]
for j in range(10):
    dets = train_outputs[j]
    im = Image.open("../RFCN/JPEGImages/"+train_files[j])
    im = np.asarray(im)
    fig, ax = plt.subplots(figsize=(8, 5))
    ax.imshow(im, aspect='equal')
    for i in range(dets.shape[0]):
        bbox = dets[i, :4]
        score = np.amax(dets[i,4:])
        index = np.argmax(dets[i,4:])
        class_name = FISH_CLASSES[index]
        ax.add_patch(plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1],
                                   fill=False, edgecolor='red', linewidth=3.5))
        ax.text(bbox[0], bbox[1] - 2, '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5), fontsize=14, color='white')
    ax.set_title(('Image {} detections with '
                  'p({} | box) >= {:.1f}').format(j, class_name, CONF_THRESH), fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()

In [21]:
#visualize
ALB_images = os.listdir(TRAIN_CROP_DIR+'ALB')
for j in range(1000,1010):
    im = Image.open(TRAIN_CROP_DIR+'ALB/'+ALB_images[j])
    im = np.asarray(im)
    fig, ax = plt.subplots()
    ax.imshow(im, aspect='equal')
    ax.set_title(('{:s}').format(ALB_images[j]),fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()


#visualize
im = Image.open('../data/train_crop/ALB/SHARK_img_06082_2_ALB.jpg')
imshow(np.asarray(im))

im_sizes = []
for c in crop_classes:
    TRAIN_CROP_DIR_c = TRAIN_CROP_DIR + '{}/'.format(c)
    files = glob.glob(TRAIN_CROP_DIR_c+'*')
    for file in files:
        im = Image.open(file)
        #size = (width, height)
        size = im.size
        im_sizes.append(size)
im_sizes = np.asarray(im_sizes)
len(im_sizes)
np.mean(im_sizes[:,1]/im_sizes[:,0])
plt.hist(im_sizes[:,1]/im_sizes[:,0], bins=10)
plt.scatter(im_sizes[:,0],im_sizes[:,1])

In [3]:
#Loading data
import pickle

def read_image(src):
    """Read and resize individual images"""
    im = Image.open(src)
    im = im.resize((COLS, ROWS), Image.BILINEAR)
    im = np.asarray(im)
    return im

if os.path.exists('../data/data_train_BBCrop_{}_{}.pickle'.format(ROWS, COLS)):
    print ('data_train_BBCrop_{}_{}.pickle exists. Loading data from file.'.format(ROWS, COLS))
    with open('../data/data_train_BBCrop_{}_{}.pickle'.format(ROWS, COLS), 'rb') as f:
        data_train = pickle.load(f)
    X_train_crop = data_train['X_train_crop']
    y_train_crop = data_train['y_train_crop']
    train_crop_files = data_train['train_crop_files']
    class_weight = data_train['class_weight']
else:
    print ('Loading data from original images. Generating data_train_BBCrop_{}_{}.pickle.'.format(ROWS, COLS))
    
    y_train_crop = []
    train_crop_files = []

    for fish in FISH_CLASSES:
        fish_dir = TRAIN_CROP_DIR+'{}'.format(fish)
        fish_files = [fish+'/'+im for im in os.listdir(fish_dir)]
        train_crop_files.extend(fish_files)

        y_fish = np.tile(fish, len(fish_files))
        y_train_crop.extend(y_fish)

    y_train_crop = np.array(y_train_crop)
    X_train_crop = np.ndarray((len(train_crop_files), ROWS, COLS, 3), dtype=np.uint8)

    for i, im in enumerate(train_crop_files): 
        X_train_crop[i] = read_image(TRAIN_CROP_DIR+im)
        if i%1000 == 0: print('Processed {} of {}'.format(i, len(train_crop_files)))

    # class_weight
    y_train_crop = le.transform(y_train_crop)
    class_weight = dict(collections.Counter(y_train_crop))
    ref = max(class_weight.values())
    for key,value in class_weight.items():
        class_weight[key] = ref/value
    # One Hot Encoding Labels
    y_train_crop = np_utils.to_categorical(y_train_crop)
    
    train_crop_files = [file.split('/')[-1] for file in train_crop_files]
    
    #save data to file
    data_train = {'X_train_crop': X_train_crop,'y_train_crop': y_train_crop,'train_crop_files': train_crop_files,'class_weight':class_weight}

    with open('../data/data_train_BBCrop_{}_{}.pickle'.format(ROWS, COLS), 'wb') as f:
        pickle.dump(data_train, f)

#rescale
# print('Loading data done.')
# X_train_crop = X_train_crop.astype(np.float32)
# print('Convert to float32 done.')
# X_train_crop /= 255.
# print('Rescale by 255 done.')
# #training stg1 and stg2 and resume should use the same train/test split!!! Remember to set the random_state!
# X_train, X_valid, y_train, y_valid = train_test_split(X_train_crop, y_train_crop, test_size=0.2, random_state=1986, stratify=y_train_crop)
# del X_train_crop


data_train_BBCrop_224_224.pickle exists. Loading data from file.
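
With the counts printed earlier, the class_weight rule above works out to max_count/count per class, so the rarest class gets the largest weight; a minimal sketch using those counts (the real dict is keyed by encoded labels, not class names):

counts = {'ALB': 2509, 'BET': 306, 'DOL': 126, 'LAG': 104,
          'NoF': 15108, 'OTHER': 333, 'SHARK': 189, 'YFT': 799}
ref = max(counts.values())                 # 15108 (NoF)
weights = {k: ref / v for k, v in counts.items()}
print(round(weights['LAG'], 1))            # 145.3 - rarest class, largest weight
print(weights['NoF'])                      # 1.0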

In [23]:
y_train_crop = []
train_crop_files = []

crop_classes=FISH_CLASSES[:]
crop_classes.remove('NoF')
count = {}

for fish in crop_classes:
    fish_dir = TRAIN_CROP_DIR+'{}'.format(fish)
    fish_files = [fish+'/'+im for im in os.listdir(fish_dir)]
    train_crop_files.extend(fish_files)

    y_fish = np.tile(fish, len(fish_files))
    y_train_crop.extend(y_fish)

y_train_crop = np.array(y_train_crop)
X_train_crop = np.ndarray((len(train_crop_files), ROWS, COLS, 3), dtype=np.uint8)

for i, im in enumerate(train_crop_files): 
    X_train_crop[i] = read_image(TRAIN_CROP_DIR+im)
    if i%1000 == 0: print('Processed {} of {}'.format(i, len(train_crop_files)))


Processed 0 of 4371
Processed 1000 of 4371
Processed 2000 of 4371
Processed 3000 of 4371
Processed 4000 of 4371

In [4]:
#data preprocessing

train_datagen = ImageDataGenerator(
    featurewise_center=True,
    #featurewise_std_normalization=True,
    #rescale=1./255,
    rotation_range=180,
    shear_range=0.2,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    vertical_flip=True)
train_datagen.fit(X_train)  #X_train/X_valid come from the train_test_split in the (commented) rescale block above
train_generator = train_datagen.flow(X_train, y_train, batch_size=BatchSize, shuffle=True, seed=None)

valid_datagen = ImageDataGenerator(
    featurewise_center=True)
    #featurewise_std_normalization=True)
    #rescale=1./255
valid_datagen.fit(X_valid)   
valid_generator = valid_datagen.flow(X_valid, y_valid, batch_size=BatchSize, shuffle=True, seed=None)
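
Note that valid_datagen is fit on X_valid, so validation batches are centered with the validation mean rather than the training mean; a one-line sketch to reuse the training statistics instead (Keras 1's ImageDataGenerator keeps the fitted mean in its .mean attribute):

# Optional: center validation data with the training-set mean instead.
valid_datagen.mean = train_datagen.mean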

In [5]:
#callbacks

early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='auto')        

model_checkpoint = ModelCheckpoint(filepath=CHECKPOINT_DIR+'weights.{epoch:03d}-{val_loss:.4f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
        
learningrate_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, mode='auto', epsilon=0.001, cooldown=0, min_lr=0)

tensorboard = TensorBoard(log_dir='./logs/log3', histogram_freq=0, write_graph=True, write_images=True)
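
For reference, ReduceLROnPlateau with factor=0.1 multiplies the current learning rate by 0.1 at each plateau, which matches the drops printed in the training logs below:

# Sketch of the schedule's effect: from a starting lr of 1e-5,
# two plateaus give ~1e-6 and then ~1e-7 (cf. the log lines further down).
lr = 1e-5
for _ in range(2):
    lr *= 0.1
print(lr)   # ~1e-7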

In [8]:
#Resnet50
#stg1 training

from keras.applications.resnet50 import ResNet50

base_model = ResNet50(weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
#x = Flatten()(x)
#x = Dense(256, init='glorot_normal', activation='relu')(x)
#x = LeakyReLU(alpha=0.33)(x)
#x = Dropout(0.5)(x)
#x = Dense(256, init='glorot_normal', activation='relu')(x)
#x = LeakyReLU(alpha=0.33)(x)
#x = Dropout(0.5)(x)
predictions = Dense(len(FISH_CLASSES), init='glorot_normal', activation='softmax')(x)

# this is the model we will train
model = Model(input=base_model.input, output=predictions)

# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional ResNet50 layers
for layer in base_model.layers:
    layer.trainable = False

# compile the model (should be done *after* setting layers to non-trainable)
optimizer = Adam(lr=LearningRate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# train the model on the new data for a few epochs
model.fit_generator(train_generator, samples_per_epoch=len(X_train), nb_epoch=30, verbose=1, 
                    callbacks=[early_stopping, model_checkpoint, learningrate_schedule, tensorboard], 
                    validation_data=valid_generator, nb_val_samples=len(X_valid), class_weight=class_weight, nb_worker=3, pickle_safe=True)


Epoch 1/30
15488/15583 [============================>.] - ETA: 1s - loss: 14.6895 - acc: 0.1697
/opt/anaconda3/lib/python3.5/site-packages/keras/engine/training.py:1470: UserWarning: Epoch comprised more than `samples_per_epoch` samples, which might affect learning results. Set `samples_per_epoch` correctly to avoid this warning.
  warnings.warn('Epoch comprised more than '
Epoch 00000: val_loss improved from 1.90143 to 1.56554, saving model to ./checkpoints/checkpoint3/weights.000-1.5655.hdf5
15616/15583 [==============================] - 257s - loss: 14.6950 - acc: 0.1710 - val_loss: 1.5655 - val_acc: 0.7659
Epoch 2/30
15488/15583 [============================>.] - ETA: 1s - loss: 10.8415 - acc: 0.4660Epoch 00001: val_loss improved from 1.56554 to 1.12747, saving model to ./checkpoints/checkpoint3/weights.001-1.1275.hdf5
15616/15583 [==============================] - 245s - loss: 10.8314 - acc: 0.4668 - val_loss: 1.1275 - val_acc: 0.7699
Epoch 3/30
15517/15583 [============================>.] - ETA: 0s - loss: 9.4755 - acc: 0.5963Epoch 00002: val_loss improved from 1.12747 to 1.02715, saving model to ./checkpoints/checkpoint3/weights.002-1.0272.hdf5
15645/15583 [==============================] - 248s - loss: 9.4400 - acc: 0.5967 - val_loss: 1.0272 - val_acc: 0.7770
Epoch 4/30
15488/15583 [============================>.] - ETA: 1s - loss: 8.3600 - acc: 0.6429Epoch 00003: val_loss improved from 1.02715 to 1.02715, saving model to ./checkpoints/checkpoint3/weights.003-1.0272.hdf5
15616/15583 [==============================] - 245s - loss: 8.3463 - acc: 0.6434 - val_loss: 1.0272 - val_acc: 0.7797
Epoch 5/30
15488/15583 [============================>.] - ETA: 1s - loss: 7.7243 - acc: 0.6858Epoch 00004: val_loss did not improve
15616/15583 [==============================] - 245s - loss: 7.7598 - acc: 0.6856 - val_loss: 1.0852 - val_acc: 0.7671
Epoch 6/30
15517/15583 [============================>.] - ETA: 0s - loss: 6.8658 - acc: 0.7112Epoch 00005: val_loss improved from 1.02715 to 0.96912, saving model to ./checkpoints/checkpoint3/weights.005-0.9691.hdf5
15645/15583 [==============================] - 246s - loss: 6.8484 - acc: 0.7113 - val_loss: 0.9691 - val_acc: 0.7760
Epoch 7/30
15488/15583 [============================>.] - ETA: 1s - loss: 6.7349 - acc: 0.7238Epoch 00006: val_loss improved from 0.96912 to 0.83897, saving model to ./checkpoints/checkpoint3/weights.006-0.8390.hdf5
15616/15583 [==============================] - 245s - loss: 6.7184 - acc: 0.7238 - val_loss: 0.8390 - val_acc: 0.7681
Epoch 8/30
15488/15583 [============================>.] - ETA: 1s - loss: 6.4789 - acc: 0.7249Epoch 00007: val_loss improved from 0.83897 to 0.62102, saving model to ./checkpoints/checkpoint3/weights.007-0.6210.hdf5
15616/15583 [==============================] - 244s - loss: 6.4633 - acc: 0.7253 - val_loss: 0.6210 - val_acc: 0.8052
Epoch 9/30
15517/15583 [============================>.] - ETA: 0s - loss: 5.9462 - acc: 0.7521Epoch 00008: val_loss improved from 0.62102 to 0.60852, saving model to ./checkpoints/checkpoint3/weights.008-0.6085.hdf5
15645/15583 [==============================] - 246s - loss: 5.9431 - acc: 0.7525 - val_loss: 0.6085 - val_acc: 0.8044
Epoch 10/30
15488/15583 [============================>.] - ETA: 1s - loss: 5.8200 - acc: 0.7527Epoch 00009: val_loss improved from 0.60852 to 0.59970, saving model to ./checkpoints/checkpoint3/weights.009-0.5997.hdf5
15616/15583 [==============================] - 246s - loss: 5.8103 - acc: 0.7529 - val_loss: 0.5997 - val_acc: 0.8007
Epoch 11/30
15488/15583 [============================>.] - ETA: 1s - loss: 5.7120 - acc: 0.7601Epoch 00010: val_loss did not improve
15616/15583 [==============================] - 243s - loss: 5.6837 - acc: 0.7602 - val_loss: 0.6534 - val_acc: 0.7853
Epoch 12/30
15517/15583 [============================>.] - ETA: 0s - loss: 5.3849 - acc: 0.7610Epoch 00011: val_loss did not improve
15645/15583 [==============================] - 246s - loss: 5.3756 - acc: 0.7611 - val_loss: 0.6427 - val_acc: 0.7936
Epoch 13/30
15488/15583 [============================>.] - ETA: 1s - loss: 5.2207 - acc: 0.7779Epoch 00012: val_loss did not improve
15616/15583 [==============================] - 245s - loss: 5.2235 - acc: 0.7780 - val_loss: 0.6124 - val_acc: 0.7891
Epoch 14/30
15488/15583 [============================>.] - ETA: 1s - loss: 5.1792 - acc: 0.7718Epoch 00013: val_loss did not improve
15616/15583 [==============================] - 245s - loss: 5.1773 - acc: 0.7717 - val_loss: 0.6322 - val_acc: 0.7954
Epoch 15/30
15517/15583 [============================>.] - ETA: 0s - loss: 5.0452 - acc: 0.7822Epoch 00014: val_loss improved from 0.59970 to 0.56778, saving model to ./checkpoints/checkpoint3/weights.014-0.5678.hdf5
15645/15583 [==============================] - 247s - loss: 5.0400 - acc: 0.7818 - val_loss: 0.5678 - val_acc: 0.8133
Epoch 16/30
15488/15583 [============================>.] - ETA: 1s - loss: 4.8350 - acc: 0.7805Epoch 00015: val_loss improved from 0.56778 to 0.55652, saving model to ./checkpoints/checkpoint3/weights.015-0.5565.hdf5
15616/15583 [==============================] - 246s - loss: 4.8639 - acc: 0.7807 - val_loss: 0.5565 - val_acc: 0.8198
Epoch 17/30
15488/15583 [============================>.] - ETA: 1s - loss: 4.7190 - acc: 0.7943Epoch 00016: val_loss did not improve
15616/15583 [==============================] - 245s - loss: 4.7139 - acc: 0.7943 - val_loss: 0.5801 - val_acc: 0.8072
Epoch 18/30
15517/15583 [============================>.] - ETA: 0s - loss: 4.5881 - acc: 0.7945Epoch 00017: val_loss improved from 0.55652 to 0.54402, saving model to ./checkpoints/checkpoint3/weights.017-0.5440.hdf5
15645/15583 [==============================] - 247s - loss: 4.5859 - acc: 0.7944 - val_loss: 0.5440 - val_acc: 0.8175
Epoch 19/30
15488/15583 [============================>.] - ETA: 1s - loss: 4.5533 - acc: 0.7935Epoch 00018: val_loss did not improve
15616/15583 [==============================] - 246s - loss: 4.5536 - acc: 0.7937 - val_loss: 0.5737 - val_acc: 0.8183
Epoch 20/30
15488/15583 [============================>.] - ETA: 1s - loss: 4.4750 - acc: 0.8000Epoch 00019: val_loss improved from 0.54402 to 0.54086, saving model to ./checkpoints/checkpoint3/weights.019-0.5409.hdf5
15616/15583 [==============================] - 246s - loss: 4.4780 - acc: 0.8005 - val_loss: 0.5409 - val_acc: 0.8178
Epoch 21/30
15517/15583 [============================>.] - ETA: 0s - loss: 4.5401 - acc: 0.7965Epoch 00020: val_loss improved from 0.54086 to 0.52720, saving model to ./checkpoints/checkpoint3/weights.020-0.5272.hdf5
15645/15583 [==============================] - 246s - loss: 4.5318 - acc: 0.7969 - val_loss: 0.5272 - val_acc: 0.8259
Epoch 22/30
15488/15583 [============================>.] - ETA: 1s - loss: 4.2241 - acc: 0.8131Epoch 00021: val_loss improved from 0.52720 to 0.50949, saving model to ./checkpoints/checkpoint3/weights.021-0.5095.hdf5
15616/15583 [==============================] - 245s - loss: 4.2287 - acc: 0.8133 - val_loss: 0.5095 - val_acc: 0.8337
Epoch 23/30
15488/15583 [============================>.] - ETA: 1s - loss: 4.1723 - acc: 0.8099Epoch 00022: val_loss improved from 0.50949 to 0.47378, saving model to ./checkpoints/checkpoint3/weights.022-0.4738.hdf5
15616/15583 [==============================] - 246s - loss: 4.1561 - acc: 0.8099 - val_loss: 0.4738 - val_acc: 0.8458
Epoch 24/30
15517/15583 [============================>.] - ETA: 0s - loss: 4.3341 - acc: 0.8044Epoch 00023: val_loss did not improve
15645/15583 [==============================] - 245s - loss: 4.3249 - acc: 0.8043 - val_loss: 0.4954 - val_acc: 0.8276
Epoch 25/30
15488/15583 [============================>.] - ETA: 1s - loss: 3.9649 - acc: 0.8113Epoch 00024: val_loss did not improve
15616/15583 [==============================] - 246s - loss: 3.9655 - acc: 0.8113 - val_loss: 0.5058 - val_acc: 0.8367
Epoch 26/30
15488/15583 [============================>.] - ETA: 1s - loss: 4.1032 - acc: 0.8213Epoch 00025: val_loss did not improve
15616/15583 [==============================] - 248s - loss: 4.0917 - acc: 0.8208 - val_loss: 0.5043 - val_acc: 0.8256
Epoch 27/30
15517/15583 [============================>.] - ETA: 0s - loss: 4.1270 - acc: 0.8138Epoch 00026: val_loss improved from 0.47378 to 0.43112, saving model to ./checkpoints/checkpoint3/weights.026-0.4311.hdf5
15645/15583 [==============================] - 247s - loss: 4.1130 - acc: 0.8138 - val_loss: 0.4311 - val_acc: 0.8576
Epoch 28/30
15488/15583 [============================>.] - ETA: 1s - loss: 4.1460 - acc: 0.8125Epoch 00027: val_loss did not improve
15616/15583 [==============================] - 245s - loss: 4.1370 - acc: 0.8126 - val_loss: 0.5074 - val_acc: 0.8339
Epoch 29/30
15488/15583 [============================>.] - ETA: 1s - loss: 3.8039 - acc: 0.8227Epoch 00028: val_loss improved from 0.43112 to 0.43082, saving model to ./checkpoints/checkpoint3/weights.028-0.4308.hdf5
15616/15583 [==============================] - 247s - loss: 3.8108 - acc: 0.8227 - val_loss: 0.4308 - val_acc: 0.8642
Epoch 30/30
15517/15583 [============================>.] - ETA: 0s - loss: 3.9188 - acc: 0.8156Epoch 00029: val_loss did not improve
15645/15583 [==============================] - 246s - loss: 3.9311 - acc: 0.8153 - val_loss: 0.4843 - val_acc: 0.8392
Out[8]:
<keras.callbacks.History at 0x7ffa6023f4a8>

In [ ]:
#Resnet50
#stg2 training

files = glob.glob(CHECKPOINT_DIR+'*')
val_losses = [float(f.split('-')[-1][:-5]) for f in files]  #parse val_loss from 'weights.{epoch}-{val_loss}.hdf5'
index = val_losses.index(min(val_losses))
print('Loading model from checkpoints file ' + files[index])
model = load_model(files[index])
# print('Loading model from weights.004-0.0565.hdf5')
# model = load_model('./checkpoints/checkpoint3/weights.004-0.0565.hdf5')

from keras.applications.resnet50 import ResNet50

base_model = ResNet50(weights='imagenet', include_top=False)
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers of ResNet50. We will freeze the bottom N layers
# and train the remaining top layers.

# let's visualize layer names and layer indices to see how many layers
# we should freeze:
# for i, layer in enumerate(base_model.layers):
#    print(i, layer.name)

# here we freeze the first 142 layers and unfreeze the rest:
#164
for layer in model.layers[:142]:
   layer.trainable = False
for layer in model.layers[142:]:
   layer.trainable = True

# we need to recompile the model for these modifications to take effect
# we use Adam with a low learning rate
optimizer = Adam(lr=LearningRate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

model.fit_generator(train_generator, samples_per_epoch=len(X_train), nb_epoch=300, verbose=1, 
                    callbacks=[early_stopping, model_checkpoint, learningrate_schedule, tensorboard], 
                    validation_data=valid_generator, nb_val_samples=len(X_valid), class_weight=class_weight, nb_worker=3, pickle_safe=True)


Loading model from checkpoints file ./checkpoints/checkpoint3/weights.028-0.4308.hdf5
Epoch 1/300
15488/15583 [============================>.] - ETA: 1s - loss: 3.5299 - acc: 0.8370
/opt/anaconda3/lib/python3.5/site-packages/keras/engine/training.py:1470: UserWarning: Epoch comprised more than `samples_per_epoch` samples, which might affect learning results. Set `samples_per_epoch` correctly to avoid this warning.
  warnings.warn('Epoch comprised more than '
Epoch 00000: val_loss improved from 0.43082 to 0.27371, saving model to ./checkpoints/checkpoint3/weights.000-0.2737.hdf5
15616/15583 [==============================] - 302s - loss: 3.5209 - acc: 0.8370 - val_loss: 0.2737 - val_acc: 0.9171
Epoch 2/300
15488/15583 [============================>.] - ETA: 1s - loss: 2.5931 - acc: 0.8776Epoch 00001: val_loss improved from 0.27371 to 0.23653, saving model to ./checkpoints/checkpoint3/weights.001-0.2365.hdf5
15616/15583 [==============================] - 281s - loss: 2.5844 - acc: 0.8781 - val_loss: 0.2365 - val_acc: 0.9234
Epoch 3/300
15517/15583 [============================>.] - ETA: 0s - loss: 1.7473 - acc: 0.9093Epoch 00002: val_loss improved from 0.23653 to 0.23120, saving model to ./checkpoints/checkpoint3/weights.002-0.2312.hdf5
15645/15583 [==============================] - 286s - loss: 1.7389 - acc: 0.9096 - val_loss: 0.2312 - val_acc: 0.9214
Epoch 4/300
15488/15583 [============================>.] - ETA: 1s - loss: 1.3306 - acc: 0.9235Epoch 00003: val_loss improved from 0.23120 to 0.18153, saving model to ./checkpoints/checkpoint3/weights.003-0.1815.hdf5
15616/15583 [==============================] - 282s - loss: 1.3240 - acc: 0.9235 - val_loss: 0.1815 - val_acc: 0.9405
Epoch 5/300
15488/15583 [============================>.] - ETA: 1s - loss: 1.2263 - acc: 0.9268Epoch 00004: val_loss did not improve
15616/15583 [==============================] - 281s - loss: 1.2343 - acc: 0.9269 - val_loss: 0.2646 - val_acc: 0.9224
Epoch 6/300
15517/15583 [============================>.] - ETA: 0s - loss: 1.0257 - acc: 0.9403Epoch 00005: val_loss did not improve
15645/15583 [==============================] - 284s - loss: 1.0266 - acc: 0.9402 - val_loss: 0.2668 - val_acc: 0.9221
Epoch 7/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.8768 - acc: 0.9465Epoch 00006: val_loss improved from 0.18153 to 0.13691, saving model to ./checkpoints/checkpoint3/weights.006-0.1369.hdf5
15616/15583 [==============================] - 283s - loss: 0.8825 - acc: 0.9466 - val_loss: 0.1369 - val_acc: 0.9559
Epoch 8/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.8070 - acc: 0.9545Epoch 00007: val_loss did not improve
15616/15583 [==============================] - 281s - loss: 0.8078 - acc: 0.9543 - val_loss: 0.1473 - val_acc: 0.9531
Epoch 9/300
15517/15583 [============================>.] - ETA: 0s - loss: 0.6419 - acc: 0.9555Epoch 00008: val_loss improved from 0.13691 to 0.12719, saving model to ./checkpoints/checkpoint3/weights.008-0.1272.hdf5
15645/15583 [==============================] - 284s - loss: 0.6408 - acc: 0.9557 - val_loss: 0.1272 - val_acc: 0.9594
Epoch 10/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.4418 - acc: 0.9647Epoch 00009: val_loss did not improve
15616/15583 [==============================] - 282s - loss: 0.4407 - acc: 0.9648 - val_loss: 0.1331 - val_acc: 0.9567
Epoch 11/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.5102 - acc: 0.9635Epoch 00010: val_loss improved from 0.12719 to 0.10877, saving model to ./checkpoints/checkpoint3/weights.010-0.1088.hdf5
15616/15583 [==============================] - 283s - loss: 0.5075 - acc: 0.9634 - val_loss: 0.1088 - val_acc: 0.9612
Epoch 12/300
15517/15583 [============================>.] - ETA: 0s - loss: 0.4719 - acc: 0.9667Epoch 00011: val_loss did not improve
15645/15583 [==============================] - 283s - loss: 0.4702 - acc: 0.9665 - val_loss: 0.1298 - val_acc: 0.9619
Epoch 13/300
 4864/15583 [========>.....................] - ETA: 160s - loss: 0.4867 - acc: 0.9688

In [8]:
#Resnet50
#stg3 training

files = glob.glob(CHECKPOINT_DIR+'*')
val_losses = [float(f.split('-')[-1][:-5]) for f in files]
index = val_losses.index(min(val_losses))
print('Loading model from checkpoints file ' + files[index])
model = load_model(files[index])
# print('Loading model from weights.004-0.0565.hdf5')
# model = load_model('./checkpoints/checkpoint3/weights.004-0.0565.hdf5')

# from keras.applications.resnet50 import ResNet50

# base_model = ResNet50(weights='imagenet', include_top=False)
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers of ResNet50. We will freeze the bottom N layers
# and train the remaining top layers.

# let's visualize layer names and layer indices to see how many layers
# we should freeze:
# for i, layer in enumerate(base_model.layers):
#    print(i, layer.name)

# here we freeze the first 80 layers and unfreeze the rest:
#164,142,80
for layer in model.layers[:80]:
   layer.trainable = False
for layer in model.layers[80:]:
   layer.trainable = True

# we need to recompile the model for these modifications to take effect
# we use Adam with a low learning rate
optimizer = Adam(lr=1e-5)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

model.fit_generator(train_generator, samples_per_epoch=len(X_train), nb_epoch=300, verbose=1, 
                    callbacks=[early_stopping, model_checkpoint, learningrate_schedule, tensorboard], 
                    validation_data=valid_generator, nb_val_samples=len(X_valid), class_weight=class_weight, nb_worker=3, pickle_safe=True)


Loading model from checkpoints file ./checkpoints/checkpoint3/weights.029-0.0654.hdf5
Epoch 1/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.5935 - acc: 0.9737
/opt/anaconda3/lib/python3.5/site-packages/keras/engine/training.py:1470: UserWarning: Epoch comprised more than `samples_per_epoch` samples, which might affect learning results. Set `samples_per_epoch` correctly to avoid this warning.
  warnings.warn('Epoch comprised more than '
Epoch 00000: val_loss improved from inf to 0.06351, saving model to ./checkpoints/checkpoint3/weights.000-0.0635.hdf5
15616/15583 [==============================] - 374s - loss: 0.5911 - acc: 0.9738 - val_loss: 0.0635 - val_acc: 0.9806
Epoch 2/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.4470 - acc: 0.9768Epoch 00001: val_loss improved from 0.06351 to 0.06211, saving model to ./checkpoints/checkpoint3/weights.001-0.0621.hdf5
15616/15583 [==============================] - 348s - loss: 0.4447 - acc: 0.9767 - val_loss: 0.0621 - val_acc: 0.9831
Epoch 3/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.3207 - acc: 0.9771Epoch 00002: val_loss improved from 0.06211 to 0.05610, saving model to ./checkpoints/checkpoint3/weights.002-0.0561.hdf5
15645/15583 [==============================] - 354s - loss: 0.3206 - acc: 0.9771 - val_loss: 0.0561 - val_acc: 0.9879
Epoch 4/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.3141 - acc: 0.9784Epoch 00003: val_loss did not improve
15616/15583 [==============================] - 349s - loss: 0.3119 - acc: 0.9785 - val_loss: 0.0654 - val_acc: 0.9811
Epoch 5/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.2683 - acc: 0.9789Epoch 00004: val_loss improved from 0.05610 to 0.05582, saving model to ./checkpoints/checkpoint3/weights.004-0.0558.hdf5
15616/15583 [==============================] - 349s - loss: 0.2688 - acc: 0.9789 - val_loss: 0.0558 - val_acc: 0.9859
Epoch 6/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.2055 - acc: 0.9809Epoch 00005: val_loss improved from 0.05582 to 0.05561, saving model to ./checkpoints/checkpoint3/weights.005-0.0556.hdf5
15645/15583 [==============================] - 350s - loss: 0.2065 - acc: 0.9810 - val_loss: 0.0556 - val_acc: 0.9846
Epoch 7/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.2275 - acc: 0.9828Epoch 00006: val_loss improved from 0.05561 to 0.05139, saving model to ./checkpoints/checkpoint3/weights.006-0.0514.hdf5
15616/15583 [==============================] - 349s - loss: 0.2274 - acc: 0.9827 - val_loss: 0.0514 - val_acc: 0.9871
Epoch 8/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.1798 - acc: 0.9837Epoch 00007: val_loss improved from 0.05139 to 0.04840, saving model to ./checkpoints/checkpoint3/weights.007-0.0484.hdf5
15616/15583 [==============================] - 349s - loss: 0.1817 - acc: 0.9839 - val_loss: 0.0484 - val_acc: 0.9879
Epoch 9/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.1558 - acc: 0.9838Epoch 00008: val_loss did not improve
15645/15583 [==============================] - 349s - loss: 0.1559 - acc: 0.9838 - val_loss: 0.0624 - val_acc: 0.9836
Epoch 10/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.1534 - acc: 0.9853Epoch 00009: val_loss improved from 0.04840 to 0.04759, saving model to ./checkpoints/checkpoint3/weights.009-0.0476.hdf5
15616/15583 [==============================] - 349s - loss: 0.1542 - acc: 0.9853 - val_loss: 0.0476 - val_acc: 0.9861
Epoch 11/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.1601 - acc: 0.9853Epoch 00010: val_loss did not improve
15616/15583 [==============================] - 349s - loss: 0.1592 - acc: 0.9853 - val_loss: 0.0547 - val_acc: 0.9849
Epoch 12/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.1289 - acc: 0.9879Epoch 00011: val_loss did not improve
15645/15583 [==============================] - 349s - loss: 0.1281 - acc: 0.9880 - val_loss: 0.0479 - val_acc: 0.9846
Epoch 13/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.1186 - acc: 0.9879Epoch 00012: val_loss did not improve
15616/15583 [==============================] - 348s - loss: 0.1181 - acc: 0.9880 - val_loss: 0.0510 - val_acc: 0.9856
Epoch 14/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.1596 - acc: 0.9892Epoch 00013: val_loss improved from 0.04759 to 0.04548, saving model to ./checkpoints/checkpoint3/weights.013-0.0455.hdf5
15616/15583 [==============================] - 350s - loss: 0.1587 - acc: 0.9890 - val_loss: 0.0455 - val_acc: 0.9894
Epoch 15/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.1049 - acc: 0.9885Epoch 00014: val_loss improved from 0.04548 to 0.03690, saving model to ./checkpoints/checkpoint3/weights.014-0.0369.hdf5
15645/15583 [==============================] - 349s - loss: 0.1044 - acc: 0.9886 - val_loss: 0.0369 - val_acc: 0.9887
Epoch 16/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.1038 - acc: 0.9900Epoch 00015: val_loss did not improve
15616/15583 [==============================] - 347s - loss: 0.1035 - acc: 0.9901 - val_loss: 0.0488 - val_acc: 0.9877
Epoch 17/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0843 - acc: 0.9909Epoch 00016: val_loss did not improve
15616/15583 [==============================] - 348s - loss: 0.0840 - acc: 0.9908 - val_loss: 0.0450 - val_acc: 0.9871
Epoch 18/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.0961 - acc: 0.9894Epoch 00017: val_loss did not improve
15645/15583 [==============================] - 349s - loss: 0.0957 - acc: 0.9893 - val_loss: 0.0427 - val_acc: 0.9874
Epoch 19/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0931 - acc: 0.9906Epoch 00018: val_loss did not improve
15616/15583 [==============================] - 349s - loss: 0.0932 - acc: 0.9906 - val_loss: 0.0416 - val_acc: 0.9882
Epoch 20/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0825 - acc: 0.9917Epoch 00019: val_loss did not improve
15616/15583 [==============================] - 348s - loss: 0.0822 - acc: 0.9917 - val_loss: 0.0481 - val_acc: 0.9851
Epoch 21/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.1024 - acc: 0.9895Epoch 00020: val_loss did not improve

Epoch 00020: reducing learning rate to 9.999999747378752e-07.
15645/15583 [==============================] - 352s - loss: 0.1018 - acc: 0.9896 - val_loss: 0.0440 - val_acc: 0.9874
Epoch 22/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0866 - acc: 0.9910Epoch 00021: val_loss did not improve
15616/15583 [==============================] - 348s - loss: 0.0861 - acc: 0.9909 - val_loss: 0.0465 - val_acc: 0.9874
Epoch 23/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0775 - acc: 0.9916Epoch 00022: val_loss did not improve
15616/15583 [==============================] - 350s - loss: 0.0778 - acc: 0.9915 - val_loss: 0.0422 - val_acc: 0.9882
Epoch 24/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.0736 - acc: 0.9921Epoch 00023: val_loss did not improve
15645/15583 [==============================] - 350s - loss: 0.0733 - acc: 0.9921 - val_loss: 0.0450 - val_acc: 0.9877
Epoch 25/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0732 - acc: 0.9916Epoch 00024: val_loss did not improve
15616/15583 [==============================] - 349s - loss: 0.0732 - acc: 0.9915 - val_loss: 0.0381 - val_acc: 0.9892
Epoch 26/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0700 - acc: 0.9917Epoch 00025: val_loss did not improve

Epoch 00025: reducing learning rate to 9.999999974752428e-08.
15616/15583 [==============================] - 349s - loss: 0.0698 - acc: 0.9917 - val_loss: 0.0421 - val_acc: 0.9894
Epoch 00025: early stopping
Out[8]:
<keras.callbacks.History at 0x7fce46ca16d8>

In [7]:
#Resnet50
#stg4 training

files = glob.glob(CHECKPOINT_DIR+'*')
val_losses = [float(f.split('-')[-1][:-5]) for f in files]
index = val_losses.index(min(val_losses))
print('Loading model from checkpoints file ' + files[index])
model = load_model(files[index])
# print('Loading model from weights.004-0.0565.hdf5')
# model = load_model('./checkpoints/checkpoint3/weights.004-0.0565.hdf5')

# from keras.applications.resnet50 import ResNet50

# base_model = ResNet50(weights='imagenet', include_top=False)
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers of ResNet50. We will freeze the bottom N layers
# and train the remaining top layers.

# let's visualize layer names and layer indices to see how many layers
# we should freeze:
# for i, layer in enumerate(base_model.layers):
#     print(i, layer.name)

# in this stage all layers stay trainable (the freeze below is left commented out):
#164,142,80,38
# for layer in model.layers[:38]:
#    layer.trainable = False
# for layer in model.layers[38:]:
#    layer.trainable = True

# we need to recompile the model for these modifications to take effect
# we use Adam with a low learning rate
optimizer = Adam(lr=1e-5)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

model.fit_generator(train_generator, samples_per_epoch=len(X_train), nb_epoch=300, verbose=1, 
                    callbacks=[early_stopping, model_checkpoint, learningrate_schedule, tensorboard], 
                    validation_data=valid_generator, nb_val_samples=len(X_valid), class_weight=class_weight, nb_worker=3, pickle_safe=True)


Loading model from checkpoints file ./checkpoints/checkpoint3/weights.014-0.0369.hdf5
Epoch 1/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.1055 - acc: 0.9901
/opt/anaconda3/lib/python3.5/site-packages/keras/engine/training.py:1470: UserWarning: Epoch comprised more than `samples_per_epoch` samples, which might affect learning results. Set `samples_per_epoch` correctly to avoid this warning.
  warnings.warn('Epoch comprised more than '
Epoch 00000: val_loss improved from inf to 0.03269, saving model to ./checkpoints/checkpoint3/weights.000-0.0327.hdf5
15616/15583 [==============================] - 369s - loss: 0.1053 - acc: 0.9899 - val_loss: 0.0327 - val_acc: 0.9917
Epoch 2/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0950 - acc: 0.9912Epoch 00001: val_loss did not improve
15616/15583 [==============================] - 342s - loss: 0.0952 - acc: 0.9911 - val_loss: 0.0376 - val_acc: 0.9894
Epoch 3/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.0856 - acc: 0.9906Epoch 00002: val_loss did not improve
15645/15583 [==============================] - 349s - loss: 0.0869 - acc: 0.9905 - val_loss: 0.0416 - val_acc: 0.9904
Epoch 4/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0964 - acc: 0.9912Epoch 00003: val_loss did not improve
15616/15583 [==============================] - 343s - loss: 0.0963 - acc: 0.9912 - val_loss: 0.0366 - val_acc: 0.9902
Epoch 5/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0763 - acc: 0.9916Epoch 00004: val_loss did not improve
15616/15583 [==============================] - 345s - loss: 0.0757 - acc: 0.9917 - val_loss: 0.0376 - val_acc: 0.9879
Epoch 6/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.0836 - acc: 0.9912Epoch 00005: val_loss did not improve
15645/15583 [==============================] - 343s - loss: 0.0838 - acc: 0.9913 - val_loss: 0.0406 - val_acc: 0.9861
Epoch 7/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0689 - acc: 0.9921Epoch 00006: val_loss did not improve

Epoch 00006: reducing learning rate to 9.999999747378752e-07.
15616/15583 [==============================] - 345s - loss: 0.0694 - acc: 0.9921 - val_loss: 0.0461 - val_acc: 0.9861
Epoch 8/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0721 - acc: 0.9906Epoch 00007: val_loss did not improve
15616/15583 [==============================] - 343s - loss: 0.0718 - acc: 0.9907 - val_loss: 0.0434 - val_acc: 0.9861
Epoch 9/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.0650 - acc: 0.9916Epoch 00008: val_loss did not improve
15645/15583 [==============================] - 345s - loss: 0.0647 - acc: 0.9915 - val_loss: 0.0425 - val_acc: 0.9871
Epoch 10/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0714 - acc: 0.9918Epoch 00009: val_loss did not improve
15616/15583 [==============================] - 344s - loss: 0.0710 - acc: 0.9918 - val_loss: 0.0425 - val_acc: 0.9884
Epoch 11/300
15488/15583 [============================>.] - ETA: 1s - loss: 0.0661 - acc: 0.9913Epoch 00010: val_loss did not improve
15616/15583 [==============================] - 344s - loss: 0.0661 - acc: 0.9913 - val_loss: 0.0343 - val_acc: 0.9917
Epoch 12/300
15517/15583 [============================>.] - ETA: 1s - loss: 0.0688 - acc: 0.9927Epoch 00011: val_loss did not improve

Epoch 00011: reducing learning rate to 9.999999974752428e-08.
15645/15583 [==============================] - 343s - loss: 0.0693 - acc: 0.9926 - val_loss: 0.0413 - val_acc: 0.9887
Epoch 00011: early stopping
Out[7]:
<keras.callbacks.History at 0x7ff185185940>

In [ ]:
#resume training

files = glob.glob(CHECKPOINT_DIR+'*')
val_losses = [float(f.split('-')[-1][:-5]) for f in files]
index = val_losses.index(min(val_losses))
print('Loading model from checkpoints file ' + files[index])
model = load_model(files[index])
# print('Loading model from weights.004-0.0565.hdf5')
# model = load_model('./checkpoints/checkpoint3/weights.004-0.0565.hdf5')

model.fit_generator(train_generator, samples_per_epoch=len(X_train), nb_epoch=30, verbose=1, 
                    callbacks=[early_stopping, model_checkpoint, learningrate_schedule, tensorboard], 
                    validation_data=valid_generator, nb_val_samples=len(X_valid), class_weight=class_weight, nb_worker=3, pickle_safe=True)

In [18]:
#test-time augmentation
# print ('data_train_BBCrop_{}_{}.pickle exists. Loading data from file.'.format(ROWS, COLS))
# with open('../data/data_train_BBCrop_{}_{}.pickle'.format(ROWS, COLS), 'rb') as f:
#     data_train = pickle.load(f)
# X_train_crop = data_train['X_train_crop']
# y_train_crop = data_train['y_train_crop']
# GT_crop_files = data_train['train_crop_files']
# class_weight = data_train['class_weight']

# print('Loading data done.')
# X_train_crop = X_train_crop.astype(np.float32)
# print('Convert to float32 done.')
# X_train_crop /= 255.
# print('Rescale by 255 done.')

# #featurewise_center
# mean = np.mean(X_train_crop, axis=0, keepdims=True)
# mean = np.mean(mean, axis=(1,2), keepdims=True)
# X_train_crop_centered = X_train_crop - mean

# #pad so the number of samples is divisible by BatchSize
# X_pad_array = np.zeros((math.ceil(len(X_train_crop)/BatchSize)*BatchSize-len(X_train_crop),ROWS,COLS,3), dtype=np.float32)
# X_train_crop_centered = np.concatenate((X_train_crop_centered, X_pad_array), axis=0)
# y_pad_array = np.zeros((math.ceil(len(y_train_crop)/BatchSize)*BatchSize-len(y_train_crop),len(FISH_CLASSES)), dtype=np.float64)
# y_train_crop = np.concatenate((y_train_crop, y_pad_array), axis=0)

# files = glob.glob(CHECKPOINT_DIR+'*')
# val_losses = [float(f.split('-')[-1][:-5]) for f in files]
# index = val_losses.index(min(val_losses))
# print('Loading model from checkpoints file ' + files[index])
# model = load_model(files[index])
# # print('Loading model from weights.004-0.0565.hdf5')
# # model = load_model('./checkpoints/checkpoint2/weights.004-0.0565.hdf5')

test_aug_datagen = ImageDataGenerator(
#     featurewise_center=True,
    rotation_range=0,
    shear_range=0,
    zoom_range=0,
    width_shift_range=0,
    height_shift_range=0,
    horizontal_flip=False,
    vertical_flip=False)
# test_aug_datagen.fit(X_train_crop)

nbr_augmentation = 1  #with all transforms disabled above, this is effectively a single plain prediction pass
random_seed = [1986,8,22,7,13]
for idx in range(nbr_augmentation):
    print('{}th augmentation for testing ...'.format(idx))
    #shuffle=False!!!
    test_aug_generator = test_aug_datagen.flow(X_train_crop_centered, y_train_crop, batch_size=BatchSize, shuffle=False, seed=random_seed[idx])
    print('Begin to predict for testing data ...')
    if idx == 0:
        GT_crop_preds = model.predict_generator(test_aug_generator, val_samples=len(X_train_crop_centered), nb_worker=3, pickle_safe=True)
    else:
        GT_crop_preds += model.predict_generator(test_aug_generator, val_samples=len(X_train_crop_centered), nb_worker=3, pickle_safe=True)

GT_crop_preds /= nbr_augmentation


0th augmentation for testing ...
Begin to predict for testing data ...
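
A small follow-up sketch (not in the original run) to decode the averaged probabilities back into class names via the fitted LabelEncoder:

# Sketch: map each averaged probability row to its argmax class label.
pred_labels = le.inverse_transform(np.argmax(GT_crop_preds, axis=1))
print(pred_labels[:10])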

In [19]:
GT_crop_preds[19479:,:]


Out[19]:
array([[  1.01243975e-04,   7.18769968e-07,   2.41807174e-06,
          1.99580228e-08,   9.99779522e-01,   1.64103021e-05,
          7.38164317e-06,   9.23306288e-05],
       [  5.67315910e-05,   5.74144963e-07,   3.62269711e-05,
          5.82378183e-04,   9.99319673e-01,   2.26338670e-06,
          1.75437634e-07,   1.99888200e-06],
       [  1.81158120e-03,   2.29732495e-06,   1.28677848e-05,
          1.28166573e-06,   9.98105407e-01,   5.80258493e-05,
          2.20823928e-08,   8.61761055e-06],
       [  3.23779605e-05,   2.31471087e-04,   5.52165475e-05,
          8.99465340e-06,   9.99175489e-01,   1.54436115e-04,
          2.95495003e-04,   4.65662706e-05],
       [  4.11524146e-04,   1.11431598e-06,   7.14718681e-07,
          3.30897421e-07,   9.98724759e-01,   7.02449397e-05,
          2.66168754e-05,   7.64610711e-04],
       [  6.28126145e-05,   7.11259690e-06,   1.07649876e-05,
          9.14094073e-07,   9.99714196e-01,   9.80158147e-05,
          1.03790167e-04,   2.40488816e-06],
       [  2.43342176e-01,   2.39938381e-03,   5.16776287e-04,
          2.94830006e-05,   7.48372257e-01,   3.70077766e-03,
          1.62159314e-03,   1.75369041e-05],
       [  9.21519822e-05,   1.61129196e-06,   3.05202454e-01,
          1.23640306e-10,   6.87853217e-01,   1.03495004e-04,
          2.55249237e-04,   6.49181940e-03],
       [  1.87865878e-03,   1.53880042e-04,   6.74778013e-04,
          1.02366198e-06,   9.95069027e-01,   4.59190014e-05,
          2.12684716e-03,   5.00175993e-05],
       [  1.91529193e-08,   3.54096323e-06,   8.74561556e-06,
          9.21760411e-12,   9.99987006e-01,   4.32079958e-07,
          2.14709637e-08,   2.87946989e-07],
       [  1.15192262e-02,   2.04893749e-05,   3.71531064e-06,
          7.52492087e-06,   9.86303270e-01,   1.11599089e-04,
          3.48473550e-05,   1.99940032e-03],
       [  2.22815524e-05,   2.15920568e-06,   1.13978933e-06,
          7.54377083e-07,   9.99968052e-01,   4.44308762e-06,
          5.60160800e-07,   6.29824910e-07],
       [  2.52191912e-05,   9.39964724e-04,   2.37222121e-05,
          1.89568212e-07,   9.98985469e-01,   1.79824383e-06,
          1.75124605e-05,   6.15942326e-06],
       [  5.98648505e-04,   1.07759763e-04,   1.43487501e-04,
          2.06489894e-05,   9.98204470e-01,   2.71253142e-04,
          6.42322237e-04,   1.13772003e-05],
       [  4.63355798e-04,   4.83970609e-07,   3.95193105e-08,
          3.26752114e-09,   9.99534726e-01,   1.44077865e-06,
          5.97699135e-09,   1.14881242e-08],
       [  2.00800750e-05,   1.37274583e-05,   1.66242262e-06,
          1.03239290e-06,   9.99956250e-01,   4.72338843e-06,
          1.89960747e-06,   6.21469212e-07],
       [  2.79957982e-04,   4.66054516e-05,   1.21034609e-04,
          4.55576264e-05,   9.99363124e-01,   7.75444132e-05,
          3.75067539e-05,   2.87014500e-05],
       [  4.00904159e-04,   7.60920557e-06,   1.63383265e-06,
          5.95277641e-07,   9.99580562e-01,   3.26325403e-06,
          5.13696341e-06,   3.67749976e-07],
       [  4.12722565e-02,   2.04944561e-04,   2.44371950e-06,
          4.27520182e-03,   9.53774631e-01,   4.19242540e-04,
          2.00355244e-05,   3.12197662e-05],
       [  2.65258190e-04,   5.11534563e-06,   8.63270648e-03,
          1.91923810e-09,   9.91000950e-01,   7.96131644e-06,
          2.16128319e-06,   8.58841231e-05],
       [  6.43114618e-06,   8.07388369e-06,   9.24337542e-07,
          1.51826157e-07,   9.99981284e-01,   9.11852226e-07,
          1.97043300e-06,   2.91563907e-07],
       [  1.46550601e-05,   1.13465896e-04,   4.43826957e-06,
          5.68738699e-07,   9.99855042e-01,   8.37247171e-06,
          1.87326132e-09,   3.43610168e-06],
       [  3.32202693e-03,   6.22143307e-06,   6.13983648e-05,
          6.56776189e-09,   9.44793403e-01,   1.84875898e-04,
          2.70270836e-03,   4.89292294e-02],
       [  1.71141991e-07,   5.43769102e-06,   1.26677875e-07,
          6.90654556e-09,   9.99952435e-01,   2.58265175e-07,
          2.41751454e-08,   4.15547402e-05],
       [  2.33881892e-05,   1.34546153e-05,   1.63196535e-06,
          2.49816117e-06,   9.99849439e-01,   3.09173993e-05,
          1.37978918e-06,   7.73514985e-05],
       [  1.21819755e-06,   1.57854430e-07,   9.52938353e-06,
          1.87916993e-09,   9.99980927e-01,   4.55159380e-07,
          5.30288617e-07,   7.29317662e-06],
       [  1.57116435e-03,   8.17727596e-06,   2.83819645e-06,
          2.67417988e-07,   9.19261456e-01,   1.15939190e-04,
          7.90209994e-02,   1.91995005e-05],
       [  5.62818896e-05,   3.36082521e-05,   2.63676211e-05,
          1.88578961e-05,   9.99835372e-01,   2.28051340e-05,
          5.81150880e-06,   9.07178730e-07],
       [  1.97456266e-05,   1.26105675e-07,   2.07298186e-07,
          4.72524192e-07,   9.99978185e-01,   1.26405865e-07,
          7.11019766e-07,   3.29305180e-07],
       [  2.42236289e-04,   7.33270511e-08,   3.44177309e-09,
          9.82066428e-09,   9.99751031e-01,   3.11526846e-11,
          5.13537515e-08,   6.57114560e-06],
       [  3.66708264e-02,   1.20054698e-04,   1.55075429e-06,
          5.69238819e-06,   9.62677062e-01,   2.25916901e-05,
          7.55480050e-06,   4.94672451e-04],
       [  4.16969473e-04,   1.14768998e-04,   3.85479798e-05,
          1.58721668e-05,   9.98750329e-01,   1.75858426e-04,
          4.56721405e-04,   3.09007337e-05],
       [  7.79625785e-04,   3.31491356e-05,   2.25929452e-05,
          6.22854204e-05,   9.94217753e-01,   3.35232448e-03,
          8.53514532e-04,   6.78741490e-04],
       [  4.76448895e-06,   4.97815745e-05,   4.29708098e-06,
          1.83375937e-09,   9.99938846e-01,   5.86227372e-07,
          6.43235722e-08,   1.51152346e-06],
       [  5.90406125e-05,   4.67090227e-04,   1.31177894e-05,
          1.41643413e-05,   9.98698711e-01,   5.86362381e-04,
          6.41835795e-05,   9.72374037e-05],
        [  3.23780514e-05,   2.31471757e-04,   5.52167076e-05,
           8.99466158e-06,   9.99175489e-01,   1.54436406e-04,
           2.95495585e-04,   4.65663979e-05],
        ...,
        [  4.70367115e-04,   3.91134051e-07,   4.54986730e-06,
           2.18132286e-08,   9.94804323e-01,   2.41997745e-03,
           2.29978538e-03,   6.27662246e-07]], dtype=float32)
(output truncated: one softmax row per crop over the 8 classes; NoF dominates throughout this slice)

In [21]:
GT_crop_preds[3000:3005]


Out[21]:
array([[  9.75128233e-01,   6.61880942e-04,   3.49770324e-10,
          9.54435997e-09,   1.36938374e-02,   1.67527687e-05,
          4.39272715e-11,   1.04993181e-02],
       [  9.99776065e-01,   6.95186282e-06,   1.86960469e-09,
          1.07511866e-07,   2.04159398e-04,   1.11168019e-05,
          3.87707900e-07,   1.17236073e-06],
       [  9.97722685e-01,   2.41736983e-04,   8.48091190e-07,
          3.50547786e-08,   1.24716156e-04,   1.66591688e-03,
          1.09547074e-07,   2.43850300e-04],
       [  5.54468691e-01,   8.93118046e-03,   1.14930035e-05,
          2.54409248e-03,   1.85485900e-04,   2.25799112e-03,
          2.61029963e-05,   4.31574970e-01],
       [  9.99539495e-01,   8.41121528e-06,   6.67353173e-10,
          9.13688902e-10,   4.50788357e-04,   2.67058784e-08,
          1.37856249e-09,   1.30069316e-06]], dtype=float32)

In [23]:
GT_crop_preds = GT_crop_preds[:19479,:]   #keep only the rows for the 19479 ground-truth training crops

In [9]:
#get GT_crop_BBClassifier_preds 

print('Found data_train_BBCrop_{}_{}.pickle; loading data from file.'.format(ROWS, COLS))
with open('../data/data_train_BBCrop_{}_{}.pickle'.format(ROWS, COLS), 'rb') as f:
    data_train = pickle.load(f)
X_train_crop = data_train['X_train_crop']
y_train_crop = data_train['y_train_crop']
GT_crop_files = data_train['train_crop_files']
class_weight = data_train['class_weight']

print('Loading data done.')
X_train_crop = X_train_crop.astype(np.float32)
print('Convert to float32 done.')
X_train_crop /= 255.
print('Rescale by 255 done.')

#featurewise_center (reuse the helper defined at the top of the notebook)
X_train_crop_centered = featurewise_center(X_train_crop)

#pick the checkpoint with the lowest validation loss, parsed from 'weights.EEE-L.LLLL.hdf5' filenames
files = glob.glob(CHECKPOINT_DIR+'*')
val_losses = [float(f.split('-')[-1][:-5]) for f in files]
index = val_losses.index(min(val_losses))
print('Loading model from checkpoints file ' + files[index])
model = load_model(files[index])
# print('Loading model from weights.004-0.0565.hdf5')
# model = load_model('./checkpoints/checkpoint2/weights.004-0.0565.hdf5')

print(model.evaluate(X_train_crop_centered, y_train_crop, batch_size=BatchSize, verbose=1))
GT_crop_preds = model.predict(X_train_crop_centered, batch_size=BatchSize, verbose=1)

GT_crop_preds_df = pd.DataFrame(GT_crop_preds, columns=['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT'])
GT_crop_preds_df.insert(0,'GT_crop_files',GT_crop_files)
#FISH_CLASSES = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
#GT_crop_preds_df.insert(0,'y_train_crop',[FISH_CLASSES[i] for i in np.argmax(y_train_crop, axis=1)])

GT_crop_bboxs_df = pd.read_pickle('../data/train_crop/GT_crop_files_BBox.pickle')

GT_crop_BBClassifier_preds = pd.merge(GT_crop_bboxs_df, GT_crop_preds_df) 

GT_crop_BBClassifier_preds['image_files'] = GT_crop_BBClassifier_preds.GT_crop_files.apply(lambda x: 'img_'+x.split('_')[2])
GT_crop_BBClassifier_preds['gt'] = GT_crop_BBClassifier_preds.GT_crop_files.apply(lambda x: x.split('_')[-1][:-4])

#add logloss
columns_reorder = ['image_files', 'GT_crop_files', 'gt', 'xmin', 'ymin', 'xmax', 'ymax', 'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']
GT_crop_BBClassifier_preds = GT_crop_BBClassifier_preds[columns_reorder]
def f_logloss(row):
    fish = row[2]   #the 'gt' column
    ind = ['NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT'].index(fish)
    return -math.log(row[7+ind])   #probability the classifier assigned to the ground-truth class
GT_crop_BBClassifier_preds['logloss'] = GT_crop_BBClassifier_preds.apply(f_logloss, axis=1)

columns_reorder = ['image_files', 'GT_crop_files', 'gt', 'logloss', 'xmin', 'ymin', 'xmax', 'ymax', 'NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']
GT_crop_BBClassifier_preds = GT_crop_BBClassifier_preds[columns_reorder]

GT_crop_BBClassifier_preds[5000:5005]


Found data_train_BBCrop_224_224.pickle; loading data from file.
Loading data done.
Convert to float32 done.
Rescale by 255 done.
Loading model from checkpoints file ./checkpoints/checkpoint3/weights.014-0.0369.hdf5
19479/19479 [==============================] - 186s   
[0.040039356157520814, 0.9886031110426613]
19479/19479 [==============================] - 186s   
Out[9]:
image_files GT_crop_files gt logloss xmin ymin xmax ymax NoF ALB BET DOL LAG OTHER SHARK YFT
5000 img_00756 ALB_img_00756_1_NoF.jpg NoF 0.001064 902.370483 695.567139 1052.943848 713.209473 0.998936 0.000046 8.144959e-05 7.667454e-04 9.777488e-06 5.438638e-05 3.133449e-05 7.343937e-05
5001 img_00756 ALB_img_00756_2_NoF.jpg NoF 0.000040 808.216187 8.685810 872.366211 59.957012 0.999960 0.000020 4.806904e-07 1.144817e-06 5.986817e-08 1.389091e-05 3.767447e-06 4.659757e-07
5002 img_00756 ALB_img_00756_3_NoF.jpg NoF 0.000126 332.880005 575.668457 692.472290 687.581421 0.999874 0.000108 6.362619e-09 5.230358e-08 2.646751e-12 8.413108e-08 3.308853e-08 1.793942e-05
5003 img_00762 ALB_img_00762_0_NoF.jpg NoF 3.118900 851.809082 557.221191 1155.078979 685.999146 0.044206 0.954231 6.584862e-04 1.994942e-08 1.470334e-07 1.177057e-06 7.160058e-07 9.028077e-04
5004 img_00762 ALB_img_00762_1_NoF.jpg NoF 0.002344 463.857727 7.145826 660.958191 40.875198 0.997658 0.001371 3.139507e-05 3.640607e-04 7.965326e-07 4.707745e-04 9.931045e-05 4.427475e-06
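
For reference, the row-wise f_logloss apply above can be written vectorized; a minimal sketch, assuming GT_crop_BBClassifier_preds already carries the eight class-probability columns:

cls_cols = ['NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']
probs = GT_crop_BBClassifier_preds[cls_cols].values
gt_idx = GT_crop_BBClassifier_preds['gt'].map({c: i for i, c in enumerate(cls_cols)}).values.astype(int)
GT_crop_BBClassifier_preds['logloss'] = -np.log(probs[np.arange(len(probs)), gt_idx])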

In [10]:
#class_weight stg3 weights.014-0.0369.hdf5
GT_crop_BBClassifier_preds.groupby(['gt'])['logloss'].mean()


Out[10]:
gt
ALB      0.095330
BET      0.147724
DOL      0.106721
LAG      0.000496
NoF      0.026522
OTHER    0.088398
SHARK    0.012763
YFT      0.064794
Name: logloss, dtype: float64

In [27]:
#class_weight stg2
GT_crop_BBClassifier_preds.groupby(['gt'])['logloss'].mean()


Out[27]:
gt
ALB      0.122500
BET      0.294225
DOL      0.346351
LAG      0.004012
NoF      0.047464
OTHER    0.215948
SHARK    0.026612
YFT      0.208748
Name: logloss, dtype: float64

In [12]:
#before class_weight
GT_crop_BBClassifier_preds.groupby(['gt'])['logloss'].mean()


Out[12]:
gt
ALB      0.079761
BET      0.743728
DOL      0.325572
LAG      0.096724
NoF      0.013140
OTHER    0.343257
SHARK    0.056072
YFT      0.310518
Name: logloss, dtype: float64
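
The three snapshots above show per-class logloss falling on the harder classes (BET, OTHER, YFT, and eventually DOL) as class_weight is tightened across training stages. One plausible way to derive such a weighting (a sketch only; the cached class_weight was not necessarily built this way) is to scale each class by its relative mean logloss:

per_class = GT_crop_BBClassifier_preds.groupby('gt')['logloss'].mean()
class_weight = {int(le.transform([c])[0]): float(per_class[c] / per_class.mean())
                for c in per_class.index}   #harder classes get proportionally larger weights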

In [11]:
#get RFCN bbox from detections_full_AGNOSTICnms.pkl
RFCN_MODEL = 'resnet101_rfcn_ohem_iter_30000'

import pickle 
with open('../data/RFCN_detections/detections_full_AGNOSTICnms_'+RFCN_MODEL+'.pkl','rb') as f:
    detections_full_AGNOSTICnms = pickle.load(f, encoding='latin1') 
    
outputs = []
count = np.zeros(len(detections_full_AGNOSTICnms))

for im in range(len(detections_full_AGNOSTICnms)):
    outputs_im = []
    detects_im = detections_full_AGNOSTICnms[im]
    for i in range(len(detects_im)):
        if np.max(detects_im[i,5:]) >= CONF_THRESH:
            outputs_im.append(detects_im[i,:]) 
    count[im] = len(outputs_im)
    if len(outputs_im) == 0:
        ind = np.argmax(np.max(detects_im[:,5:], axis=1))
        outputs_im.append(detects_im[ind,:])
    outputs_im = np.asarray(outputs_im)
    outputs.append(outputs_im)
    
#crop test images and cache to TEST_CROP_DIR

# if not os.path.exists(TEST_CROP_DIR):
#     os.mkdir(TEST_CROP_DIR)
# files = glob.glob(TEST_CROP_DIR+'*')
# for f in files:
#     os.remove(f)
    
# with open("../RFCN/ImageSets/Main/test.txt","r") as f:
#     ims = f.readlines()
# test_files = [im[:-1]+'.jpg' for im in ims]

# for i in range(len(outputs)):
#     if i%1000 == 0:
#         print(i)
#     filename = test_files[i]
#     bboxes = outputs[i]
#     basename, file_extension = os.path.splitext(filename) 
#     image = Image.open(TEST_DIR+filename)
#     for j in range(len(bboxes)):
#         bbox = bboxes[j]
#         xmin = bbox[0]
#         ymin = bbox[1]
#         xmax = bbox[2]
#         ymax = bbox[3]
#         file_crop = TEST_CROP_DIR+basename+'_{}'.format(j)+'.jpg'
#         cropped = image.crop((xmin, ymin, xmax, ymax))
#         width_cropped, height_cropped = cropped.size
#         if height_cropped > width_cropped: cropped = cropped.transpose(method=2)
#         cropped.save(file_crop)

#the first 1000 entries in test.txt are stg1 test images; the remaining 3777 are training images
print('train_image:{} RFCN>conf_crop:{} RFCN_total_crop:{}'.format(len(outputs)-1000,int(sum(count[1000:])), int(sum([outputs[i].shape[0] for i in range(1000,len(outputs))]))))
print('test_image: {} RFCN>conf_crop:{} RFCN_total_crop:{}'.format(1000,int(sum(count[:1000])), int(sum([outputs[i].shape[0] for i in range(1000)]))))


train_image:3777 RFCN>conf_crop:4321 RFCN_total_crop:4786
test_image: 1000 RFCN>conf_crop:1034 RFCN_total_crop:1251
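
The loop above keeps every detection whose best fish-class score clears CONF_THRESH and, when none does, falls back to the single highest-scoring box, so every image yields at least one crop. The same rule as a small helper (a sketch, assuming the 4-coords-plus-8-scores row layout used here):

def select_detections(dets, thresh=CONF_THRESH):
    best = np.max(dets[:, 5:], axis=1)    #best fish-class score per box (column 4 is NoF)
    keep = dets[best >= thresh]
    if len(keep) == 0:                    #guarantee at least one crop per image
        keep = dets[[np.argmax(best)], :]
    return keep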

In [12]:
#get RFCN_crop_RFCN_preds
test_crop_preds = np.vstack(outputs)[:,:]

columns = ['x0', 'y0', 'x1', 'y1','NoF_RFCN', 'ALB_RFCN', 'BET_RFCN', 'DOL_RFCN', 'LAG_RFCN', 'OTHER_RFCN', 'SHARK_RFCN', 'YFT_RFCN']
RFCN_preds_df = pd.DataFrame(test_crop_preds, columns=columns)


with open("../RFCN/ImageSets/Main/test.txt","r") as f:
    ims = f.readlines()
test_files = [im[:-1]+'.jpg' for im in ims]

test_crop_files_RFCN = []
for i in range(len(outputs)):
    filename = test_files[i]
    basename, file_extension = os.path.splitext(filename) 
    for j in range(len(outputs[i])):
        file_crop = basename+'_{}_'.format(j)+'.jpg'   #trailing underscore matches the cached crop filenames
        test_crop_files_RFCN.append(file_crop)
        
RFCN_preds_df.insert(0, 'test_crop_files', test_crop_files_RFCN)
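
A quick consistency check (sketch): each row of test_crop_preds is one detection, 4 coordinates plus 8 class scores, and the generated filename list must line up with it row for row:

assert test_crop_preds.shape[1] == 12
assert len(test_crop_files_RFCN) == len(RFCN_preds_df)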

In [ ]:
#Load test data

import datetime

def read_image(src):
    """Read and resize individual images"""
    im = Image.open(src)
    im = im.resize((COLS, ROWS), Image.BILINEAR)
    im = np.asarray(im)
    return im

if os.path.exists('../data/data_test_BBCrop_{}_{}.pickle'.format(ROWS, COLS)):
    print('Found data_test_BBCrop_{}_{}.pickle; loading test data from file.'.format(ROWS, COLS))
    with open('../data/data_test_BBCrop_{}_{}.pickle'.format(ROWS, COLS), 'rb') as f:
        data_test = pickle.load(f)
    X_test_crop = data_test['X_test_crop']
    test_crop_files = data_test['test_crop_files']
else:
    print('Loading test data from original images. Generating data_test_BBCrop_{}_{}.pickle.'.format(ROWS, COLS))

    test_crop_files = sorted([im for im in os.listdir(TEST_CROP_DIR)])
    X_test_crop = np.ndarray((len(test_crop_files), ROWS, COLS, 3), dtype=np.uint8)

    for i, im in enumerate(test_crop_files): 
        X_test_crop[i] = read_image(TEST_CROP_DIR+im)
        if i%1000 == 0: print('Processed {} of {}'.format(i, len(test_crop_files)))
            
    data_test = {'X_test_crop': X_test_crop,'test_crop_files': test_crop_files }
    
    with open('../data/data_test_BBCrop_{}_{}.pickle'.format(ROWS, COLS), 'wb') as f:
        pickle.dump(data_test, f, protocol=4)

print('Loading data done.')
X_test_crop = X_test_crop.astype(np.float32)
print('Convert to float32 done.')
X_test_crop /= 255.
print('Rescale by 255 done.')


Found data_test_BBCrop_224_224.pickle; loading test data from file.

In [14]:
#get RFCN_crop_BBClassifier_preds
files = glob.glob(CHECKPOINT_DIR+'*')
val_losses = [float(f.split('-')[-1][:-5]) for f in files]
index = val_losses.index(min(val_losses))
print('Loading model from checkpoints file ' + files[index])
model = load_model(files[index])
# print('Loading model from weights.004-0.0565.hdf5')
# model = load_model('./checkpoints/checkpoint2/weights.004-0.0565.hdf5')

X_test_crop_centered = featurewise_center(X_test_crop)
test_crop_preds = model.predict(X_test_crop_centered, batch_size=BatchSize, verbose=1)

columns = ['ALB_BBCROP', 'BET_BBCROP', 'DOL_BBCROP', 'LAG_BBCROP', 'NoF_BBCROP', 'OTHER_BBCROP', 'SHARK_BBCROP', 'YFT_BBCROP']
BBCROP_preds_df = pd.DataFrame(test_crop_preds, columns=columns)

test_crop_files_BBCROP = test_crop_files
BBCROP_preds_df.insert(0, 'test_crop_files', test_crop_files_BBCROP)


Loading model from checkpoints file ./checkpoints/checkpoint3/weights.014-0.0369.hdf5
6037/6037 [==============================] - 61s    
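
Note that featurewise_center recomputes the channel means on the test crops themselves. To center with the training-set statistics instead, one could subtract the mean computed in the training cell (a sketch, assuming that mean array is still in scope):

X_test_crop_centered = X_test_crop - mean   #reuse the training mean rather than the test mean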

In [15]:
#get RFCN_crop_RFCN_BBClassifier_preds

test_preds_df = pd.merge(RFCN_preds_df, BBCROP_preds_df)  
test_preds_df['test_files'] = test_preds_df.test_crop_files.apply(lambda x: 'img_'+x.split('_')[1])

#add ground truth from ImageSets/Main/train_test.txt to test_preds_df
with open("../RFCN/ImageSets/Main/train_test.txt","r") as f:
    train_file_labels = f.readlines()
    
for index, row in test_preds_df.iterrows():
    im = row['test_files']
    gt = 'nan'              #remains 'nan' for stg1 test images, which have no entry in train_test.txt
    logloss_RFCN = -np.inf
    logloss_BBCROP = -np.inf
    for im_label in train_file_labels:
        if im_label[:9] == im:          #each line of train_test.txt is 'img_XXXXX LABEL'
            gt = im_label[10:-1]
            logloss_RFCN = -math.log(row[gt+'_RFCN'])
            logloss_BBCROP = -math.log(row[gt+'_BBCROP'])
    test_preds_df.set_value(index,'gt',gt)
    test_preds_df.set_value(index,'logloss_RFCN',logloss_RFCN)
    test_preds_df.set_value(index,'logloss_BBCROP',logloss_BBCROP)
    
columns_reorder = ['test_files', 'gt', 'logloss_RFCN', 'logloss_BBCROP', 'test_crop_files', 'x0', 'y0', 'x1', 'y1']
FISH_CLASSES = ['NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']
for c in FISH_CLASSES:
    columns_reorder.append(c+'_RFCN')
    columns_reorder.append(c+'_BBCROP')
test_preds_df = test_preds_df[columns_reorder]
test_preds_df.head()


Out[15]:
test_files gt logloss_RFCN logloss_BBCROP test_crop_files x0 y0 x1 y1 NoF_RFCN ... DOL_RFCN DOL_BBCROP LAG_RFCN LAG_BBCROP OTHER_RFCN OTHER_BBCROP SHARK_RFCN SHARK_BBCROP YFT_RFCN YFT_BBCROP
0 img_00005 nan -inf -inf img_00005_0_.jpg 144.539688 682.612183 224.428116 713.138794 0.999997 ... 2.145238e-07 5.284961e-06 5.994206e-07 1.130975e-08 2.103843e-07 6.882438e-08 1.282200e-07 4.956751e-08 2.089381e-07 0.000001
1 img_00007 nan -inf -inf img_00007_0_.jpg 716.991760 254.020020 1157.174316 488.850403 0.000127 ... 3.268235e-06 1.764687e-06 8.629868e-07 6.548625e-08 9.324418e-08 1.163954e-07 3.096656e-06 6.322229e-09 9.997229e-01 0.907468
2 img_00009 nan -inf -inf img_00009_0_.jpg 604.372192 99.314407 923.525879 213.722458 0.000411 ... 7.386075e-07 4.916170e-06 6.028753e-07 3.080536e-06 1.481739e-04 5.886561e-04 6.061006e-07 2.777596e-08 2.904518e-06 0.000234
3 img_00009 nan -inf -inf img_00009_1_.jpg 312.184143 127.184914 684.601257 265.319366 0.000080 ... 2.058682e-07 9.787890e-07 4.018886e-07 4.795236e-07 1.093486e-04 3.608901e-04 4.156222e-07 2.898989e-08 9.210435e-07 0.000074
4 img_00009 nan -inf -inf img_00009_2_.jpg 927.494385 143.513062 1195.756836 268.516327 0.011148 ... 3.505660e-05 1.442135e-07 7.493963e-05 1.477157e-05 2.543065e-03 2.178922e-05 9.936455e-06 1.920231e-08 6.904111e-05 0.000113

5 rows × 25 columns


In [35]:
#visualization
#import xml.etree.ElementTree

FISH_CLASSES = ['NoF', 'ALB', 'BET', 'DOL', 'LAG', 'OTHER', 'SHARK', 'YFT']

with open("../RFCN/ImageSets/Main/test.txt","r") as f:
    ims = f.readlines()
test_files = [im[:-1] for im in ims][1000:]   #skip the first 1000 (stg1 test) entries so ground truth is available

for j in range(20):
    RFCN_dets = test_preds_df.loc[test_preds_df['test_files']==test_files[j]]
    im = Image.open('../RFCN/JPEGImages/'+test_files[j]+'.jpg')
    im = np.asarray(im)
    fig, ax = plt.subplots(figsize=(10, 8))
    ax.imshow(im, aspect='equal')
    for index,row in RFCN_dets.iterrows():
        row = row.tolist()
        bbox = row[5:9]
        RFCN = [row[i] for i in [9,11,13,15,17,19,21,23]]
        BBCROP = [row[i] for i in [10,12,14,16,18,20,22,24]]
        score_RFCN = max(RFCN)
        score_BBCROP = max(BBCROP)
        index_RFCN = RFCN.index(score_RFCN)
        index_BBCROP = BBCROP.index(score_BBCROP)
        class_RFCN = FISH_CLASSES[index_RFCN]
        class_BBCROP = FISH_CLASSES[index_BBCROP]
        ax.add_patch(plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='red', linewidth=2))
        ax.text(bbox[0], bbox[1] - 2, 'RFCN_{:s} {:.3f} \nCROP_{:s} {:.3f}'.format(class_RFCN, score_RFCN, class_BBCROP, score_BBCROP), bbox=dict(facecolor='red', alpha=0.5), fontsize=8, color='white')
    GT_dets = GT_crop_BBClassifier_preds.loc[GT_crop_BBClassifier_preds['image_files']==test_files[j]]
    for index,row in GT_dets.iterrows():
        row = row.tolist()
        bbox = row[4:8]
        BBCROP = row[8:]
        score_BBCROP = max(BBCROP)
        index_BBCROP = BBCROP.index(score_BBCROP)
        class_BBCROP = FISH_CLASSES[index_BBCROP]
        ax.add_patch(plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='green', linewidth=2))
        ax.text(bbox[0], bbox[3] + 40, 'GT_{:s} \nCROP_{:s} {:.3f}'.format(row[2], class_BBCROP, score_BBCROP), bbox=dict(facecolor='green', alpha=0.5), fontsize=8, color='white')
#     root = xml.etree.ElementTree.parse('../RFCN/Annotations/'+test_files[j]+'.xml').getroot()
#     for child in root.findall('object'):
#         bbox = [child.find('bndbox').find('xmin').text, child.find('bndbox').find('ymin').text, child.find('bndbox').find('xmax').text,child.find('bndbox').find('ymax').text]
#         bbox = [float(x) for x in bbox]
#         ax.add_patch(plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='green', linewidth=2))
#         ax.text(bbox[0], bbox[3] - 2, 'GT_{:s}'.format(child.find('name').text), bbox=dict(facecolor='green', alpha=0.5), fontsize=8, color='white')  
    ax.set_title(('Image {:s} {:s}').format(test_files[j], row[1]), fontsize=10)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()



In [ ]:
#test preds clsMaxAve
FISH_CLASSES = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']

files = glob.glob(CHECKPOINT_DIR+'*')   #same best-val-loss checkpoint selection as above
val_losses = [float(f.split('-')[-1][:-5]) for f in files]
index = val_losses.index(min(val_losses))
print('Loading model from', files[index])
model = load_model(files[index])
# print('Loading model from weights.004-0.0565.hdf5')
# model = load_model('./checkpoints/checkpoint2/weights.004-0.0565.hdf5')

X_test_crop_centered = featurewise_center(X_test_crop)
test_crop_preds = model.predict(X_test_crop_centered, batch_size=BatchSize, verbose=1)

with open("../RFCN/ImageSets/Main/test.txt","r") as f:
    ims = f.readlines()
test_files = [im[:-1]+'.jpg' for im in ims]

count = np.zeros(len(test_files))

test_preds = np.ndarray((len(test_files), test_crop_preds.shape[1]), dtype=np.float32)
for j in range(len(test_files)):
    if j%1000 == 0:
        print(j)
    file = test_files[j]
    test_preds_im = []
    for i in range(len(test_crop_files)):
        if test_crop_files[i][:9] == file[:9]:   #match crops to their image by the 9-char 'img_XXXXX' id
            test_preds_im.append(test_crop_preds[i])
    test_preds_im = np.asarray(test_preds_im)
    score_max = np.max(test_preds_im, axis=1)
    inds = np.argmax(test_preds_im, axis=1)
    labels = [FISH_CLASSES[ind] for ind in inds]
    columns = FISH_CLASSES[:]
    test_preds_im_df = pd.DataFrame(test_preds_im, columns=columns)
    test_preds_im_df['max_cls'] = labels
    test_preds_im_df['max_score'] = score_max 
    test_preds_im_df['Counts'] = test_preds_im_df.groupby(['max_cls'])['max_cls'].transform('count')
    idx = test_preds_im_df.groupby(['max_cls'])['max_score'].transform(max) == test_preds_im_df['max_score']
    test_preds_im_df = test_preds_im_df[idx]
    count[j] = test_preds_im_df.shape[0]
    l = FISH_CLASSES.copy()
    l.append('Counts')
    test_preds_im_array = test_preds_im_df[l].as_matrix() 
    test_preds[j] = np.average(test_preds_im_array[:,:-1], axis=0, weights=test_preds_im_array[:,-1], returned=False)


Loading model from ./checkpoints/checkpoint3/weights.000-0.0327.hdf5
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-7-f8b4e43aaa84> in <module>()
      6 index = val_losses.index(min(val_losses))
      7 print('Loading model from', files[index])
----> 8 model = load_model(files[index])
      9 # print('Loading model from weights.004-0.0565.hdf5')
     10 # model = load_model('./checkpoints/checkpoint2/weights.004-0.0565.hdf5')

/opt/anaconda3/lib/python3.5/site-packages/keras/models.py in load_model(filepath, custom_objects)
    141 
    142     # set weights
--> 143     model.load_weights_from_hdf5_group(f['model_weights'])
    144 
    145     # instantiate optimizer

/opt/anaconda3/lib/python3.5/site-packages/keras/engine/topology.py in load_weights_from_hdf5_group(self, f)
   2603                         weight_values[0] = w
   2604                 weight_value_tuples += zip(symbolic_weights, weight_values)
-> 2605             K.batch_set_value(weight_value_tuples)
   2606 
   2607     def load_weights_from_hdf5_group_by_name(self, f):

/opt/anaconda3/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in batch_set_value(tuples)
   1048             assign_ops.append(assign_op)
   1049             feed_dict[assign_placeholder] = value
-> 1050         get_session().run(assign_ops, feed_dict=feed_dict)
   1051 
   1052 

/opt/anaconda3/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in get_session()
    109         session = _SESSION
    110     if not _MANUAL_VAR_INIT:
--> 111         _initialize_variables()
    112     return session
    113 

/opt/anaconda3/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in _initialize_variables()
    200             sess.run(tf.variables_initializer(uninitialized_variables))
    201         else:
--> 202             sess.run(tf.initialize_variables(uninitialized_variables))
    203 
    204 def placeholder(shape=None, ndim=None, dtype=_FLOATX, sparse=False, name=None):

/opt/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    715     try:
    716       result = self._run(None, fetches, feed_dict, options_ptr,
--> 717                          run_metadata_ptr)
    718       if run_metadata:
    719         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/opt/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    913     if final_fetches or final_targets:
    914       results = self._do_run(handle, final_targets, final_fetches,
--> 915                              feed_dict_string, options, run_metadata)
    916     else:
    917       results = []

/opt/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
    963     if handle is None:
    964       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
--> 965                            target_list, options, run_metadata)
    966     else:
    967       return self._do_call(_prun_fn, self._session, handle, feed_dict,

/opt/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
    970   def _do_call(self, fn, *args):
    971     try:
--> 972       return fn(*args)
    973     except errors.OpError as e:
    974       message = compat.as_text(e.message)

/opt/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
    952         return tf_session.TF_Run(session, options,
    953                                  feed_dict, fetch_list, target_list,
--> 954                                  status, run_metadata)
    955 
    956     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 
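
The clsMaxAve rule in the interrupted cell works per image: every crop votes for its argmax class, only the highest-scoring crop per voted class is kept, and the kept rows are averaged with the vote counts as weights. A compact numpy restatement on a hypothetical 3-crop image:

preds_im = np.array([[0.10, 0.70, 0.05, 0.05, 0.02, 0.03, 0.02, 0.03],
                     [0.20, 0.60, 0.05, 0.05, 0.02, 0.03, 0.02, 0.03],
                     [0.05, 0.05, 0.05, 0.05, 0.70, 0.03, 0.02, 0.05]])
scores = preds_im.max(axis=1)
votes = preds_im.argmax(axis=1)    #crop-level argmax classes: [1, 1, 4]
keep = [np.where(votes == c)[0][np.argmax(scores[votes == c])] for c in np.unique(votes)]
weights = [np.sum(votes == votes[k]) for k in keep]    #vote counts: 2 for class 1, 1 for class 4
image_pred = np.average(preds_im[keep], axis=0, weights=weights)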

In [20]:
#temperature scaling: p_T ∝ p**(1/T); T>1 flattens over-confident predictions
T = 3
test_preds_T = np.exp(np.log(test_preds)/T)   #equivalent to test_preds**(1./T)
test_preds_T = test_preds_T/np.sum(test_preds_T, axis=1, keepdims=True)
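
A quick sanity check of what T=3 does to a confident prediction (hypothetical values):

p = np.array([[0.97, 0.01, 0.01, 0.005, 0.002, 0.001, 0.001, 0.001]])
p_T = np.exp(np.log(p)/3)
p_T /= p_T.sum(axis=1, keepdims=True)
print(p_T.round(2))    #the 0.97 peak drops to about 0.49; mass moves toward uniform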

In [4]:
#calculate train logloss
FISH_CLASSES = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']

train_files = test_files[1000:]
train_preds = test_preds_T[1000:,:]
with open("../RFCN/ImageSets/Main/train_test.txt","r") as f:
    train_file_labels = f.readlines()

log_losses = []
for i in range(len(train_preds)):
    im = train_files[i][:-4]
    for im_label in train_file_labels:
        if im_label[:9] == im:
            label = im_label[10:-1]
            index = FISH_CLASSES.index(label)
            log_losses.append(-math.log(train_preds[i,index]))
log_loss = sum(log_losses) / float(len(log_losses))   #note: shadows sklearn.metrics.log_loss imported earlier
print('logloss of train is', log_loss )


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-4-e7ef50e1e597> in <module>()
      2 FISH_CLASSES = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
      3 
----> 4 train_files = test_files[1000:]
      5 train_preds = test_preds_T[1000:,:]
      6 with open("../RFCN/ImageSets/Main/train_test.txt","r") as f:

NameError: name 'test_files' is not defined

In [3]:
train_preds.shape


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-3-d9e34c689f76> in <module>()
----> 1 train_preds.shape

NameError: name 'train_preds' is not defined

In [22]:
#test submission
FISH_CLASSES = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
submission = pd.DataFrame(test_preds_T[:1000,:], columns=FISH_CLASSES)
submission.insert(0, 'image', test_files[:1000])

info = 'RFCN_AGNOSTICnms_'+RFCN_MODEL+'_BBCROP_resnet50_clsMaxAve_conf{:.2f}_T{}_'.format(CONF_THRESH, T) + '{:.4f}'.format(log_loss)
sub_file = 'submission_' + info + '.csv'
submission.to_csv(sub_file, index=False)

In [ ]:
###clear checkpoints folder

if not os.path.exists('./checkpoints'):
    os.mkdir('./checkpoints')
files = glob.glob('./checkpoints/*')
for f in files:
    os.remove(f)

In [ ]:
###clear logs folder

if not os.path.exists('./logs'):
    os.mkdir('./logs')
files = glob.glob('./logs/*')
for f in files:
    os.remove(f)

In [1]:
from keras.applications.resnet50 import ResNet50

base_model = ResNet50(weights='imagenet', include_top=False)
for i, layer in enumerate(base_model.layers):
    print(i, layer.name)


Using TensorFlow backend.
0 input_1
1 zeropadding2d_1
2 conv1
3 bn_conv1
4 activation_1
5 maxpooling2d_1
6 res2a_branch2a
7 bn2a_branch2a
8 activation_2
9 res2a_branch2b
10 bn2a_branch2b
11 activation_3
12 res2a_branch2c
13 res2a_branch1
14 bn2a_branch2c
15 bn2a_branch1
16 merge_1
17 activation_4
18 res2b_branch2a
19 bn2b_branch2a
20 activation_5
21 res2b_branch2b
22 bn2b_branch2b
23 activation_6
24 res2b_branch2c
25 bn2b_branch2c
26 merge_2
27 activation_7
28 res2c_branch2a
29 bn2c_branch2a
30 activation_8
31 res2c_branch2b
32 bn2c_branch2b
33 activation_9
34 res2c_branch2c
35 bn2c_branch2c
36 merge_3
37 activation_10
38 res3a_branch2a
39 bn3a_branch2a
40 activation_11
41 res3a_branch2b
42 bn3a_branch2b
43 activation_12
44 res3a_branch2c
45 res3a_branch1
46 bn3a_branch2c
47 bn3a_branch1
48 merge_4
49 activation_13
50 res3b_branch2a
51 bn3b_branch2a
52 activation_14
53 res3b_branch2b
54 bn3b_branch2b
55 activation_15
56 res3b_branch2c
57 bn3b_branch2c
58 merge_5
59 activation_16
60 res3c_branch2a
61 bn3c_branch2a
62 activation_17
63 res3c_branch2b
64 bn3c_branch2b
65 activation_18
66 res3c_branch2c
67 bn3c_branch2c
68 merge_6
69 activation_19
70 res3d_branch2a
71 bn3d_branch2a
72 activation_20
73 res3d_branch2b
74 bn3d_branch2b
75 activation_21
76 res3d_branch2c
77 bn3d_branch2c
78 merge_7
79 activation_22
80 res4a_branch2a
81 bn4a_branch2a
82 activation_23
83 res4a_branch2b
84 bn4a_branch2b
85 activation_24
86 res4a_branch2c
87 res4a_branch1
88 bn4a_branch2c
89 bn4a_branch1
90 merge_8
91 activation_25
92 res4b_branch2a
93 bn4b_branch2a
94 activation_26
95 res4b_branch2b
96 bn4b_branch2b
97 activation_27
98 res4b_branch2c
99 bn4b_branch2c
100 merge_9
101 activation_28
102 res4c_branch2a
103 bn4c_branch2a
104 activation_29
105 res4c_branch2b
106 bn4c_branch2b
107 activation_30
108 res4c_branch2c
109 bn4c_branch2c
110 merge_10
111 activation_31
112 res4d_branch2a
113 bn4d_branch2a
114 activation_32
115 res4d_branch2b
116 bn4d_branch2b
117 activation_33
118 res4d_branch2c
119 bn4d_branch2c
120 merge_11
121 activation_34
122 res4e_branch2a
123 bn4e_branch2a
124 activation_35
125 res4e_branch2b
126 bn4e_branch2b
127 activation_36
128 res4e_branch2c
129 bn4e_branch2c
130 merge_12
131 activation_37
132 res4f_branch2a
133 bn4f_branch2a
134 activation_38
135 res4f_branch2b
136 bn4f_branch2b
137 activation_39
138 res4f_branch2c
139 bn4f_branch2c
140 merge_13
141 activation_40
142 res5a_branch2a
143 bn5a_branch2a
144 activation_41
145 res5a_branch2b
146 bn5a_branch2b
147 activation_42
148 res5a_branch2c
149 res5a_branch1
150 bn5a_branch2c
151 bn5a_branch1
152 merge_14
153 activation_43
154 res5b_branch2a
155 bn5b_branch2a
156 activation_44
157 res5b_branch2b
158 bn5b_branch2b
159 activation_45
160 res5b_branch2c
161 bn5b_branch2c
162 merge_15
163 activation_46
164 res5c_branch2a
165 bn5c_branch2a
166 activation_47
167 res5c_branch2b
168 bn5c_branch2b
169 activation_48
170 res5c_branch2c
171 bn5c_branch2c
172 merge_16
173 activation_49
174 avg_pool
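
With include_top=False the graph ends at avg_pool (index 174), a 2048-dimensional pooled feature. A minimal sketch of how an 8-way fish head could sit on top, in the Keras 1.x functional style (an illustration, not necessarily the checkpointed architecture):

x = base_model.output    #avg_pool output
x = Flatten()(x)
predictions = Dense(len(FISH_CLASSES), activation='softmax')(x)
model = Model(input=base_model.input, output=predictions)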

In [6]:
l = [1,2,3]
l[1:]


Out[6]:
[2, 3]
