In [ ]:
import os
import sys
import random

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import seaborn as sns
sns.set_style("white")

%matplotlib inline

# import cv2
from sklearn.model_selection import train_test_split

from tqdm import tqdm_notebook, tnrange
from itertools import chain
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from skimage.morphology import label

from keras.models import Model, load_model
from keras.layers import Input,Dropout,BatchNormalization,Activation,Add, LeakyReLU
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras import backend as K

import tensorflow as tf

import sys
import warnings 
if not sys.warnoptions:
    warnings.simplefilter("ignore")
    
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img#,save_img
import sys
sys.path.insert(0, './utils/')

from cyclelr_callback import *

In [39]:
# Set some parameters
# Source TGS Salt images are 101x101 single-channel; data lives under ./input/.
im_width = 101
im_height = 101
im_chan = 1
basicpath = './input/'
path_train = '{}train/'.format(basicpath)
path_test = '{}test/'.format(basicpath)

path_train_images = '{}images/'.format(path_train)
path_train_masks = '{}masks/'.format(path_train)
path_test_images = '{}images/'.format(path_test)

In [40]:
img_size_ori = 101
img_size_target = 128

def upsample(img):  # not used
    """Resize a 101x101 image up to 128x128; identity when sizes already match.

    Uses skimage.transform.resize with constant-mode padding and
    preserve_range=True so pixel values are not rescaled.
    """
    if img_size_ori != img_size_target:
        img = resize(img, (img_size_target, img_size_target), mode='constant', preserve_range=True)
    return img

def downsample(img):  # not used
    """Resize a 128x128 image back down to 101x101; identity when sizes match."""
    if img_size_ori != img_size_target:
        img = resize(img, (img_size_ori, img_size_ori), mode='constant', preserve_range=True)
    return img

In [41]:
# Loading of training/testing ids and depths

# train.csv: only the id column is needed (usecols=[0]); depths.csv maps id -> depth z.
train_df = pd.read_csv("./input/train.csv", index_col="id", usecols=[0])
depths_df = pd.read_csv("./input/depths.csv", index_col="id")
# Attach the depth column to the training ids.
train_df = train_df.join(depths_df)
# Test ids = every id in depths.csv that is not a training id.
test_df = depths_df[~depths_df.index.isin(train_df.index)]

In [42]:
# Load each training image as a grayscale 101x101 array scaled to [0, 1].
train_df["images"] = [np.array(load_img("./input/train/images/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm_notebook(train_df.index)]




In [43]:
# Load each binary salt mask the same way, scaled to [0, 1].
train_df["masks"] = [np.array(load_img("./input/train/masks/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm_notebook(train_df.index)]




In [44]:
# Fraction of salt pixels per mask (mask sum / 101*101 total pixels).
train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)

def cov_to_class(val):
    """Bucket a salt-coverage fraction in [0, 1] into a class index 0..10.

    Returns the smallest i in 0..10 with val * 10 <= i, i.e. ceil(val * 10);
    None for val > 1 (matches the original fall-through behavior).
    """
    return next((i for i in range(11) if val * 10 <= i), None)
        
# Discretize coverage into 11 classes for stratified splitting below.
train_df["coverage_class"] = train_df.coverage.map(cov_to_class)

In [45]:
# Create train/validation split stratified by salt coverage

# 80/20 split, stratified on coverage_class so both sets share the same
# salt-coverage distribution. Images/masks are upsampled to 128x128 and
# reshaped to (N, 128, 128, 1) for the network; ids, coverage and depth z
# are split in parallel with the same random_state.
ids_train, ids_valid, x_train, x_valid, y_train, y_valid, cov_train, cov_test, depth_train, depth_test = train_test_split(
    train_df.index.values,
    np.array(train_df.images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1), 
    np.array(train_df.masks.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1), 
    train_df.coverage.values,
    train_df.z.values,
    test_size=0.2, stratify=train_df.coverage_class, random_state= 1234)

In [9]:
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    """Conv2D followed by BatchNorm, optionally followed by a ReLU.

    Parameters
    ----------
    x : input tensor
    filters, size, strides, padding : forwarded to Conv2D
    activation : when truthy, append an Activation('relu') layer

    Returns the output tensor.
    """
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    x = BatchNormalization()(x)
    # Idiom fix: was `if activation == True:`; plain truthiness is intended
    # (call sites only pass True/False).
    if activation:
        x = Activation('relu')(x)
    return x

def residual_block(blockInput, num_filters=16):
    """Pre-activation residual block: ReLU -> BN -> conv -> BN -> ReLU ->
    conv (no activation), with an identity skip from blockInput.

    Returns blockInput + f(blockInput); channel count must equal num_filters.
    """
    out = BatchNormalization()(Activation('relu')(blockInput))
    out = convolution_block(out, num_filters, (3, 3))
    out = Activation('relu')(BatchNormalization()(out))
    out = convolution_block(out, num_filters, (3, 3), activation=False)
    return Add()([out, blockInput])

In [21]:
# Build model
def build_model(input_layer, start_neurons, DropoutRatio = 0.5):
    """U-Net-style encoder/decoder over a 128x128x1 input.

    Four downsampling stages (filters start_neurons * 1/2/4/8), a bottleneck
    at start_neurons * 16, and four upsampling stages with skip connections,
    ending in a 1x1 sigmoid conv producing a per-pixel mask.

    Parameters
    ----------
    input_layer : Keras input tensor, spatial size 128x128
    start_neurons : base filter count (doubled at each encoder stage)
    DropoutRatio : dropout rate; first/last dropouts use half of it

    Returns the sigmoid output tensor.

    NOTE: refactor of four copy-pasted encoder stages and four copy-pasted
    decoder stages into helpers; the layer sequence is unchanged.
    """

    def _down_stage(x, filters, dropout):
        # conv -> 2 residual blocks -> ReLU (skip source), then 2x pool + dropout.
        c = Conv2D(filters, (3, 3), activation=None, padding="same")(x)
        c = residual_block(c, filters)
        c = residual_block(c, filters)
        c = Activation('relu')(c)
        p = MaxPooling2D((2, 2))(c)
        p = Dropout(dropout)(p)
        return c, p

    def _up_stage(x, skip, filters, dropout):
        # 2x transposed conv, concat with encoder skip, dropout,
        # conv -> 2 residual blocks -> ReLU.
        d = Conv2DTranspose(filters, (3, 3), strides=(2, 2), padding="same")(x)
        u = concatenate([d, skip])
        u = Dropout(dropout)(u)
        u = Conv2D(filters, (3, 3), activation=None, padding="same")(u)
        u = residual_block(u, filters)
        u = residual_block(u, filters)
        return Activation('relu')(u)

    # Encoder: 128 -> 64 -> 32 -> 16 -> 8
    conv1, pool1 = _down_stage(input_layer, start_neurons * 1, DropoutRatio / 2)
    conv2, pool2 = _down_stage(pool1, start_neurons * 2, DropoutRatio)
    conv3, pool3 = _down_stage(pool2, start_neurons * 4, DropoutRatio)
    conv4, pool4 = _down_stage(pool3, start_neurons * 8, DropoutRatio)

    # Bottleneck at 8x8 (the original `# -> 128/16` comment was wrong here).
    convm = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(pool4)
    convm = residual_block(convm, start_neurons * 16)
    convm = residual_block(convm, start_neurons * 16)
    convm = Activation('relu')(convm)

    # Decoder: 8 -> 16 -> 32 -> 64 -> 128, mirroring the encoder skips.
    uconv4 = _up_stage(convm, conv4, start_neurons * 8, DropoutRatio)
    uconv3 = _up_stage(uconv4, conv3, start_neurons * 4, DropoutRatio)
    uconv2 = _up_stage(uconv3, conv2, start_neurons * 2, DropoutRatio)
    uconv1 = _up_stage(uconv2, conv1, start_neurons * 1, DropoutRatio)

    uconv1 = Dropout(DropoutRatio / 2)(uconv1)
    output_layer = Conv2D(1, (1, 1), padding="same", activation="sigmoid")(uconv1)

    return output_layer

In [11]:
#Score the model and do a threshold optimization by the best IoU.

# src: https://www.kaggle.com/aglotero/another-iou-metric
def iou_metric(y_true_in, y_pred_in, print_table=False):
    """Mean average precision over IoU thresholds 0.50:0.05:0.95 for one
    binary mask pair (Kaggle TGS metric).

    Adapted from https://www.kaggle.com/aglotero/another-iou-metric.
    Cleanup: removed unused `true_objects`/`pred_objects` locals and dead
    debug comments; logic unchanged.

    Parameters
    ----------
    y_true_in, y_pred_in : arrays of {0, 1} values (same shape)
    print_table : print per-threshold TP/FP/FN/precision rows

    Returns the mean precision across the ten thresholds (float).
    """
    labels = y_true_in
    y_pred = y_pred_in

    # 2x2 contingency table over {background, object} pixels. Explicit bin
    # edges so an all-zero mask still lands in the background bin instead of
    # being treated as an object.
    intersection = np.histogram2d(labels.flatten(), y_pred.flatten(),
                                  bins=([0, 0.5, 1], [0, 0.5, 1]))[0]

    # Per-class pixel counts, shaped for broadcasting the union.
    area_true = np.expand_dims(np.histogram(labels, bins=[0, 0.5, 1])[0], -1)
    area_pred = np.expand_dims(np.histogram(y_pred, bins=[0, 0.5, 1])[0], 0)
    union = area_true + area_pred - intersection

    # Exclude background from the analysis; replace zeros so the division
    # below never produces nan/inf.
    intersection = intersection[1:, 1:]
    intersection[intersection == 0] = 1e-9
    union = union[1:, 1:]
    union[union == 0] = 1e-9

    # Intersection over union for the object class.
    iou = intersection / union

    def precision_at(threshold):
        # TP/FP/FN counts at a single IoU threshold.
        matches = iou > threshold
        tp = np.sum(np.sum(matches, axis=1) == 1)
        fp = np.sum(np.sum(matches, axis=0) == 0)
        fn = np.sum(np.sum(matches, axis=1) == 0)
        return tp, fp, fn

    # Average precision over the ten IoU thresholds.
    prec = []
    if print_table:
        print("Thresh\tTP\tFP\tFN\tPrec.")
    for t in np.arange(0.5, 1.0, 0.05):
        tp, fp, fn = precision_at(t)
        p = tp / (tp + fp + fn) if (tp + fp + fn) > 0 else 0
        if print_table:
            print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
        prec.append(p)

    if print_table:
        print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
    return np.mean(prec)

def iou_metric_batch(y_true_in, y_pred_in):
    """Mean iou_metric over a batch; predictions are binarised at 0.5 first."""
    preds = y_pred_in > 0.5  # added by sgx 20180728
    scores = [iou_metric(y_true_in[i], preds[i])
              for i in range(y_true_in.shape[0])]
    return np.mean(scores)

def my_iou_metric(label, pred):
    """Keras-compatible metric: wraps the numpy iou_metric_batch in the TF
    graph via tf.py_func (TF1 API), returning a float64 scalar tensor."""
    metric_value = tf.py_func(iou_metric_batch, [label, pred], tf.float64)
    return metric_value

In [23]:
import keras.backend as K
from keras.backend.tensorflow_backend import _to_tensor
from keras.losses import binary_crossentropy


def dice_coef_clipped(y_true, y_pred, smooth=1.0):
    """Dice coefficient (as a percentage) computed on masks rounded to 0/1."""
    true_bin = K.flatten(K.round(y_true))
    pred_bin = K.flatten(K.round(y_pred))
    overlap = K.sum(true_bin * pred_bin)
    denom = K.sum(true_bin) + K.sum(pred_bin) + smooth
    return 100. * (2. * overlap + smooth) / denom


def dice_coef(y_true, y_pred, smooth=1.0):
    """Soft Dice coefficient with additive smoothing (no rounding)."""
    t = K.flatten(y_true)
    p = K.flatten(y_pred)
    overlap = K.sum(t * p)
    return (2. * overlap + smooth) / (K.sum(t) + K.sum(p) + smooth)


def bootstrapped_crossentropy(y_true, y_pred, bootstrap_type='hard', alpha=0.95):
    """Bootstrapped binary crossentropy: the target is blended with the
    model's own prediction, weighted alpha vs (1 - alpha).

    'soft' mixes in the predicted probability; any other value ('hard')
    mixes in the 0/1-thresholded prediction.
    y_pred is expected to be probabilities; it is converted to logits below.
    """
    target_tensor = y_true
    prediction_tensor = y_pred
    # Clip away exact 0/1 so the log-odds transform stays finite, then
    # convert probabilities to logits.
    _epsilon = _to_tensor(K.epsilon(), prediction_tensor.dtype.base_dtype)
    prediction_tensor = K.tf.clip_by_value(prediction_tensor, _epsilon, 1 - _epsilon)
    prediction_tensor = K.tf.log(prediction_tensor / (1 - prediction_tensor))

    if bootstrap_type == 'soft':
        bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * K.tf.sigmoid(prediction_tensor)
    else:
        bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * K.tf.cast(
            K.tf.sigmoid(prediction_tensor) > 0.5, K.tf.float32)
    return K.mean(K.tf.nn.sigmoid_cross_entropy_with_logits(
        labels=bootstrap_target_tensor, logits=prediction_tensor))


def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """ Implements online Bootstrapping crossentropy loss, to train only on hard pixels,
        see  https://arxiv.org/abs/1605.06885 Bridging Category-level and Instance-level Semantic Image Segmentation
        The implementation is a bit different as we use binary crossentropy instead of softmax
        SUPPORTS ONLY MINIBATCH WITH 1 ELEMENT!
    # Arguments
        y_true: A tensor with labels.
        y_pred: A tensor with predicted probabilities.
        pixels: number of hard pixels to keep
        threshold: confidence to use, i.e. if threshold is 0.7, y_true=1, prediction=0.65 then we consider that pixel as hard
    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    # Pixel "hardness" = absolute error between label and prediction.
    difference = K.abs(y_true - y_pred)

    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    # Among the top-k hardest pixels, keep only those whose error exceeds
    # (1 - threshold); the loss is computed on that subset alone.
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))

    return K.mean(K.binary_crossentropy(y_true, y_pred))


def dice_coef_loss_border(y_true, y_pred):
    # 95% plain Dice loss + 5% Dice loss restricted to border pixels.
    return (1 - dice_coef_border(y_true, y_pred)) * 0.05 + 0.95 * dice_coef_loss(y_true, y_pred)

def bce_dice_loss_border(y_true, y_pred):
    # 95% plain Dice loss + 5% binary crossentropy restricted to border pixels.
    return bce_border(y_true, y_pred) * 0.05 + 0.95 * dice_coef_loss(y_true, y_pred)


def dice_coef_border(y_true, y_pred):
    """Dice coefficient evaluated only on pixels selected by the 21x21
    border mask from get_border_mask."""
    border = get_border_mask((21, 21), y_true)

    # Keep only the pixels where the border mask fires (> 0.5).
    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.tf.gather(y_true_f, K.tf.where(border > 0.5))
    y_pred_f = K.tf.gather(y_pred_f, K.tf.where(border > 0.5))

    return dice_coef(y_true_f, y_pred_f)


def bce_border(y_true, y_pred):
    """Binary crossentropy evaluated only on pixels selected by the 21x21
    border mask from get_border_mask."""
    border = get_border_mask((21, 21), y_true)

    # Keep only the pixels where the border mask fires (> 0.5).
    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.tf.gather(y_true_f, K.tf.where(border > 0.5))
    y_pred_f = K.tf.gather(y_pred_f, K.tf.where(border > 0.5))

    return binary_crossentropy(y_true_f, y_pred_f)


def get_border_mask(pool_size, y_true):
    """Mask that is non-zero only near the object boundary: pool both the
    positive and negative regions and multiply them, so only pixels where
    the two pooled regions overlap (the border band) survive.

    NOTE(review): relies on K.pool2d's default pool_mode — presumably max
    pooling (a dilation); confirm for the installed Keras version.
    """
    negative = 1 - y_true
    positive = y_true
    positive = K.pool2d(positive, pool_size=pool_size, padding="same")
    negative = K.pool2d(negative, pool_size=pool_size, padding="same")
    border = positive * negative
    return border


def dice_coef_loss(y_true, y_pred):
    # Minimising this maximises the soft Dice coefficient.
    return 1 - dice_coef(y_true, y_pred)


def dice_coef_loss_bce(y_true, y_pred, dice=0.5, bce=0.5, bootstrapping='hard', alpha=1.):
    # Weighted sum: bootstrapped crossentropy (weight `bce`) + Dice loss (weight `dice`).
    return bootstrapped_crossentropy(y_true, y_pred, bootstrapping, alpha) * bce + dice_coef_loss(y_true, y_pred) * dice


def make_loss(loss_name):
    """Look up a loss function by name.

    Known names: 'crossentropy', 'crossentropy_boot', 'dice', 'bce_dice',
    'boot_soft', 'boot_hard', 'online_bootstrapping',
    'dice_coef_loss_border', 'bce_dice_loss_border'.

    Returns a callable loss(y_true, y_pred).
    Raises ValueError for an unknown name.

    BUG FIX: the original built `ValueError("Unknown loss.")` without
    `raise`, so unknown names silently returned None.
    """
    if loss_name == 'crossentropy':
        return K.binary_crossentropy
    elif loss_name == 'crossentropy_boot':
        def loss(y, p):
            return bootstrapped_crossentropy(y, p, 'hard', 0.9)
        return loss
    elif loss_name == 'dice':
        return dice_coef_loss
    elif loss_name == 'bce_dice':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='soft', alpha=1)
        return loss
    elif loss_name == 'boot_soft':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='soft', alpha=0.95)
        return loss
    elif loss_name == 'boot_hard':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='hard', alpha=0.95)
        return loss
    elif loss_name == 'online_bootstrapping':
        def loss(y, p):
            return online_bootstrapping(y, p, pixels=512 * 64, threshold=0.7)
        return loss
    elif loss_name == 'dice_coef_loss_border':
        return dice_coef_loss_border
    elif loss_name == 'bce_dice_loss_border':
        return bce_dice_loss_border
    else:
        raise ValueError("Unknown loss: {!r}".format(loss_name))

In [24]:
# Data augmentation: double the training set with horizontal flips
# (reversing axis 2 of the (N, H, W, 1) batch == np.fliplr per image).
x_train2 = np.concatenate([x_train, x_train[:, :, ::-1, :]], axis=0)
y_train2 = np.concatenate([y_train, y_train[:, :, ::-1, :]], axis=0)
print(x_train2.shape)
print(y_valid.shape)


(6400, 128, 128, 1)
(800, 128, 128, 1)

In [25]:
# model
# Build and compile: 128x128x1 input, build_model with 16 starting filters
# and dropout 0.5; pixel-wise BCE loss with the competition IoU as metric.
input_layer = Input((img_size_target, img_size_target, 1))
# input_layer2 = Input((img_size_target, img_size_target, 1))
output_layer = build_model(input_layer, 16,0.5)

# del model
model = Model(input_layer, output_layer)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=[my_iou_metric])

model.summary()


__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_2 (InputLayer)            (None, 128, 128, 1)  0                                            
__________________________________________________________________________________________________
conv2d_47 (Conv2D)              (None, 128, 128, 16) 160         input_2[0][0]                    
__________________________________________________________________________________________________
activation_46 (Activation)      (None, 128, 128, 16) 0           conv2d_47[0][0]                  
__________________________________________________________________________________________________
batch_normalization_55 (BatchNo (None, 128, 128, 16) 64          activation_46[0][0]              
__________________________________________________________________________________________________
conv2d_48 (Conv2D)              (None, 128, 128, 16) 2320        batch_normalization_55[0][0]     
__________________________________________________________________________________________________
batch_normalization_56 (BatchNo (None, 128, 128, 16) 64          conv2d_48[0][0]                  
__________________________________________________________________________________________________
activation_47 (Activation)      (None, 128, 128, 16) 0           batch_normalization_56[0][0]     
__________________________________________________________________________________________________
batch_normalization_57 (BatchNo (None, 128, 128, 16) 64          activation_47[0][0]              
__________________________________________________________________________________________________
activation_48 (Activation)      (None, 128, 128, 16) 0           batch_normalization_57[0][0]     
__________________________________________________________________________________________________
conv2d_49 (Conv2D)              (None, 128, 128, 16) 2320        activation_48[0][0]              
__________________________________________________________________________________________________
batch_normalization_58 (BatchNo (None, 128, 128, 16) 64          conv2d_49[0][0]                  
__________________________________________________________________________________________________
add_19 (Add)                    (None, 128, 128, 16) 0           batch_normalization_58[0][0]     
                                                                 conv2d_47[0][0]                  
__________________________________________________________________________________________________
activation_49 (Activation)      (None, 128, 128, 16) 0           add_19[0][0]                     
__________________________________________________________________________________________________
batch_normalization_59 (BatchNo (None, 128, 128, 16) 64          activation_49[0][0]              
__________________________________________________________________________________________________
conv2d_50 (Conv2D)              (None, 128, 128, 16) 2320        batch_normalization_59[0][0]     
__________________________________________________________________________________________________
batch_normalization_60 (BatchNo (None, 128, 128, 16) 64          conv2d_50[0][0]                  
__________________________________________________________________________________________________
activation_50 (Activation)      (None, 128, 128, 16) 0           batch_normalization_60[0][0]     
__________________________________________________________________________________________________
batch_normalization_61 (BatchNo (None, 128, 128, 16) 64          activation_50[0][0]              
__________________________________________________________________________________________________
activation_51 (Activation)      (None, 128, 128, 16) 0           batch_normalization_61[0][0]     
__________________________________________________________________________________________________
conv2d_51 (Conv2D)              (None, 128, 128, 16) 2320        activation_51[0][0]              
__________________________________________________________________________________________________
batch_normalization_62 (BatchNo (None, 128, 128, 16) 64          conv2d_51[0][0]                  
__________________________________________________________________________________________________
add_20 (Add)                    (None, 128, 128, 16) 0           batch_normalization_62[0][0]     
                                                                 add_19[0][0]                     
__________________________________________________________________________________________________
activation_52 (Activation)      (None, 128, 128, 16) 0           add_20[0][0]                     
__________________________________________________________________________________________________
max_pooling2d_5 (MaxPooling2D)  (None, 64, 64, 16)   0           activation_52[0][0]              
__________________________________________________________________________________________________
dropout_10 (Dropout)            (None, 64, 64, 16)   0           max_pooling2d_5[0][0]            
__________________________________________________________________________________________________
conv2d_52 (Conv2D)              (None, 64, 64, 32)   4640        dropout_10[0][0]                 
__________________________________________________________________________________________________
activation_53 (Activation)      (None, 64, 64, 32)   0           conv2d_52[0][0]                  
__________________________________________________________________________________________________
batch_normalization_63 (BatchNo (None, 64, 64, 32)   128         activation_53[0][0]              
__________________________________________________________________________________________________
conv2d_53 (Conv2D)              (None, 64, 64, 32)   9248        batch_normalization_63[0][0]     
__________________________________________________________________________________________________
batch_normalization_64 (BatchNo (None, 64, 64, 32)   128         conv2d_53[0][0]                  
__________________________________________________________________________________________________
activation_54 (Activation)      (None, 64, 64, 32)   0           batch_normalization_64[0][0]     
__________________________________________________________________________________________________
batch_normalization_65 (BatchNo (None, 64, 64, 32)   128         activation_54[0][0]              
__________________________________________________________________________________________________
activation_55 (Activation)      (None, 64, 64, 32)   0           batch_normalization_65[0][0]     
__________________________________________________________________________________________________
conv2d_54 (Conv2D)              (None, 64, 64, 32)   9248        activation_55[0][0]              
__________________________________________________________________________________________________
batch_normalization_66 (BatchNo (None, 64, 64, 32)   128         conv2d_54[0][0]                  
__________________________________________________________________________________________________
add_21 (Add)                    (None, 64, 64, 32)   0           batch_normalization_66[0][0]     
                                                                 conv2d_52[0][0]                  
__________________________________________________________________________________________________
activation_56 (Activation)      (None, 64, 64, 32)   0           add_21[0][0]                     
__________________________________________________________________________________________________
batch_normalization_67 (BatchNo (None, 64, 64, 32)   128         activation_56[0][0]              
__________________________________________________________________________________________________
conv2d_55 (Conv2D)              (None, 64, 64, 32)   9248        batch_normalization_67[0][0]     
__________________________________________________________________________________________________
batch_normalization_68 (BatchNo (None, 64, 64, 32)   128         conv2d_55[0][0]                  
__________________________________________________________________________________________________
activation_57 (Activation)      (None, 64, 64, 32)   0           batch_normalization_68[0][0]     
__________________________________________________________________________________________________
batch_normalization_69 (BatchNo (None, 64, 64, 32)   128         activation_57[0][0]              
__________________________________________________________________________________________________
activation_58 (Activation)      (None, 64, 64, 32)   0           batch_normalization_69[0][0]     
__________________________________________________________________________________________________
conv2d_56 (Conv2D)              (None, 64, 64, 32)   9248        activation_58[0][0]              
__________________________________________________________________________________________________
batch_normalization_70 (BatchNo (None, 64, 64, 32)   128         conv2d_56[0][0]                  
__________________________________________________________________________________________________
add_22 (Add)                    (None, 64, 64, 32)   0           batch_normalization_70[0][0]     
                                                                 add_21[0][0]                     
__________________________________________________________________________________________________
activation_59 (Activation)      (None, 64, 64, 32)   0           add_22[0][0]                     
__________________________________________________________________________________________________
max_pooling2d_6 (MaxPooling2D)  (None, 32, 32, 32)   0           activation_59[0][0]              
__________________________________________________________________________________________________
dropout_11 (Dropout)            (None, 32, 32, 32)   0           max_pooling2d_6[0][0]            
__________________________________________________________________________________________________
conv2d_57 (Conv2D)              (None, 32, 32, 64)   18496       dropout_11[0][0]                 
__________________________________________________________________________________________________
activation_60 (Activation)      (None, 32, 32, 64)   0           conv2d_57[0][0]                  
__________________________________________________________________________________________________
batch_normalization_71 (BatchNo (None, 32, 32, 64)   256         activation_60[0][0]              
__________________________________________________________________________________________________
conv2d_58 (Conv2D)              (None, 32, 32, 64)   36928       batch_normalization_71[0][0]     
__________________________________________________________________________________________________
batch_normalization_72 (BatchNo (None, 32, 32, 64)   256         conv2d_58[0][0]                  
__________________________________________________________________________________________________
activation_61 (Activation)      (None, 32, 32, 64)   0           batch_normalization_72[0][0]     
__________________________________________________________________________________________________
batch_normalization_73 (BatchNo (None, 32, 32, 64)   256         activation_61[0][0]              
__________________________________________________________________________________________________
activation_62 (Activation)      (None, 32, 32, 64)   0           batch_normalization_73[0][0]     
__________________________________________________________________________________________________
conv2d_59 (Conv2D)              (None, 32, 32, 64)   36928       activation_62[0][0]              
__________________________________________________________________________________________________
batch_normalization_74 (BatchNo (None, 32, 32, 64)   256         conv2d_59[0][0]                  
__________________________________________________________________________________________________
add_23 (Add)                    (None, 32, 32, 64)   0           batch_normalization_74[0][0]     
                                                                 conv2d_57[0][0]                  
__________________________________________________________________________________________________
activation_63 (Activation)      (None, 32, 32, 64)   0           add_23[0][0]                     
__________________________________________________________________________________________________
batch_normalization_75 (BatchNo (None, 32, 32, 64)   256         activation_63[0][0]              
__________________________________________________________________________________________________
conv2d_60 (Conv2D)              (None, 32, 32, 64)   36928       batch_normalization_75[0][0]     
__________________________________________________________________________________________________
batch_normalization_76 (BatchNo (None, 32, 32, 64)   256         conv2d_60[0][0]                  
__________________________________________________________________________________________________
activation_64 (Activation)      (None, 32, 32, 64)   0           batch_normalization_76[0][0]     
__________________________________________________________________________________________________
batch_normalization_77 (BatchNo (None, 32, 32, 64)   256         activation_64[0][0]              
__________________________________________________________________________________________________
activation_65 (Activation)      (None, 32, 32, 64)   0           batch_normalization_77[0][0]     
__________________________________________________________________________________________________
conv2d_61 (Conv2D)              (None, 32, 32, 64)   36928       activation_65[0][0]              
__________________________________________________________________________________________________
batch_normalization_78 (BatchNo (None, 32, 32, 64)   256         conv2d_61[0][0]                  
__________________________________________________________________________________________________
add_24 (Add)                    (None, 32, 32, 64)   0           batch_normalization_78[0][0]     
                                                                 add_23[0][0]                     
__________________________________________________________________________________________________
activation_66 (Activation)      (None, 32, 32, 64)   0           add_24[0][0]                     
__________________________________________________________________________________________________
max_pooling2d_7 (MaxPooling2D)  (None, 16, 16, 64)   0           activation_66[0][0]              
__________________________________________________________________________________________________
dropout_12 (Dropout)            (None, 16, 16, 64)   0           max_pooling2d_7[0][0]            
__________________________________________________________________________________________________
conv2d_62 (Conv2D)              (None, 16, 16, 128)  73856       dropout_12[0][0]                 
__________________________________________________________________________________________________
activation_67 (Activation)      (None, 16, 16, 128)  0           conv2d_62[0][0]                  
__________________________________________________________________________________________________
batch_normalization_79 (BatchNo (None, 16, 16, 128)  512         activation_67[0][0]              
__________________________________________________________________________________________________
conv2d_63 (Conv2D)              (None, 16, 16, 128)  147584      batch_normalization_79[0][0]     
__________________________________________________________________________________________________
batch_normalization_80 (BatchNo (None, 16, 16, 128)  512         conv2d_63[0][0]                  
__________________________________________________________________________________________________
activation_68 (Activation)      (None, 16, 16, 128)  0           batch_normalization_80[0][0]     
__________________________________________________________________________________________________
batch_normalization_81 (BatchNo (None, 16, 16, 128)  512         activation_68[0][0]              
__________________________________________________________________________________________________
activation_69 (Activation)      (None, 16, 16, 128)  0           batch_normalization_81[0][0]     
__________________________________________________________________________________________________
conv2d_64 (Conv2D)              (None, 16, 16, 128)  147584      activation_69[0][0]              
__________________________________________________________________________________________________
batch_normalization_82 (BatchNo (None, 16, 16, 128)  512         conv2d_64[0][0]                  
__________________________________________________________________________________________________
add_25 (Add)                    (None, 16, 16, 128)  0           batch_normalization_82[0][0]     
                                                                 conv2d_62[0][0]                  
__________________________________________________________________________________________________
activation_70 (Activation)      (None, 16, 16, 128)  0           add_25[0][0]                     
__________________________________________________________________________________________________
batch_normalization_83 (BatchNo (None, 16, 16, 128)  512         activation_70[0][0]              
__________________________________________________________________________________________________
conv2d_65 (Conv2D)              (None, 16, 16, 128)  147584      batch_normalization_83[0][0]     
__________________________________________________________________________________________________
batch_normalization_84 (BatchNo (None, 16, 16, 128)  512         conv2d_65[0][0]                  
__________________________________________________________________________________________________
activation_71 (Activation)      (None, 16, 16, 128)  0           batch_normalization_84[0][0]     
__________________________________________________________________________________________________
batch_normalization_85 (BatchNo (None, 16, 16, 128)  512         activation_71[0][0]              
__________________________________________________________________________________________________
activation_72 (Activation)      (None, 16, 16, 128)  0           batch_normalization_85[0][0]     
__________________________________________________________________________________________________
conv2d_66 (Conv2D)              (None, 16, 16, 128)  147584      activation_72[0][0]              
__________________________________________________________________________________________________
batch_normalization_86 (BatchNo (None, 16, 16, 128)  512         conv2d_66[0][0]                  
__________________________________________________________________________________________________
add_26 (Add)                    (None, 16, 16, 128)  0           batch_normalization_86[0][0]     
                                                                 add_25[0][0]                     
__________________________________________________________________________________________________
activation_73 (Activation)      (None, 16, 16, 128)  0           add_26[0][0]                     
__________________________________________________________________________________________________
max_pooling2d_8 (MaxPooling2D)  (None, 8, 8, 128)    0           activation_73[0][0]              
__________________________________________________________________________________________________
dropout_13 (Dropout)            (None, 8, 8, 128)    0           max_pooling2d_8[0][0]            
__________________________________________________________________________________________________
conv2d_67 (Conv2D)              (None, 8, 8, 256)    295168      dropout_13[0][0]                 
__________________________________________________________________________________________________
activation_74 (Activation)      (None, 8, 8, 256)    0           conv2d_67[0][0]                  
__________________________________________________________________________________________________
batch_normalization_87 (BatchNo (None, 8, 8, 256)    1024        activation_74[0][0]              
__________________________________________________________________________________________________
conv2d_68 (Conv2D)              (None, 8, 8, 256)    590080      batch_normalization_87[0][0]     
__________________________________________________________________________________________________
batch_normalization_88 (BatchNo (None, 8, 8, 256)    1024        conv2d_68[0][0]                  
__________________________________________________________________________________________________
activation_75 (Activation)      (None, 8, 8, 256)    0           batch_normalization_88[0][0]     
__________________________________________________________________________________________________
batch_normalization_89 (BatchNo (None, 8, 8, 256)    1024        activation_75[0][0]              
__________________________________________________________________________________________________
activation_76 (Activation)      (None, 8, 8, 256)    0           batch_normalization_89[0][0]     
__________________________________________________________________________________________________
conv2d_69 (Conv2D)              (None, 8, 8, 256)    590080      activation_76[0][0]              
__________________________________________________________________________________________________
batch_normalization_90 (BatchNo (None, 8, 8, 256)    1024        conv2d_69[0][0]                  
__________________________________________________________________________________________________
add_27 (Add)                    (None, 8, 8, 256)    0           batch_normalization_90[0][0]     
                                                                 conv2d_67[0][0]                  
__________________________________________________________________________________________________
activation_77 (Activation)      (None, 8, 8, 256)    0           add_27[0][0]                     
__________________________________________________________________________________________________
batch_normalization_91 (BatchNo (None, 8, 8, 256)    1024        activation_77[0][0]              
__________________________________________________________________________________________________
conv2d_70 (Conv2D)              (None, 8, 8, 256)    590080      batch_normalization_91[0][0]     
__________________________________________________________________________________________________
batch_normalization_92 (BatchNo (None, 8, 8, 256)    1024        conv2d_70[0][0]                  
__________________________________________________________________________________________________
activation_78 (Activation)      (None, 8, 8, 256)    0           batch_normalization_92[0][0]     
__________________________________________________________________________________________________
batch_normalization_93 (BatchNo (None, 8, 8, 256)    1024        activation_78[0][0]              
__________________________________________________________________________________________________
activation_79 (Activation)      (None, 8, 8, 256)    0           batch_normalization_93[0][0]     
__________________________________________________________________________________________________
conv2d_71 (Conv2D)              (None, 8, 8, 256)    590080      activation_79[0][0]              
__________________________________________________________________________________________________
batch_normalization_94 (BatchNo (None, 8, 8, 256)    1024        conv2d_71[0][0]                  
__________________________________________________________________________________________________
add_28 (Add)                    (None, 8, 8, 256)    0           batch_normalization_94[0][0]     
                                                                 add_27[0][0]                     
__________________________________________________________________________________________________
activation_80 (Activation)      (None, 8, 8, 256)    0           add_28[0][0]                     
__________________________________________________________________________________________________
conv2d_transpose_5 (Conv2DTrans (None, 16, 16, 128)  295040      activation_80[0][0]              
__________________________________________________________________________________________________
concatenate_5 (Concatenate)     (None, 16, 16, 256)  0           conv2d_transpose_5[0][0]         
                                                                 activation_73[0][0]              
__________________________________________________________________________________________________
dropout_14 (Dropout)            (None, 16, 16, 256)  0           concatenate_5[0][0]              
__________________________________________________________________________________________________
conv2d_72 (Conv2D)              (None, 16, 16, 128)  295040      dropout_14[0][0]                 
__________________________________________________________________________________________________
activation_81 (Activation)      (None, 16, 16, 128)  0           conv2d_72[0][0]                  
__________________________________________________________________________________________________
batch_normalization_95 (BatchNo (None, 16, 16, 128)  512         activation_81[0][0]              
__________________________________________________________________________________________________
conv2d_73 (Conv2D)              (None, 16, 16, 128)  147584      batch_normalization_95[0][0]     
__________________________________________________________________________________________________
batch_normalization_96 (BatchNo (None, 16, 16, 128)  512         conv2d_73[0][0]                  
__________________________________________________________________________________________________
activation_82 (Activation)      (None, 16, 16, 128)  0           batch_normalization_96[0][0]     
__________________________________________________________________________________________________
batch_normalization_97 (BatchNo (None, 16, 16, 128)  512         activation_82[0][0]              
__________________________________________________________________________________________________
activation_83 (Activation)      (None, 16, 16, 128)  0           batch_normalization_97[0][0]     
__________________________________________________________________________________________________
conv2d_74 (Conv2D)              (None, 16, 16, 128)  147584      activation_83[0][0]              
__________________________________________________________________________________________________
batch_normalization_98 (BatchNo (None, 16, 16, 128)  512         conv2d_74[0][0]                  
__________________________________________________________________________________________________
add_29 (Add)                    (None, 16, 16, 128)  0           batch_normalization_98[0][0]     
                                                                 conv2d_72[0][0]                  
__________________________________________________________________________________________________
activation_84 (Activation)      (None, 16, 16, 128)  0           add_29[0][0]                     
__________________________________________________________________________________________________
batch_normalization_99 (BatchNo (None, 16, 16, 128)  512         activation_84[0][0]              
__________________________________________________________________________________________________
conv2d_75 (Conv2D)              (None, 16, 16, 128)  147584      batch_normalization_99[0][0]     
__________________________________________________________________________________________________
batch_normalization_100 (BatchN (None, 16, 16, 128)  512         conv2d_75[0][0]                  
__________________________________________________________________________________________________
activation_85 (Activation)      (None, 16, 16, 128)  0           batch_normalization_100[0][0]    
__________________________________________________________________________________________________
batch_normalization_101 (BatchN (None, 16, 16, 128)  512         activation_85[0][0]              
__________________________________________________________________________________________________
activation_86 (Activation)      (None, 16, 16, 128)  0           batch_normalization_101[0][0]    
__________________________________________________________________________________________________
conv2d_76 (Conv2D)              (None, 16, 16, 128)  147584      activation_86[0][0]              
__________________________________________________________________________________________________
batch_normalization_102 (BatchN (None, 16, 16, 128)  512         conv2d_76[0][0]                  
__________________________________________________________________________________________________
add_30 (Add)                    (None, 16, 16, 128)  0           batch_normalization_102[0][0]    
                                                                 add_29[0][0]                     
__________________________________________________________________________________________________
activation_87 (Activation)      (None, 16, 16, 128)  0           add_30[0][0]                     
__________________________________________________________________________________________________
conv2d_transpose_6 (Conv2DTrans (None, 32, 32, 64)   73792       activation_87[0][0]              
__________________________________________________________________________________________________
concatenate_6 (Concatenate)     (None, 32, 32, 128)  0           conv2d_transpose_6[0][0]         
                                                                 activation_66[0][0]              
__________________________________________________________________________________________________
dropout_15 (Dropout)            (None, 32, 32, 128)  0           concatenate_6[0][0]              
__________________________________________________________________________________________________
conv2d_77 (Conv2D)              (None, 32, 32, 64)   73792       dropout_15[0][0]                 
__________________________________________________________________________________________________
activation_88 (Activation)      (None, 32, 32, 64)   0           conv2d_77[0][0]                  
__________________________________________________________________________________________________
batch_normalization_103 (BatchN (None, 32, 32, 64)   256         activation_88[0][0]              
__________________________________________________________________________________________________
conv2d_78 (Conv2D)              (None, 32, 32, 64)   36928       batch_normalization_103[0][0]    
__________________________________________________________________________________________________
batch_normalization_104 (BatchN (None, 32, 32, 64)   256         conv2d_78[0][0]                  
__________________________________________________________________________________________________
activation_89 (Activation)      (None, 32, 32, 64)   0           batch_normalization_104[0][0]    
__________________________________________________________________________________________________
batch_normalization_105 (BatchN (None, 32, 32, 64)   256         activation_89[0][0]              
__________________________________________________________________________________________________
activation_90 (Activation)      (None, 32, 32, 64)   0           batch_normalization_105[0][0]    
__________________________________________________________________________________________________
conv2d_79 (Conv2D)              (None, 32, 32, 64)   36928       activation_90[0][0]              
__________________________________________________________________________________________________
batch_normalization_106 (BatchN (None, 32, 32, 64)   256         conv2d_79[0][0]                  
__________________________________________________________________________________________________
add_31 (Add)                    (None, 32, 32, 64)   0           batch_normalization_106[0][0]    
                                                                 conv2d_77[0][0]                  
__________________________________________________________________________________________________
activation_91 (Activation)      (None, 32, 32, 64)   0           add_31[0][0]                     
__________________________________________________________________________________________________
batch_normalization_107 (BatchN (None, 32, 32, 64)   256         activation_91[0][0]              
__________________________________________________________________________________________________
conv2d_80 (Conv2D)              (None, 32, 32, 64)   36928       batch_normalization_107[0][0]    
__________________________________________________________________________________________________
batch_normalization_108 (BatchN (None, 32, 32, 64)   256         conv2d_80[0][0]                  
__________________________________________________________________________________________________
activation_92 (Activation)      (None, 32, 32, 64)   0           batch_normalization_108[0][0]    
__________________________________________________________________________________________________
batch_normalization_109 (BatchN (None, 32, 32, 64)   256         activation_92[0][0]              
__________________________________________________________________________________________________
activation_93 (Activation)      (None, 32, 32, 64)   0           batch_normalization_109[0][0]    
__________________________________________________________________________________________________
conv2d_81 (Conv2D)              (None, 32, 32, 64)   36928       activation_93[0][0]              
__________________________________________________________________________________________________
batch_normalization_110 (BatchN (None, 32, 32, 64)   256         conv2d_81[0][0]                  
__________________________________________________________________________________________________
add_32 (Add)                    (None, 32, 32, 64)   0           batch_normalization_110[0][0]    
                                                                 add_31[0][0]                     
__________________________________________________________________________________________________
activation_94 (Activation)      (None, 32, 32, 64)   0           add_32[0][0]                     
__________________________________________________________________________________________________
conv2d_transpose_7 (Conv2DTrans (None, 64, 64, 32)   18464       activation_94[0][0]              
__________________________________________________________________________________________________
concatenate_7 (Concatenate)     (None, 64, 64, 64)   0           conv2d_transpose_7[0][0]         
                                                                 activation_59[0][0]              
__________________________________________________________________________________________________
dropout_16 (Dropout)            (None, 64, 64, 64)   0           concatenate_7[0][0]              
__________________________________________________________________________________________________
conv2d_82 (Conv2D)              (None, 64, 64, 32)   18464       dropout_16[0][0]                 
__________________________________________________________________________________________________
activation_95 (Activation)      (None, 64, 64, 32)   0           conv2d_82[0][0]                  
__________________________________________________________________________________________________
batch_normalization_111 (BatchN (None, 64, 64, 32)   128         activation_95[0][0]              
__________________________________________________________________________________________________
conv2d_83 (Conv2D)              (None, 64, 64, 32)   9248        batch_normalization_111[0][0]    
__________________________________________________________________________________________________
batch_normalization_112 (BatchN (None, 64, 64, 32)   128         conv2d_83[0][0]                  
__________________________________________________________________________________________________
activation_96 (Activation)      (None, 64, 64, 32)   0           batch_normalization_112[0][0]    
__________________________________________________________________________________________________
batch_normalization_113 (BatchN (None, 64, 64, 32)   128         activation_96[0][0]              
__________________________________________________________________________________________________
activation_97 (Activation)      (None, 64, 64, 32)   0           batch_normalization_113[0][0]    
__________________________________________________________________________________________________
conv2d_84 (Conv2D)              (None, 64, 64, 32)   9248        activation_97[0][0]              
__________________________________________________________________________________________________
batch_normalization_114 (BatchN (None, 64, 64, 32)   128         conv2d_84[0][0]                  
__________________________________________________________________________________________________
add_33 (Add)                    (None, 64, 64, 32)   0           batch_normalization_114[0][0]    
                                                                 conv2d_82[0][0]                  
__________________________________________________________________________________________________
activation_98 (Activation)      (None, 64, 64, 32)   0           add_33[0][0]                     
__________________________________________________________________________________________________
batch_normalization_115 (BatchN (None, 64, 64, 32)   128         activation_98[0][0]              
__________________________________________________________________________________________________
conv2d_85 (Conv2D)              (None, 64, 64, 32)   9248        batch_normalization_115[0][0]    
__________________________________________________________________________________________________
batch_normalization_116 (BatchN (None, 64, 64, 32)   128         conv2d_85[0][0]                  
__________________________________________________________________________________________________
activation_99 (Activation)      (None, 64, 64, 32)   0           batch_normalization_116[0][0]    
__________________________________________________________________________________________________
batch_normalization_117 (BatchN (None, 64, 64, 32)   128         activation_99[0][0]              
__________________________________________________________________________________________________
activation_100 (Activation)     (None, 64, 64, 32)   0           batch_normalization_117[0][0]    
__________________________________________________________________________________________________
conv2d_86 (Conv2D)              (None, 64, 64, 32)   9248        activation_100[0][0]             
__________________________________________________________________________________________________
batch_normalization_118 (BatchN (None, 64, 64, 32)   128         conv2d_86[0][0]                  
__________________________________________________________________________________________________
add_34 (Add)                    (None, 64, 64, 32)   0           batch_normalization_118[0][0]    
                                                                 add_33[0][0]                     
__________________________________________________________________________________________________
activation_101 (Activation)     (None, 64, 64, 32)   0           add_34[0][0]                     
__________________________________________________________________________________________________
conv2d_transpose_8 (Conv2DTrans (None, 128, 128, 16) 4624        activation_101[0][0]             
__________________________________________________________________________________________________
concatenate_8 (Concatenate)     (None, 128, 128, 32) 0           conv2d_transpose_8[0][0]         
                                                                 activation_52[0][0]              
__________________________________________________________________________________________________
dropout_17 (Dropout)            (None, 128, 128, 32) 0           concatenate_8[0][0]              
__________________________________________________________________________________________________
conv2d_87 (Conv2D)              (None, 128, 128, 16) 4624        dropout_17[0][0]                 
__________________________________________________________________________________________________
activation_102 (Activation)     (None, 128, 128, 16) 0           conv2d_87[0][0]                  
__________________________________________________________________________________________________
batch_normalization_119 (BatchN (None, 128, 128, 16) 64          activation_102[0][0]             
__________________________________________________________________________________________________
conv2d_88 (Conv2D)              (None, 128, 128, 16) 2320        batch_normalization_119[0][0]    
__________________________________________________________________________________________________
batch_normalization_120 (BatchN (None, 128, 128, 16) 64          conv2d_88[0][0]                  
__________________________________________________________________________________________________
activation_103 (Activation)     (None, 128, 128, 16) 0           batch_normalization_120[0][0]    
__________________________________________________________________________________________________
batch_normalization_121 (BatchN (None, 128, 128, 16) 64          activation_103[0][0]             
__________________________________________________________________________________________________
activation_104 (Activation)     (None, 128, 128, 16) 0           batch_normalization_121[0][0]    
__________________________________________________________________________________________________
conv2d_89 (Conv2D)              (None, 128, 128, 16) 2320        activation_104[0][0]             
__________________________________________________________________________________________________
batch_normalization_122 (BatchN (None, 128, 128, 16) 64          conv2d_89[0][0]                  
__________________________________________________________________________________________________
add_35 (Add)                    (None, 128, 128, 16) 0           batch_normalization_122[0][0]    
                                                                 conv2d_87[0][0]                  
__________________________________________________________________________________________________
activation_105 (Activation)     (None, 128, 128, 16) 0           add_35[0][0]                     
__________________________________________________________________________________________________
batch_normalization_123 (BatchN (None, 128, 128, 16) 64          activation_105[0][0]             
__________________________________________________________________________________________________
conv2d_90 (Conv2D)              (None, 128, 128, 16) 2320        batch_normalization_123[0][0]    
__________________________________________________________________________________________________
batch_normalization_124 (BatchN (None, 128, 128, 16) 64          conv2d_90[0][0]                  
__________________________________________________________________________________________________
activation_106 (Activation)     (None, 128, 128, 16) 0           batch_normalization_124[0][0]    
__________________________________________________________________________________________________
batch_normalization_125 (BatchN (None, 128, 128, 16) 64          activation_106[0][0]             
__________________________________________________________________________________________________
activation_107 (Activation)     (None, 128, 128, 16) 0           batch_normalization_125[0][0]    
__________________________________________________________________________________________________
conv2d_91 (Conv2D)              (None, 128, 128, 16) 2320        activation_107[0][0]             
__________________________________________________________________________________________________
batch_normalization_126 (BatchN (None, 128, 128, 16) 64          conv2d_91[0][0]                  
__________________________________________________________________________________________________
add_36 (Add)                    (None, 128, 128, 16) 0           batch_normalization_126[0][0]    
                                                                 add_35[0][0]                     
__________________________________________________________________________________________________
activation_108 (Activation)     (None, 128, 128, 16) 0           add_36[0][0]                     
__________________________________________________________________________________________________
dropout_18 (Dropout)            (None, 128, 128, 16) 0           activation_108[0][0]             
__________________________________________________________________________________________________
conv2d_92 (Conv2D)              (None, 128, 128, 1)  17          dropout_18[0][0]                 
==================================================================================================
Total params: 5,128,689
Trainable params: 5,116,913
Non-trainable params: 11,776
__________________________________________________________________________________________________

In [26]:
# Cyclical learning rate (https://github.com/bckenstler/CLR): 'triangular2' oscillates
# the LR between base_lr and max_lr, halving the amplitude after each cycle.
clr_triangular = CyclicLR(mode='triangular2', base_lr=0.0001, max_lr=0.006)

# All callbacks monitor the custom IoU metric on the validation set; mode='max'
# because a higher IoU is better (the default 'auto' cannot infer this for a
# custom metric name).
early_stopping = EarlyStopping(monitor='val_my_iou_metric', mode='max',
                               patience=20, verbose=1)
model_checkpoint = ModelCheckpoint("./unet_best1.model", monitor='val_my_iou_metric',
                                   mode='max', save_best_only=True, verbose=1)
# NOTE(review): reduce_lr is built but intentionally NOT passed to fit() below —
# the cyclical LR schedule replaces plateau-based decay. Kept so the two
# schedules can be swapped by editing the callbacks list only.
reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric', mode='max',
                              factor=0.2, patience=5, min_lr=0.00001, verbose=1)
#https://github.com/bckenstler/CLR
epochs = 200
batch_size = 32

history = model.fit(x_train2, y_train2,
                    # Keras expects a (x_val, y_val) tuple here; a list is
                    # ambiguous with multi-input models and breaks on newer
                    # Keras versions.
                    validation_data=(x_valid, y_valid),
                    epochs=epochs,
                    batch_size=batch_size,
                    callbacks=[early_stopping, model_checkpoint, clr_triangular],
                    verbose=2)


Train on 6400 samples, validate on 800 samples
Epoch 1/200
 - 67s - loss: 0.4553 - my_iou_metric: 0.1038 - val_loss: 0.4638 - val_my_iou_metric: 0.1141

Epoch 00001: val_my_iou_metric improved from -inf to 0.11413, saving model to ./unet_best1.model
Epoch 2/200
 - 56s - loss: 0.3151 - my_iou_metric: 0.2564 - val_loss: 0.5788 - val_my_iou_metric: 0.2230

Epoch 00002: val_my_iou_metric improved from 0.11413 to 0.22300, saving model to ./unet_best1.model
Epoch 3/200
 - 58s - loss: 0.2668 - my_iou_metric: 0.4053 - val_loss: 0.4495 - val_my_iou_metric: 0.2657

Epoch 00003: val_my_iou_metric improved from 0.22300 to 0.26575, saving model to ./unet_best1.model
Epoch 4/200
 - 59s - loss: 0.2650 - my_iou_metric: 0.4650 - val_loss: 0.3026 - val_my_iou_metric: 0.4848

Epoch 00004: val_my_iou_metric improved from 0.26575 to 0.48475, saving model to ./unet_best1.model
Epoch 5/200
 - 59s - loss: 0.2522 - my_iou_metric: 0.4889 - val_loss: 0.2738 - val_my_iou_metric: 0.5985

Epoch 00005: val_my_iou_metric improved from 0.48475 to 0.59850, saving model to ./unet_best1.model
Epoch 6/200
 - 56s - loss: 0.2461 - my_iou_metric: 0.5167 - val_loss: 0.3482 - val_my_iou_metric: 0.4636

Epoch 00006: val_my_iou_metric did not improve from 0.59850
Epoch 7/200
 - 55s - loss: 0.2355 - my_iou_metric: 0.5223 - val_loss: 0.2975 - val_my_iou_metric: 0.5672

Epoch 00007: val_my_iou_metric did not improve from 0.59850
Epoch 8/200
 - 56s - loss: 0.2396 - my_iou_metric: 0.5279 - val_loss: 0.5493 - val_my_iou_metric: 0.0644

Epoch 00008: val_my_iou_metric did not improve from 0.59850
Epoch 9/200
 - 55s - loss: 0.2446 - my_iou_metric: 0.5228 - val_loss: 0.2758 - val_my_iou_metric: 0.5555

Epoch 00009: val_my_iou_metric did not improve from 0.59850
Epoch 10/200
 - 55s - loss: 0.2322 - my_iou_metric: 0.5445 - val_loss: 0.3324 - val_my_iou_metric: 0.4896

Epoch 00010: val_my_iou_metric did not improve from 0.59850
Epoch 11/200
 - 55s - loss: 0.2203 - my_iou_metric: 0.5619 - val_loss: 0.4435 - val_my_iou_metric: 0.4064

Epoch 00011: val_my_iou_metric did not improve from 0.59850
Epoch 12/200
 - 55s - loss: 0.2072 - my_iou_metric: 0.5666 - val_loss: 0.3854 - val_my_iou_metric: 0.4574

Epoch 00012: val_my_iou_metric did not improve from 0.59850
Epoch 13/200
 - 55s - loss: 0.1900 - my_iou_metric: 0.6035 - val_loss: 0.2662 - val_my_iou_metric: 0.5929

Epoch 00013: val_my_iou_metric did not improve from 0.59850
Epoch 14/200
 - 56s - loss: 0.1819 - my_iou_metric: 0.6119 - val_loss: 0.1717 - val_my_iou_metric: 0.6536

Epoch 00014: val_my_iou_metric improved from 0.59850 to 0.65363, saving model to ./unet_best1.model
Epoch 15/200
 - 56s - loss: 0.1780 - my_iou_metric: 0.6353 - val_loss: 0.3408 - val_my_iou_metric: 0.4946

Epoch 00015: val_my_iou_metric did not improve from 0.65363
Epoch 16/200
 - 55s - loss: 0.2300 - my_iou_metric: 0.5289 - val_loss: 0.2401 - val_my_iou_metric: 0.5851

Epoch 00016: val_my_iou_metric did not improve from 0.65363
Epoch 17/200
 - 55s - loss: 0.1880 - my_iou_metric: 0.6077 - val_loss: 0.1683 - val_my_iou_metric: 0.6820

Epoch 00017: val_my_iou_metric improved from 0.65363 to 0.68200, saving model to ./unet_best1.model
Epoch 18/200
 - 55s - loss: 0.1732 - my_iou_metric: 0.6355 - val_loss: 0.1819 - val_my_iou_metric: 0.6671

Epoch 00018: val_my_iou_metric did not improve from 0.68200
Epoch 19/200
 - 55s - loss: 0.1598 - my_iou_metric: 0.6673 - val_loss: 0.1586 - val_my_iou_metric: 0.7021

Epoch 00019: val_my_iou_metric improved from 0.68200 to 0.70213, saving model to ./unet_best1.model
Epoch 20/200
 - 55s - loss: 0.1511 - my_iou_metric: 0.6852 - val_loss: 0.1531 - val_my_iou_metric: 0.6878

Epoch 00020: val_my_iou_metric did not improve from 0.70213
Epoch 21/200
 - 55s - loss: 0.1476 - my_iou_metric: 0.6792 - val_loss: 0.1593 - val_my_iou_metric: 0.6871

Epoch 00021: val_my_iou_metric did not improve from 0.70213
Epoch 22/200
 - 55s - loss: 0.1483 - my_iou_metric: 0.6800 - val_loss: 0.1558 - val_my_iou_metric: 0.6867

Epoch 00022: val_my_iou_metric did not improve from 0.70213
Epoch 23/200
 - 56s - loss: 0.1514 - my_iou_metric: 0.6798 - val_loss: 0.1750 - val_my_iou_metric: 0.6314

Epoch 00023: val_my_iou_metric did not improve from 0.70213
Epoch 24/200
 - 55s - loss: 0.1486 - my_iou_metric: 0.6846 - val_loss: 0.1841 - val_my_iou_metric: 0.6694

Epoch 00024: val_my_iou_metric did not improve from 0.70213
Epoch 25/200
 - 55s - loss: 0.1516 - my_iou_metric: 0.6793 - val_loss: 0.1793 - val_my_iou_metric: 0.6785

Epoch 00025: val_my_iou_metric did not improve from 0.70213
Epoch 26/200
 - 57s - loss: 0.1538 - my_iou_metric: 0.6758 - val_loss: 0.1582 - val_my_iou_metric: 0.7105

Epoch 00026: val_my_iou_metric improved from 0.70213 to 0.71050, saving model to ./unet_best1.model
Epoch 27/200
 - 57s - loss: 0.1560 - my_iou_metric: 0.6756 - val_loss: 0.2016 - val_my_iou_metric: 0.6531

Epoch 00027: val_my_iou_metric did not improve from 0.71050
Epoch 28/200
 - 56s - loss: 0.1618 - my_iou_metric: 0.6634 - val_loss: 0.1613 - val_my_iou_metric: 0.6819

Epoch 00028: val_my_iou_metric did not improve from 0.71050
Epoch 29/200
 - 58s - loss: 0.1598 - my_iou_metric: 0.6620 - val_loss: 0.1766 - val_my_iou_metric: 0.6721

Epoch 00029: val_my_iou_metric did not improve from 0.71050
Epoch 30/200
 - 56s - loss: 0.1663 - my_iou_metric: 0.6611 - val_loss: 0.1823 - val_my_iou_metric: 0.7003

Epoch 00030: val_my_iou_metric did not improve from 0.71050
Epoch 31/200
 - 57s - loss: 0.1638 - my_iou_metric: 0.6713 - val_loss: 0.1776 - val_my_iou_metric: 0.7011

Epoch 00031: val_my_iou_metric did not improve from 0.71050
Epoch 32/200
 - 56s - loss: 0.1555 - my_iou_metric: 0.6704 - val_loss: 0.1733 - val_my_iou_metric: 0.6665

Epoch 00032: val_my_iou_metric did not improve from 0.71050
Epoch 33/200
 - 57s - loss: 0.1561 - my_iou_metric: 0.6806 - val_loss: 0.1594 - val_my_iou_metric: 0.7070

Epoch 00033: val_my_iou_metric did not improve from 0.71050
Epoch 34/200
 - 57s - loss: 0.1468 - my_iou_metric: 0.6890 - val_loss: 0.1729 - val_my_iou_metric: 0.6787

Epoch 00034: val_my_iou_metric did not improve from 0.71050
Epoch 35/200
 - 57s - loss: 0.1422 - my_iou_metric: 0.7037 - val_loss: 0.1471 - val_my_iou_metric: 0.7133

Epoch 00035: val_my_iou_metric improved from 0.71050 to 0.71325, saving model to ./unet_best1.model
Epoch 36/200
 - 56s - loss: 0.1368 - my_iou_metric: 0.7086 - val_loss: 0.1600 - val_my_iou_metric: 0.7247

Epoch 00036: val_my_iou_metric improved from 0.71325 to 0.72475, saving model to ./unet_best1.model
Epoch 37/200
 - 55s - loss: 0.1311 - my_iou_metric: 0.7198 - val_loss: 0.1695 - val_my_iou_metric: 0.6849

Epoch 00037: val_my_iou_metric did not improve from 0.72475
Epoch 38/200
 - 55s - loss: 0.1305 - my_iou_metric: 0.7155 - val_loss: 0.1541 - val_my_iou_metric: 0.7229

Epoch 00038: val_my_iou_metric did not improve from 0.72475
Epoch 39/200
 - 56s - loss: 0.1225 - my_iou_metric: 0.7304 - val_loss: 0.1523 - val_my_iou_metric: 0.7249

Epoch 00039: val_my_iou_metric improved from 0.72475 to 0.72487, saving model to ./unet_best1.model
Epoch 40/200
 - 57s - loss: 0.1194 - my_iou_metric: 0.7389 - val_loss: 0.1450 - val_my_iou_metric: 0.7395

Epoch 00040: val_my_iou_metric improved from 0.72487 to 0.73950, saving model to ./unet_best1.model
Epoch 41/200
 - 56s - loss: 0.1162 - my_iou_metric: 0.7415 - val_loss: 0.1453 - val_my_iou_metric: 0.7328

Epoch 00041: val_my_iou_metric did not improve from 0.73950
Epoch 42/200
 - 57s - loss: 0.1167 - my_iou_metric: 0.7360 - val_loss: 0.1442 - val_my_iou_metric: 0.7285

Epoch 00042: val_my_iou_metric did not improve from 0.73950
Epoch 43/200
 - 56s - loss: 0.1171 - my_iou_metric: 0.7360 - val_loss: 0.1477 - val_my_iou_metric: 0.7283

Epoch 00043: val_my_iou_metric did not improve from 0.73950
Epoch 44/200
 - 57s - loss: 0.1186 - my_iou_metric: 0.7367 - val_loss: 0.1463 - val_my_iou_metric: 0.7558

Epoch 00044: val_my_iou_metric improved from 0.73950 to 0.75575, saving model to ./unet_best1.model
Epoch 45/200
 - 56s - loss: 0.1205 - my_iou_metric: 0.7334 - val_loss: 0.1361 - val_my_iou_metric: 0.7437

Epoch 00045: val_my_iou_metric did not improve from 0.75575
Epoch 46/200
 - 56s - loss: 0.1226 - my_iou_metric: 0.7319 - val_loss: 0.1498 - val_my_iou_metric: 0.7206

Epoch 00046: val_my_iou_metric did not improve from 0.75575
Epoch 47/200
 - 55s - loss: 0.1201 - my_iou_metric: 0.7315 - val_loss: 0.1510 - val_my_iou_metric: 0.7422

Epoch 00047: val_my_iou_metric did not improve from 0.75575
Epoch 48/200
 - 56s - loss: 0.1233 - my_iou_metric: 0.7277 - val_loss: 0.1702 - val_my_iou_metric: 0.7324

Epoch 00048: val_my_iou_metric did not improve from 0.75575
Epoch 49/200
 - 56s - loss: 0.1242 - my_iou_metric: 0.7313 - val_loss: 0.1584 - val_my_iou_metric: 0.7260

Epoch 00049: val_my_iou_metric did not improve from 0.75575
Epoch 50/200
 - 56s - loss: 0.1275 - my_iou_metric: 0.7284 - val_loss: 0.1959 - val_my_iou_metric: 0.6964

Epoch 00050: val_my_iou_metric did not improve from 0.75575
Epoch 51/200
 - 57s - loss: 0.1286 - my_iou_metric: 0.7195 - val_loss: 0.1681 - val_my_iou_metric: 0.7153

Epoch 00051: val_my_iou_metric did not improve from 0.75575
Epoch 52/200
 - 56s - loss: 0.1267 - my_iou_metric: 0.7247 - val_loss: 0.1579 - val_my_iou_metric: 0.7277

Epoch 00052: val_my_iou_metric did not improve from 0.75575
Epoch 53/200
 - 56s - loss: 0.1272 - my_iou_metric: 0.7288 - val_loss: 0.1458 - val_my_iou_metric: 0.7231

Epoch 00053: val_my_iou_metric did not improve from 0.75575
Epoch 54/200
 - 55s - loss: 0.1190 - my_iou_metric: 0.7357 - val_loss: 0.1578 - val_my_iou_metric: 0.7215

Epoch 00054: val_my_iou_metric did not improve from 0.75575
Epoch 55/200
 - 56s - loss: 0.1166 - my_iou_metric: 0.7414 - val_loss: 0.1524 - val_my_iou_metric: 0.7318

Epoch 00055: val_my_iou_metric did not improve from 0.75575
Epoch 56/200
 - 56s - loss: 0.1118 - my_iou_metric: 0.7465 - val_loss: 0.1386 - val_my_iou_metric: 0.7501

Epoch 00056: val_my_iou_metric did not improve from 0.75575
Epoch 57/200
 - 56s - loss: 0.1069 - my_iou_metric: 0.7519 - val_loss: 0.1369 - val_my_iou_metric: 0.7538

Epoch 00057: val_my_iou_metric did not improve from 0.75575
Epoch 58/200
 - 56s - loss: 0.1055 - my_iou_metric: 0.7515 - val_loss: 0.1368 - val_my_iou_metric: 0.7601

Epoch 00058: val_my_iou_metric improved from 0.75575 to 0.76013, saving model to ./unet_best1.model
Epoch 59/200
 - 56s - loss: 0.1029 - my_iou_metric: 0.7592 - val_loss: 0.1391 - val_my_iou_metric: 0.7506

Epoch 00059: val_my_iou_metric did not improve from 0.76013
Epoch 60/200
 - 57s - loss: 0.0999 - my_iou_metric: 0.7604 - val_loss: 0.1395 - val_my_iou_metric: 0.7537

Epoch 00060: val_my_iou_metric did not improve from 0.76013
Epoch 61/200
 - 58s - loss: 0.0995 - my_iou_metric: 0.7626 - val_loss: 0.1416 - val_my_iou_metric: 0.7546

Epoch 00061: val_my_iou_metric did not improve from 0.76013
Epoch 62/200
 - 57s - loss: 0.0991 - my_iou_metric: 0.7642 - val_loss: 0.1411 - val_my_iou_metric: 0.7567

Epoch 00062: val_my_iou_metric did not improve from 0.76013
Epoch 63/200
 - 57s - loss: 0.0971 - my_iou_metric: 0.7673 - val_loss: 0.1389 - val_my_iou_metric: 0.7584

Epoch 00063: val_my_iou_metric did not improve from 0.76013
Epoch 64/200
 - 57s - loss: 0.1008 - my_iou_metric: 0.7596 - val_loss: 0.1357 - val_my_iou_metric: 0.7482

Epoch 00064: val_my_iou_metric did not improve from 0.76013
Epoch 65/200
 - 57s - loss: 0.1005 - my_iou_metric: 0.7633 - val_loss: 0.1448 - val_my_iou_metric: 0.7551

Epoch 00065: val_my_iou_metric did not improve from 0.76013
Epoch 66/200
 - 57s - loss: 0.1012 - my_iou_metric: 0.7602 - val_loss: 0.1442 - val_my_iou_metric: 0.7564

Epoch 00066: val_my_iou_metric did not improve from 0.76013
Epoch 67/200
 - 59s - loss: 0.1035 - my_iou_metric: 0.7595 - val_loss: 0.1298 - val_my_iou_metric: 0.7486

Epoch 00067: val_my_iou_metric did not improve from 0.76013
Epoch 68/200
 - 59s - loss: 0.1051 - my_iou_metric: 0.7569 - val_loss: 0.1473 - val_my_iou_metric: 0.7486

Epoch 00068: val_my_iou_metric did not improve from 0.76013
Epoch 69/200
 - 57s - loss: 0.1060 - my_iou_metric: 0.7587 - val_loss: 0.1418 - val_my_iou_metric: 0.7469

Epoch 00069: val_my_iou_metric did not improve from 0.76013
Epoch 70/200
 - 56s - loss: 0.1041 - my_iou_metric: 0.7605 - val_loss: 0.1494 - val_my_iou_metric: 0.7565

Epoch 00070: val_my_iou_metric did not improve from 0.76013
Epoch 71/200
 - 57s - loss: 0.1006 - my_iou_metric: 0.7567 - val_loss: 0.1489 - val_my_iou_metric: 0.7534

Epoch 00071: val_my_iou_metric did not improve from 0.76013
Epoch 72/200
 - 57s - loss: 0.1036 - my_iou_metric: 0.7586 - val_loss: 0.1355 - val_my_iou_metric: 0.7503

Epoch 00072: val_my_iou_metric did not improve from 0.76013
Epoch 73/200
 - 57s - loss: 0.0999 - my_iou_metric: 0.7650 - val_loss: 0.1454 - val_my_iou_metric: 0.7439

Epoch 00073: val_my_iou_metric did not improve from 0.76013
Epoch 74/200
 - 57s - loss: 0.1017 - my_iou_metric: 0.7582 - val_loss: 0.1271 - val_my_iou_metric: 0.7610

Epoch 00074: val_my_iou_metric improved from 0.76013 to 0.76100, saving model to ./unet_best1.model
Epoch 75/200
 - 57s - loss: 0.0974 - my_iou_metric: 0.7651 - val_loss: 0.1359 - val_my_iou_metric: 0.7566

Epoch 00075: val_my_iou_metric did not improve from 0.76100
Epoch 76/200
 - 56s - loss: 0.0985 - my_iou_metric: 0.7729 - val_loss: 0.1425 - val_my_iou_metric: 0.7582

Epoch 00076: val_my_iou_metric did not improve from 0.76100
Epoch 77/200
 - 57s - loss: 0.0942 - my_iou_metric: 0.7692 - val_loss: 0.1387 - val_my_iou_metric: 0.7617

Epoch 00077: val_my_iou_metric improved from 0.76100 to 0.76175, saving model to ./unet_best1.model
Epoch 78/200
 - 57s - loss: 0.0932 - my_iou_metric: 0.7731 - val_loss: 0.1408 - val_my_iou_metric: 0.7572

Epoch 00078: val_my_iou_metric did not improve from 0.76175
Epoch 79/200
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-26-80d8d718d0fb> in <module>()
     14                     batch_size=batch_size,
     15                     callbacks=[early_stopping, model_checkpoint, clr_triangular],
---> 16                     verbose=2)

~/anaconda3/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
   1035                                         initial_epoch=initial_epoch,
   1036                                         steps_per_epoch=steps_per_epoch,
-> 1037                                         validation_steps=validation_steps)
   1038 
   1039     def evaluate(self, x=None, y=None,

~/anaconda3/lib/python3.6/site-packages/keras/engine/training_arrays.py in fit_loop(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
    197                     ins_batch[i] = ins_batch[i].toarray()
    198 
--> 199                 outs = f(ins_batch)
    200                 outs = to_list(outs)
    201                 for l, o in zip(out_labels, outs):

~/anaconda3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2664                 return self._legacy_call(inputs)
   2665 
-> 2666             return self._call(inputs)
   2667         else:
   2668             if py_any(is_tensor(x) for x in inputs):

~/anaconda3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in _call(self, inputs)
   2634                                 symbol_vals,
   2635                                 session)
-> 2636         fetched = self._callable_fn(*array_vals)
   2637         return fetched[:len(self.outputs)]
   2638 

~/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in __call__(self, *args)
   1449         if self._session._created_with_new_api:
   1450           return tf_session.TF_SessionRunCallable(
-> 1451               self._session._session, self._handle, args, status, None)
   1452         else:
   1453           return tf_session.TF_DeprecatedSessionRunCallable(

KeyboardInterrupt: 

In [ ]:
import matplotlib.pyplot as plt
# Training history of the custom IoU metric (epoch 0 dropped as warm-up noise).
# Fixed mislabeled figure: this plots the IoU metric, not the loss, so the
# title/ylabel said "loss" incorrectly; legend capitalization also normalized.
plt.plot(history.history['my_iou_metric'][1:])
plt.plot(history.history['val_my_iou_metric'][1:])
plt.title('model IoU')
plt.ylabel('IoU')
plt.xlabel('epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()

In [ ]:
# Train vs validation loss curves.
# NOTE(review): ax_acc is created but never drawn on — presumably reserved for an
# accuracy/IoU panel; confirm or drop the second subplot.
fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(15,5))
ax_loss.plot(history.epoch, history.history["loss"], label="Train loss")
ax_loss.plot(history.epoch, history.history["val_loss"], label="Validation loss")

In [28]:
model = load_model("./keras 741lb.model",custom_objects={'my_iou_metric': my_iou_metric})

In [13]:
def predict_result(model, x_test, img_size_target):
    """Test-time augmentation: predict on the images and their horizontal
    mirrors, un-mirror the second set, and return the element-wise mean.

    model: object with a Keras-style .predict(batch) method
    x_test: batch of images, shape (n, H, W, C)
    img_size_target: spatial size used to reshape predictions to (n, s, s)
    """
    mirrored = np.array([np.fliplr(img) for img in x_test])
    preds_plain = model.predict(x_test).reshape(-1, img_size_target, img_size_target)
    preds_mirror = model.predict(mirrored).reshape(-1, img_size_target, img_size_target)
    # Flip the mirrored predictions back so both sets are in the original frame.
    preds_restored = np.array([np.fliplr(p) for p in preds_mirror])
    return (preds_plain + preds_restored) / 2

In [46]:
# Flip-TTA predictions on the validation set at network resolution (128),
# then resize back to the original 101x101 for scoring.
preds_valid = predict_result(model,x_valid,128)
preds_valid2 = np.array([downsample(x) for x in preds_valid])

# NOTE(review): resizing the binary masks interpolates, producing non-binary
# values in y_valid2 — confirm the IoU scorer tolerates this.
y_valid2 = np.array([downsample(x) for x in y_valid])

In [32]:
## Scoring for last model
# Sweep 31 binarisation thresholds in [0.3, 0.7] and score each against the
# validation masks with the competition IoU metric (defined elsewhere in the notebook).
thresholds = np.linspace(0.3, 0.7, 31)
ious = np.array([iou_metric_batch(y_valid2, np.int32(preds_valid2 > threshold)) for threshold in tqdm_notebook(thresholds)])




In [33]:
# Pick the threshold that maximises validation IoU and visualise the sweep.
threshold_best_index = np.argmax(ious) 
iou_best = ious[threshold_best_index]
threshold_best = thresholds[threshold_best_index]  # reused below when encoding the test set

plt.plot(thresholds, ious)
plt.plot(threshold_best, iou_best, "xr", label="Best threshold")
plt.xlabel("Threshold")
plt.ylabel("IoU")
plt.title("Threshold vs IoU ({}, {})".format(threshold_best, iou_best))
plt.legend()


Out[33]:
<matplotlib.legend.Legend at 0x7f62c2092908>

In [34]:
"""
used for converting the decoded image to rle mask
Fast compared to previous one
"""
def rle_encode(im):
    '''
    Run-length encode a binary mask in Kaggle submission format.

    im: numpy array, 1 - mask, 0 - background
    Returns the encoding as a space-separated string of (start, length)
    pairs, with 1-indexed positions in column-major (Fortran) order.
    '''
    # Column-major scan, zero-padded so every run has both a start and an end.
    flat = im.flatten(order='F')
    padded = np.concatenate([[0], flat, [0]])
    # 1-based positions where the value changes: alternately run starts and run ends.
    change_points = np.flatnonzero(padded[1:] != padded[:-1]) + 1
    # Convert each (start, end) pair into (start, length) in place.
    change_points[1::2] -= change_points[::2]
    return ' '.join(map(str, change_points))

In [47]:
x_test = np.array([upsample(np.array(load_img("./input/test/images/{}.png".format(idx), grayscale=True))) / 255 for idx in tqdm_notebook(test_df.index)]).reshape(-1, img_size_target, img_size_target, 1)




In [48]:
preds_test = predict_result(model,x_test,img_size_target)

In [26]:
#preds_test.dump('7997.pkl')

In [2]:
preds_test2 = np.load("7997.pkl")

In [3]:
preds_test2.shape


Out[3]:
(18000, 101, 101)

In [58]:
temp = preds_test2

In [60]:
# Resize the current model's 128x128 test predictions back to 101x101, writing
# them into `temp` slot by slot (only the position i is used; idx is unused and
# serves just to drive the tqdm progress bar over the test ids).
for i, idx in enumerate(tqdm_notebook(test_df.index.values)):
    temp[i] = downsample(preds_test[i])




In [70]:
preds_avg = ((temp)*0.3 + preds_test2*0.7)

In [ ]:
def filter_image(img):
    """Suppress tiny masks: if the total predicted area is below 77 pixels the
    whole mask is treated as noise and replaced with an all-zero mask of the
    same shape; otherwise the input is returned unchanged."""
    # 77 px is an empirically chosen minimum-area cut-off.
    return np.zeros(img.shape) if img.sum() < 77 else img

In [ ]:
import time

# Binarise each averaged prediction at the validated best threshold, drop tiny
# masks, and run-length encode everything, timing the whole pass.
t1 = time.time()
pred_dict = {idx: rle_encode(filter_image(preds_avg[i] > threshold_best)) for i, idx in enumerate(tqdm_notebook(test_df.index.values))}
t2 = time.time()
# Removed a leftover, body-less `for idx, pred in tqdm(zip(ids, predictions)):`
# line: it was a syntax error and referenced undefined names (tqdm, ids, predictions).

In [40]:
# Build the submission frame: one row per image id, one RLE mask string per row.
sub = pd.DataFrame.from_dict(pred_dict, orient='index')
sub = sub.rename(columns={0: 'rle_mask'})
sub.index.name = 'id'
sub.to_csv('./output/resnet batch norm cycle.csv')

In [ ]: