Play with the dataset


In [2]:
%load_ext autoreload
%autoreload 2

import sys
sys.path.append('/notebook/imgaug/')

import os, numpy, glob, collections, random, h5py, shutil, pandas, \
    time, functools, json, traceback, itertools

from PIL import Image
from IPython.display import display, SVG
from joblib import Parallel, delayed

import matplotlib.pyplot as plt
%pylab inline

from sklearn.utils import shuffle
import imgaug.imgaug as ia
from imgaug.imgaug import augmenters as iaa
from imgaug.imgaug import parameters as iap

from prepare_images_utils import *


Populating the interactive namespace from numpy and matplotlib
/root/.pyenv/versions/3.6.0/lib/python3.6/site-packages/IPython/core/magics/pylab.py:160: UserWarning: pylab import has clobbered these variables: ['random']
`%matplotlib` prevents importing * from pylab and numpy
  "\n`%matplotlib` prevents importing * from pylab and numpy"
Using TensorFlow backend.

Build some statistics


In [3]:
# Histogram bin edges spanning the full uint8 pixel range [0, 255].
# The final edge is 255 so numpy.histogram counts pure-white pixels:
# the previous arange-based edges stopped at 250, which made histogram
# silently drop pixels valued 251-255 from every statistic below.
hist_bins = numpy.array(list(range(0, 251, 10)) + [255], dtype=numpy.uint8)
# Index splitting the bins into a "dark" lower half and a "bright" upper half;
# bins at and above this index start at pixel value 130.
half_bins = hist_bins.shape[0] // 2
def get_image_stat(fname):
    """Return the pixel-value histogram (per-bin counts) of the image at ``fname``.

    Bins are defined by the module-level ``hist_bins`` edges; the counts are
    taken over all pixels of all channels of the image.
    """
    img = Image.open(fname)
    pixels = numpy.asarray(img)
    return numpy.histogram(pixels, bins=hist_bins)[0]


def contains_white(fname):
    """Return True if the image at ``fname`` contains at least one non-zero pixel."""
    return numpy.any(numpy.asarray(Image.open(fname)) > 0)


def get_positive_prefixes(dirname, n_jobs=20):
    """Return path prefixes (mask path minus the '_out.png' suffix) of every
    mask file in ``dirname`` that contains at least one non-zero pixel.

    The white-pixel check runs in parallel across ``n_jobs`` workers.
    """
    mask_files = glob.glob(os.path.join(dirname, '*_out.png'))
    has_white_flags = Parallel(n_jobs=n_jobs)(
        delayed(contains_white)(path) for path in mask_files)
    positive = []
    for path, has_white in zip(mask_files, has_white_flags):
        if has_white:
            positive.append(path[:-len('_out.png')])
    return positive


def copy_only_positive(src_dir, target_dir, n_jobs=20):
    """Copy the '_in.png'/'_out.png' file pair of every positive sample
    (mask with at least one white pixel) from ``src_dir`` into ``target_dir``.
    """
    positive_prefixes = get_positive_prefixes(src_dir, n_jobs=n_jobs)
    for prefix in positive_prefixes:
        for suffix in ('_in.png', '_out.png'):
            shutil.copy2(prefix + suffix, target_dir)


def aggregate_stat_for_multiple_images(fnames, n_jobs=20):
    """Aggregate per-image histograms over ``fnames``.

    Returns a tuple ``(mean_hist, number_of_white_images)`` where ``mean_hist``
    is the average per-bin pixel histogram and ``number_of_white_images`` counts
    images with at least one pixel in the upper half of the bins (value >= 130).

    ``n_jobs`` controls the joblib parallelism (was a hard-coded 20).
    """
    # numpy.float was deprecated in NumPy 1.20 and removed in 1.24 -> float64.
    marginal_hist = numpy.zeros(hist_bins.shape[0] - 1,
                                dtype=numpy.float64)
    number_of_white_images = 0
    img_cnt = 0
    for cur_hist in Parallel(n_jobs=n_jobs)(delayed(get_image_stat)(fname)
                                            for fname in fnames):
        marginal_hist += cur_hist
        img_cnt += 1
        # An image counts as "white" if any pixel falls in the bright half.
        if cur_hist[half_bins:].sum() > 0:
            number_of_white_images += 1
    if img_cnt == 0:
        # Empty input: avoid a 0/0 division (previously produced NaNs).
        return marginal_hist, 0
    return marginal_hist / img_cnt, number_of_white_images


def _dir_channel_stat(ax, dirname, suffix, label):
    """Plot the mean pixel histogram of ``*<suffix>`` files in ``dirname`` on
    ``ax`` and print how many of them contain bright ("white") pixels."""
    files = glob.glob(os.path.join(dirname, '*' + suffix))
    files_cnt = len(files)
    mean_hist, white_cnt = aggregate_stat_for_multiple_images(files)
    ax.bar(hist_bins[:-1], mean_hist, label=label)
    ax.legend()
    # Guard against empty directories instead of raising ZeroDivisionError.
    percent = (white_cnt / files_cnt * 100.0) if files_cnt else 0.0
    print(f'{label} white {white_cnt} / {files_cnt}, {percent}%')


def folder_stat(dirname):
    """Show stacked histograms of the input images ('*_in.png') and target
    masks ('*_out.png') in ``dirname`` and print, for each, the share of
    files containing any bright pixels.

    The two halves were previously copy-pasted; they now share a helper.
    """
    fig, (in_ax, out_ax) = plt.subplots(2)
    _dir_channel_stat(in_ax, dirname, '_in.png', 'In')
    _dir_channel_stat(out_ax, dirname, '_out.png', 'Out')

In [4]:
# copy_only_positive('./data/5_ready/train/', './data/5_ready/train_only_positive/')
# copy_only_positive('./data/5_ready/val/', './data/5_ready/val_only_positive/')
# copy_only_positive('./data/5_ready/test/', './data/5_ready/test_only_positive/')

In [5]:
# folder_stat('./data/5_ready/train/')

In [6]:
# folder_stat('./data/5_ready/train_only_positive/')

In [7]:
# folder_stat('./data/5_ready/test/')

Prepare dataset

Full Train for Production


In [8]:
# convert_directory_to_hdf5('/notebook/data/7_full/train/', '/notebook/data/7_full/train.hdf')
# convert_directory_to_hdf5('/notebook/data/7_full/val/', '/notebook/data/7_full/val.hdf')

Train-Val


In [9]:
# !rm /notebook/data/5_ready/*.hdf

In [10]:
# %%time
# convert_directory_to_hdf5('/notebook/data/5_ready/train/', '/notebook/data/5_ready/train.hdf')
# convert_directory_to_hdf5('/notebook/data/5_ready/val/', '/notebook/data/5_ready/val.hdf')

CV


In [11]:
# for fold in glob.glob('/notebook/data/6_eval/*/'):
#     convert_directory_to_hdf5(os.path.join(fold, 'train'), os.path.join(fold, 'train.hdf'))
#     convert_directory_to_hdf5(os.path.join(fold, 'val'), os.path.join(fold, 'val.hdf'))

Load


In [12]:
# train_h5.close()
# val_h5.close()
# test_h5.close()

In [13]:
# train_h5 = h5py.File('/notebook/data/5_ready/train.hdf', 'r')
# val_h5 = h5py.File('/notebook/data/5_ready/val.hdf', 'r')
# train_h5 = h5py.File('/notebook/data/7_full/train.hdf', 'r')
# val_h5 = h5py.File('/notebook/data/7_full/val.hdf', 'r')

# train_in, train_out = train_h5['in_data'], train_h5['out_data']
# val_in, val_out = val_h5['in_data'], val_h5['out_data']

# train_in, train_out = load_dataset('/notebook/data/5_ready/train/')
# val_in, val_out = load_dataset('/notebook/data/5_ready/val/')
# train_in = (train_in * 255).astype('uint8')
# train_out = (train_out * 255).astype('uint8')
# val_in = (val_in * 255).astype('uint8')
# val_out = (val_out * 255).astype('uint8')
# print(train_in.shape, train_out.shape)
# print(val_in.shape, val_out.shape)
# # print(test_in.shape, test_out.shape)

In [14]:
# sample_i = 25
# display(arr_to_img(train_in[sample_i, :, :, 0]))
# display(arr_to_img(train_out[sample_i, :, :, :3]))

In [15]:
# sample_i = 14
# display(arr_to_img(val_in[sample_i, :, :, 0]))
# display(arr_to_img(val_out[sample_i, :, :, :3]))

In [25]:
def batch_getter(in_dir, batch_size=1):
    """Endless generator of ``ia.Batch`` objects sampled from ``in_dir``.

    Each iteration loads ``batch_size`` samples via ``load_dataset`` and
    rescales both inputs and targets by 255 as float32 (``load_dataset``
    presumably returns values in [0, 1] - confirm; the augmentation step
    divides by 255 again afterwards).
    """
    while True:
        images, targets = load_dataset(in_dir, n_jobs=6, take_n=batch_size)
        yield ia.Batch(images=(images * 255).astype('float32'),
                       heatmaps=(targets * 255).astype('float32'))

def make_batch_getter(in_dir, batch_size=32):
    """Wrap ``batch_getter`` in a zero-argument callable so the (infinite)
    batch stream can be restarted on demand by the augmentation pipeline."""
    def _start():
        yield from batch_getter(in_dir, batch_size=batch_size)
    return _start

def augmented_batch_gen(base_gen, augmenter, heatmaps_hooks=None):
    """Yield ``(images, heatmaps)`` float32 pairs in [0, 1], augmented with
    ``augmenter`` applied identically to images and heatmaps.

    ``base_gen`` is a zero-argument callable returning a batch generator
    (see ``make_batch_getter``); ``heatmaps_hooks`` is forwarded to the
    heatmap augmentation call so e.g. photometric augmenters can be skipped.

    Bug fix: the original referenced the undefined name ``augseq`` when
    ``augmenter`` was already deterministic, raising NameError.
    """
    for batch in base_gen():
        # Freeze the augmenter's random state so images and heatmaps
        # receive exactly the same (geometric) transforms.
        det = augmenter if augmenter.deterministic else augmenter.to_deterministic()
        images_aug = numpy.asarray(det.augment_images(batch.images)).astype('float32') / 255
        heatmaps_aug = numpy.asarray(
            det.augment_images(batch.heatmaps, hooks=heatmaps_hooks)).astype('float32') / 255
        yield (images_aug, heatmaps_aug)

def make_augmented_batch_gen(base_gen, augmenter, heatmaps_hooks=None):
    """Return a zero-argument callable that starts a fresh augmented batch
    stream over ``base_gen`` (mirrors ``make_batch_getter``'s interface)."""
    def _start():
        yield from augmented_batch_gen(base_gen, augmenter,
                                       heatmaps_hooks=heatmaps_hooks)
    return _start

In [17]:
# %%time
# i = iter(make_batch_getter('./data/5_ready/train/')())
# for _ in range(20):
#     qq = next(i)

In [18]:
# print(next(iter(make_augmented_batch_gen(make_batch_getter(train_in, train_out), augmenter)()))[1].shape)
# display(arr_to_img(next(iter(make_augmented_batch_gen(make_batch_getter(train_in, train_out), augmenter)()))[1][0, :, :]))

Define U-net

Train-Val-Test


In [26]:
# Training configuration for the U-net data pipeline.
BATCH_SIZE = 4
# Every sample is windowed to this fixed spatial size before entering the net.
TRANSFORM_TO_WIDTH = 400
TRANSFORM_TO_HEIGHT = 400
# Emboss is shared by train AND val so the model always sees embossed input.
emboss_aug = iaa.Emboss(alpha=0.5, strength=13.5)
# NOTE(review): iaa.Window is not a stock imgaug augmenter; it presumably comes
# from the patched checkout on /notebook/imgaug (see the sys.path tweak in the
# import cell) and seems to cut a HEIGHTxWIDTH window from each sample - confirm.
window_aug = iaa.Window((TRANSFORM_TO_HEIGHT, TRANSFORM_TO_WIDTH))
# Stochastic geometric augmentation for training only.
train_aug = iaa.Sequential([
    iaa.Fliplr(0.5),
    iaa.Flipud(0.5),
#     iaa.Affine(rotate=iap.DiscreteUniform(0, 3) * 90, cval=(0, 0, 255)),
    iaa.CropAndPad(percent=(-0.10, 0.10), pad_cval=255),
    emboss_aug,
    window_aug
])
# Validation pipeline: only the deterministic preprocessing (no flips/crops).
val_aug = iaa.Sequential([
    emboss_aug,
    window_aug
])
# Pull one batch through the full pipeline to (a) sanity-check it and
# (b) derive the model's input/output shapes in the next cell.
fake_batch = next(iter(make_augmented_batch_gen(make_batch_getter('/notebook/data/5_ready/train/',
                                                                  batch_size=BATCH_SIZE),
                                                train_aug)()))
# for i in range(fake_batch[0].shape[0]):
#     display(arr_to_img(fake_batch[0][i, :, :, 0]))
#     display(arr_to_img(fake_batch[1][i, :, :]))

In [27]:
# Build the U-net sized from the probe batch of the previous cell:
# input shape = one augmented image, output channels = last dim of the target.
# NOTE(review): get_unet_row_col_info, History, EarlyStopping, ModelCheckpoint,
# TensorBoard, ReduceLROnPlateau, load_model and datetime are not imported by
# the visible import cell - presumably they arrive via
# `from prepare_images_utils import *`; confirm, otherwise this cell raises
# NameError on a fresh kernel.
model = get_unet_row_col_info(fake_batch[0].shape[1:], fake_batch[1][0].shape[-1])
# model.summary()
# display(SVG(model_to_dot(model).create(prog='dot', format='svg')))

# Fresh, timestamped TensorBoard run directory for this training session.
log_dir = os.path.join('.',
                       'tb_logs',
                       datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))

# Best-checkpoint path, written by ModelCheckpoint below.
model_file = os.path.join(log_dir, 'model')
# model_file = './full_model'

# Remove a stale checkpoint so "best so far" always refers to this run.
if os.path.exists(model_file):
    os.remove(model_file)
try:
    history = History()
    # Train and validation data are infinite augmented generators; the 2nd
    # positional arg (steps_per_epoch) and validation_steps bound each epoch.
    # Model selection and early stopping both track val_dice_coef_01 (max).
    model.fit_generator(augmented_batch_gen(make_batch_getter('./data/5_ready/train/',
                                                              batch_size=BATCH_SIZE),
                                            train_aug),
                        BATCH_SIZE * 80,
                        epochs=200,
                        callbacks=[
                                   EarlyStopping(verbose=1, patience=10, monitor='val_dice_coef_01', mode='max'),
                                   ModelCheckpoint(filepath=model_file, verbose=1, monitor='val_dice_coef_01', mode='max', save_best_only=True),
                                   history,
                                   TensorBoard(log_dir=log_dir,
#                                                histogram_freq=1,
                                               batch_size=BATCH_SIZE,
                                               write_graph=True,
                                               write_grads=True,
                                               write_images=True),
                                   # NOTE(review): `epsilon` was renamed `min_delta`
                                   # in newer Keras releases - confirm the installed
                                   # version still accepts `epsilon`.
                                   ReduceLROnPlateau(factor=0.5,
                                                     patience=40,
                                                     epsilon=1e-3,
                                                     verbose=1,
                                                     cooldown=10,
                                                     min_lr=1e-6)
                        ],
                        validation_data=augmented_batch_gen(make_batch_getter('./data/5_ready/val/',
                                                                              batch_size=BATCH_SIZE),
                                                            val_aug),
                        validation_steps=BATCH_SIZE * 20,
                        verbose=1)
except KeyboardInterrupt:
    # Manual stop (Kernel -> Interrupt) ends training gracefully; the printed
    # traceback shows where training was interrupted. The `pass` is redundant.
    print(traceback.format_exc())
    pass
# model = load_model(model_file, custom_objects=dict(dice_coef_loss=dice_coef_loss,
#                                                    dice_coef=dice_coef,
#                                                    dice_ce_loss=dice_ce_loss,
#                                                    dice_coef_0=dice_coef_0,
#                                                    dice_coef_1=dice_coef_1,
#                                                    dice_coef_01=dice_coef_01,
#                                                    dice_ce_01_loss=dice_ce_01_loss))


Epoch 1/200
/root/.pyenv/versions/3.6.0/lib/python3.6/site-packages/joblib/parallel.py:547: UserWarning: Multiprocessing-backed parallel loops cannot be nested below threads, setting n_jobs=1
  **self._backend_args)
 29/320 [=>............................] - ETA: 244s - loss: 2.0701 - dice_coef_0: 0.0446 - dice_coef_1: 0.1664 - dice_coef_01: 0.1055 - categorical_crossentropy: 0.8985
/root/.pyenv/versions/3.6.0/lib/python3.6/site-packages/PIL/Image.py:914: UserWarning: Palette images with Transparency   expressed in bytes should be converted to RGBA images
  'to RGBA images')
319/320 [============================>.] - ETA: 0s - loss: 1.4222 - dice_coef_0: 0.0839 - dice_coef_1: 0.2888 - dice_coef_01: 0.1864 - categorical_crossentropy: 0.5247Epoch 00000: val_dice_coef_01 improved from -inf to 0.12713, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 1.4201 - dice_coef_0: 0.0841 - dice_coef_1: 0.2900 - dice_coef_01: 0.1870 - categorical_crossentropy: 0.5239 - val_loss: 1.5789 - val_dice_coef_0: 0.0355 - val_dice_coef_1: 0.2188 - val_dice_coef_01: 0.1271 - val_categorical_crossentropy: 0.4468
Epoch 2/200
319/320 [============================>.] - ETA: 0s - loss: 1.1966 - dice_coef_0: 0.1294 - dice_coef_1: 0.3574 - dice_coef_01: 0.2434 - categorical_crossentropy: 0.4373Epoch 00001: val_dice_coef_01 improved from 0.12713 to 0.24085, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 1.1984 - dice_coef_0: 0.1294 - dice_coef_1: 0.3573 - dice_coef_01: 0.2433 - categorical_crossentropy: 0.4392 - val_loss: 1.1751 - val_dice_coef_0: 0.1145 - val_dice_coef_1: 0.3672 - val_dice_coef_01: 0.2408 - val_categorical_crossentropy: 0.4048
Epoch 3/200
319/320 [============================>.] - ETA: 0s - loss: 1.0849 - dice_coef_0: 0.1535 - dice_coef_1: 0.3941 - dice_coef_01: 0.2738 - categorical_crossentropy: 0.4037Epoch 00002: val_dice_coef_01 improved from 0.24085 to 0.26558, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 1.0847 - dice_coef_0: 0.1535 - dice_coef_1: 0.3937 - dice_coef_01: 0.2736 - categorical_crossentropy: 0.4032 - val_loss: 1.1305 - val_dice_coef_0: 0.1333 - val_dice_coef_1: 0.3979 - val_dice_coef_01: 0.2656 - val_categorical_crossentropy: 0.3971
Epoch 4/200
319/320 [============================>.] - ETA: 0s - loss: 1.0386 - dice_coef_0: 0.1653 - dice_coef_1: 0.4185 - dice_coef_01: 0.2919 - categorical_crossentropy: 0.3912Epoch 00003: val_dice_coef_01 improved from 0.26558 to 0.29671, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 1.0394 - dice_coef_0: 0.1652 - dice_coef_1: 0.4182 - dice_coef_01: 0.2917 - categorical_crossentropy: 0.3917 - val_loss: 0.9709 - val_dice_coef_0: 0.1542 - val_dice_coef_1: 0.4392 - val_dice_coef_01: 0.2967 - val_categorical_crossentropy: 0.3289
Epoch 5/200
319/320 [============================>.] - ETA: 0s - loss: 1.0554 - dice_coef_0: 0.1734 - dice_coef_1: 0.4110 - dice_coef_01: 0.2922 - categorical_crossentropy: 0.4010Epoch 00004: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 1.0574 - dice_coef_0: 0.1732 - dice_coef_1: 0.4100 - dice_coef_01: 0.2916 - categorical_crossentropy: 0.4013 - val_loss: 1.1298 - val_dice_coef_0: 0.1394 - val_dice_coef_1: 0.3574 - val_dice_coef_01: 0.2484 - val_categorical_crossentropy: 0.4054
Epoch 6/200
319/320 [============================>.] - ETA: 0s - loss: 1.0010 - dice_coef_0: 0.1909 - dice_coef_1: 0.4253 - dice_coef_01: 0.3081 - categorical_crossentropy: 0.3782Epoch 00005: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 1.0008 - dice_coef_0: 0.1909 - dice_coef_1: 0.4255 - dice_coef_01: 0.3082 - categorical_crossentropy: 0.3783 - val_loss: 1.0483 - val_dice_coef_0: 0.1597 - val_dice_coef_1: 0.3674 - val_dice_coef_01: 0.2635 - val_categorical_crossentropy: 0.3282
Epoch 7/200
319/320 [============================>.] - ETA: 0s - loss: 0.9820 - dice_coef_0: 0.1921 - dice_coef_1: 0.4406 - dice_coef_01: 0.3163 - categorical_crossentropy: 0.3733Epoch 00006: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.9820 - dice_coef_0: 0.1921 - dice_coef_1: 0.4404 - dice_coef_01: 0.3163 - categorical_crossentropy: 0.3733 - val_loss: 1.8917 - val_dice_coef_0: 0.1113 - val_dice_coef_1: 0.1650 - val_dice_coef_01: 0.1382 - val_categorical_crossentropy: 0.8070
Epoch 8/200
319/320 [============================>.] - ETA: 0s - loss: 0.9572 - dice_coef_0: 0.1970 - dice_coef_1: 0.4456 - dice_coef_01: 0.3213 - categorical_crossentropy: 0.3555Epoch 00007: val_dice_coef_01 improved from 0.29671 to 0.35662, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 0.9558 - dice_coef_0: 0.1969 - dice_coef_1: 0.4465 - dice_coef_01: 0.3217 - categorical_crossentropy: 0.3549 - val_loss: 0.8558 - val_dice_coef_0: 0.1789 - val_dice_coef_1: 0.5344 - val_dice_coef_01: 0.3566 - val_categorical_crossentropy: 0.3130
Epoch 9/200
319/320 [============================>.] - ETA: 0s - loss: 0.9587 - dice_coef_0: 0.2036 - dice_coef_1: 0.4552 - dice_coef_01: 0.3294 - categorical_crossentropy: 0.3756Epoch 00008: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.9598 - dice_coef_0: 0.2039 - dice_coef_1: 0.4541 - dice_coef_01: 0.3290 - categorical_crossentropy: 0.3760 - val_loss: 1.6100 - val_dice_coef_0: 0.1391 - val_dice_coef_1: 0.2430 - val_dice_coef_01: 0.1910 - val_categorical_crossentropy: 0.7031
Epoch 10/200
319/320 [============================>.] - ETA: 0s - loss: 0.9206 - dice_coef_0: 0.2159 - dice_coef_1: 0.4658 - dice_coef_01: 0.3409 - categorical_crossentropy: 0.3566Epoch 00009: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.9209 - dice_coef_0: 0.2156 - dice_coef_1: 0.4659 - dice_coef_01: 0.3407 - categorical_crossentropy: 0.3567 - val_loss: 0.8978 - val_dice_coef_0: 0.1538 - val_dice_coef_1: 0.5160 - val_dice_coef_01: 0.3349 - val_categorical_crossentropy: 0.3093
Epoch 11/200
319/320 [============================>.] - ETA: 0s - loss: 0.9362 - dice_coef_0: 0.2204 - dice_coef_1: 0.4673 - dice_coef_01: 0.3439 - categorical_crossentropy: 0.3764Epoch 00010: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.9376 - dice_coef_0: 0.2206 - dice_coef_1: 0.4667 - dice_coef_01: 0.3436 - categorical_crossentropy: 0.3776 - val_loss: 0.9660 - val_dice_coef_0: 0.1360 - val_dice_coef_1: 0.4852 - val_dice_coef_01: 0.3106 - val_categorical_crossentropy: 0.3451
Epoch 12/200
319/320 [============================>.] - ETA: 0s - loss: 0.9112 - dice_coef_0: 0.2220 - dice_coef_1: 0.4628 - dice_coef_01: 0.3424 - categorical_crossentropy: 0.3441Epoch 00011: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.9123 - dice_coef_0: 0.2216 - dice_coef_1: 0.4623 - dice_coef_01: 0.3420 - categorical_crossentropy: 0.3446 - val_loss: 0.9373 - val_dice_coef_0: 0.1581 - val_dice_coef_1: 0.4956 - val_dice_coef_01: 0.3268 - val_categorical_crossentropy: 0.3378
Epoch 13/200
319/320 [============================>.] - ETA: 0s - loss: 0.9101 - dice_coef_0: 0.2161 - dice_coef_1: 0.4776 - dice_coef_01: 0.3468 - categorical_crossentropy: 0.3597Epoch 00012: val_dice_coef_01 improved from 0.35662 to 0.36967, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 0.9095 - dice_coef_0: 0.2162 - dice_coef_1: 0.4776 - dice_coef_01: 0.3469 - categorical_crossentropy: 0.3592 - val_loss: 0.8559 - val_dice_coef_0: 0.2003 - val_dice_coef_1: 0.5391 - val_dice_coef_01: 0.3697 - val_categorical_crossentropy: 0.3012
Epoch 14/200
319/320 [============================>.] - ETA: 0s - loss: 0.8852 - dice_coef_0: 0.2297 - dice_coef_1: 0.4883 - dice_coef_01: 0.3590 - categorical_crossentropy: 0.3474Epoch 00013: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8850 - dice_coef_0: 0.2297 - dice_coef_1: 0.4888 - dice_coef_01: 0.3592 - categorical_crossentropy: 0.3476 - val_loss: 0.8736 - val_dice_coef_0: 0.1681 - val_dice_coef_1: 0.5116 - val_dice_coef_01: 0.3399 - val_categorical_crossentropy: 0.3017
Epoch 15/200
319/320 [============================>.] - ETA: 0s - loss: 0.9564 - dice_coef_0: 0.2250 - dice_coef_1: 0.4771 - dice_coef_01: 0.3510 - categorical_crossentropy: 0.3505Epoch 00014: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.9563 - dice_coef_0: 0.2247 - dice_coef_1: 0.4769 - dice_coef_01: 0.3508 - categorical_crossentropy: 0.3502 - val_loss: 1.0890 - val_dice_coef_0: 0.2226 - val_dice_coef_1: 0.4829 - val_dice_coef_01: 0.3528 - val_categorical_crossentropy: 0.3028
Epoch 16/200
319/320 [============================>.] - ETA: 0s - loss: 0.8957 - dice_coef_0: 0.2394 - dice_coef_1: 0.4779 - dice_coef_01: 0.3587 - categorical_crossentropy: 0.3575Epoch 00015: val_dice_coef_01 improved from 0.36967 to 0.37774, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 0.8949 - dice_coef_0: 0.2396 - dice_coef_1: 0.4783 - dice_coef_01: 0.3589 - categorical_crossentropy: 0.3571 - val_loss: 0.7802 - val_dice_coef_0: 0.2118 - val_dice_coef_1: 0.5437 - val_dice_coef_01: 0.3777 - val_categorical_crossentropy: 0.2662
Epoch 17/200
319/320 [============================>.] - ETA: 0s - loss: 0.8666 - dice_coef_0: 0.2348 - dice_coef_1: 0.4955 - dice_coef_01: 0.3652 - categorical_crossentropy: 0.3377Epoch 00016: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8703 - dice_coef_0: 0.2347 - dice_coef_1: 0.4945 - dice_coef_01: 0.3646 - categorical_crossentropy: 0.3405 - val_loss: 0.9790 - val_dice_coef_0: 0.1616 - val_dice_coef_1: 0.4571 - val_dice_coef_01: 0.3094 - val_categorical_crossentropy: 0.3378
Epoch 18/200
319/320 [============================>.] - ETA: 0s - loss: 0.8644 - dice_coef_0: 0.2421 - dice_coef_1: 0.4962 - dice_coef_01: 0.3692 - categorical_crossentropy: 0.3422Epoch 00017: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8647 - dice_coef_0: 0.2419 - dice_coef_1: 0.4961 - dice_coef_01: 0.3690 - categorical_crossentropy: 0.3424 - val_loss: 0.7966 - val_dice_coef_0: 0.2018 - val_dice_coef_1: 0.5341 - val_dice_coef_01: 0.3679 - val_categorical_crossentropy: 0.2642
Epoch 19/200
319/320 [============================>.] - ETA: 0s - loss: 0.9566 - dice_coef_0: 0.2335 - dice_coef_1: 0.4859 - dice_coef_01: 0.3597 - categorical_crossentropy: 0.3577Epoch 00018: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.9564 - dice_coef_0: 0.2333 - dice_coef_1: 0.4860 - dice_coef_01: 0.3596 - categorical_crossentropy: 0.3576 - val_loss: 0.8270 - val_dice_coef_0: 0.2107 - val_dice_coef_1: 0.5424 - val_dice_coef_01: 0.3766 - val_categorical_crossentropy: 0.3112
Epoch 20/200
319/320 [============================>.] - ETA: 0s - loss: 0.9149 - dice_coef_0: 0.2271 - dice_coef_1: 0.4674 - dice_coef_01: 0.3473 - categorical_crossentropy: 0.3561Epoch 00019: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.9143 - dice_coef_0: 0.2271 - dice_coef_1: 0.4678 - dice_coef_01: 0.3474 - categorical_crossentropy: 0.3558 - val_loss: 1.2404 - val_dice_coef_0: 0.1669 - val_dice_coef_1: 0.3090 - val_dice_coef_01: 0.2379 - val_categorical_crossentropy: 0.4685
Epoch 21/200
319/320 [============================>.] - ETA: 0s - loss: 0.8758 - dice_coef_0: 0.2505 - dice_coef_1: 0.4750 - dice_coef_01: 0.3628 - categorical_crossentropy: 0.3452Epoch 00020: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8749 - dice_coef_0: 0.2503 - dice_coef_1: 0.4755 - dice_coef_01: 0.3629 - categorical_crossentropy: 0.3446 - val_loss: 0.7978 - val_dice_coef_0: 0.1942 - val_dice_coef_1: 0.5276 - val_dice_coef_01: 0.3609 - val_categorical_crossentropy: 0.2630
Epoch 22/200
319/320 [============================>.] - ETA: 0s - loss: 0.8745 - dice_coef_0: 0.2392 - dice_coef_1: 0.4968 - dice_coef_01: 0.3680 - categorical_crossentropy: 0.3482Epoch 00021: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8737 - dice_coef_0: 0.2393 - dice_coef_1: 0.4969 - dice_coef_01: 0.3681 - categorical_crossentropy: 0.3477 - val_loss: 0.8083 - val_dice_coef_0: 0.2257 - val_dice_coef_1: 0.4742 - val_dice_coef_01: 0.3500 - val_categorical_crossentropy: 0.2552
Epoch 23/200
319/320 [============================>.] - ETA: 0s - loss: 0.8353 - dice_coef_0: 0.2621 - dice_coef_1: 0.5004 - dice_coef_01: 0.3813 - categorical_crossentropy: 0.3268Epoch 00022: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8350 - dice_coef_0: 0.2622 - dice_coef_1: 0.5008 - dice_coef_01: 0.3815 - categorical_crossentropy: 0.3269 - val_loss: 0.8612 - val_dice_coef_0: 0.2035 - val_dice_coef_1: 0.5095 - val_dice_coef_01: 0.3565 - val_categorical_crossentropy: 0.2964
Epoch 24/200
319/320 [============================>.] - ETA: 0s - loss: 0.8924 - dice_coef_0: 0.2473 - dice_coef_1: 0.4709 - dice_coef_01: 0.3591 - categorical_crossentropy: 0.3521Epoch 00023: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8921 - dice_coef_0: 0.2474 - dice_coef_1: 0.4707 - dice_coef_01: 0.3590 - categorical_crossentropy: 0.3518 - val_loss: 0.8146 - val_dice_coef_0: 0.1865 - val_dice_coef_1: 0.5328 - val_dice_coef_01: 0.3596 - val_categorical_crossentropy: 0.2566
Epoch 25/200
319/320 [============================>.] - ETA: 0s - loss: 0.9059 - dice_coef_0: 0.2390 - dice_coef_1: 0.4663 - dice_coef_01: 0.3527 - categorical_crossentropy: 0.3514Epoch 00024: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.9063 - dice_coef_0: 0.2387 - dice_coef_1: 0.4665 - dice_coef_01: 0.3526 - categorical_crossentropy: 0.3518 - val_loss: 0.8213 - val_dice_coef_0: 0.1941 - val_dice_coef_1: 0.5470 - val_dice_coef_01: 0.3705 - val_categorical_crossentropy: 0.2895
Epoch 26/200
319/320 [============================>.] - ETA: 0s - loss: 0.8931 - dice_coef_0: 0.2355 - dice_coef_1: 0.4886 - dice_coef_01: 0.3621 - categorical_crossentropy: 0.3450Epoch 00025: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8927 - dice_coef_0: 0.2358 - dice_coef_1: 0.4888 - dice_coef_01: 0.3623 - categorical_crossentropy: 0.3450 - val_loss: 0.8397 - val_dice_coef_0: 0.1898 - val_dice_coef_1: 0.5310 - val_dice_coef_01: 0.3604 - val_categorical_crossentropy: 0.2983
Epoch 27/200
319/320 [============================>.] - ETA: 0s - loss: 0.8848 - dice_coef_0: 0.2412 - dice_coef_1: 0.4858 - dice_coef_01: 0.3635 - categorical_crossentropy: 0.3466Epoch 00026: val_dice_coef_01 improved from 0.37774 to 0.38403, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 0.8865 - dice_coef_0: 0.2409 - dice_coef_1: 0.4857 - dice_coef_01: 0.3633 - categorical_crossentropy: 0.3480 - val_loss: 0.7578 - val_dice_coef_0: 0.2322 - val_dice_coef_1: 0.5358 - val_dice_coef_01: 0.3840 - val_categorical_crossentropy: 0.2604
Epoch 28/200
319/320 [============================>.] - ETA: 0s - loss: 0.8409 - dice_coef_0: 0.2630 - dice_coef_1: 0.4985 - dice_coef_01: 0.3808 - categorical_crossentropy: 0.3284Epoch 00027: val_dice_coef_01 improved from 0.38403 to 0.38616, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 0.8407 - dice_coef_0: 0.2625 - dice_coef_1: 0.4986 - dice_coef_01: 0.3805 - categorical_crossentropy: 0.3280 - val_loss: 0.7933 - val_dice_coef_0: 0.2316 - val_dice_coef_1: 0.5407 - val_dice_coef_01: 0.3862 - val_categorical_crossentropy: 0.2806
Epoch 29/200
319/320 [============================>.] - ETA: 0s - loss: 0.9048 - dice_coef_0: 0.2503 - dice_coef_1: 0.5007 - dice_coef_01: 0.3755 - categorical_crossentropy: 0.3263Epoch 00028: val_dice_coef_01 improved from 0.38616 to 0.39637, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 0.9041 - dice_coef_0: 0.2507 - dice_coef_1: 0.5007 - dice_coef_01: 0.3757 - categorical_crossentropy: 0.3261 - val_loss: 0.7784 - val_dice_coef_0: 0.2360 - val_dice_coef_1: 0.5568 - val_dice_coef_01: 0.3964 - val_categorical_crossentropy: 0.2845
Epoch 30/200
319/320 [============================>.] - ETA: 0s - loss: 0.8758 - dice_coef_0: 0.2438 - dice_coef_1: 0.4915 - dice_coef_01: 0.3676 - categorical_crossentropy: 0.3488Epoch 00029: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8772 - dice_coef_0: 0.2440 - dice_coef_1: 0.4909 - dice_coef_01: 0.3674 - categorical_crossentropy: 0.3500 - val_loss: 0.8457 - val_dice_coef_0: 0.2141 - val_dice_coef_1: 0.4941 - val_dice_coef_01: 0.3541 - val_categorical_crossentropy: 0.2741
Epoch 31/200
319/320 [============================>.] - ETA: 0s - loss: 0.8776 - dice_coef_0: 0.2436 - dice_coef_1: 0.4938 - dice_coef_01: 0.3687 - categorical_crossentropy: 0.3489Epoch 00030: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8780 - dice_coef_0: 0.2439 - dice_coef_1: 0.4926 - dice_coef_01: 0.3683 - categorical_crossentropy: 0.3487 - val_loss: 1.0989 - val_dice_coef_0: 0.1692 - val_dice_coef_1: 0.3497 - val_dice_coef_01: 0.2595 - val_categorical_crossentropy: 0.3466
Epoch 32/200
319/320 [============================>.] - ETA: 0s - loss: 0.8353 - dice_coef_0: 0.2534 - dice_coef_1: 0.5089 - dice_coef_01: 0.3811 - categorical_crossentropy: 0.3270Epoch 00031: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8346 - dice_coef_0: 0.2537 - dice_coef_1: 0.5093 - dice_coef_01: 0.3815 - categorical_crossentropy: 0.3268 - val_loss: 0.7462 - val_dice_coef_0: 0.2077 - val_dice_coef_1: 0.5746 - val_dice_coef_01: 0.3912 - val_categorical_crossentropy: 0.2490
Epoch 33/200
319/320 [============================>.] - ETA: 0s - loss: 0.8414 - dice_coef_0: 0.2452 - dice_coef_1: 0.5149 - dice_coef_01: 0.3801 - categorical_crossentropy: 0.3296Epoch 00032: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8416 - dice_coef_0: 0.2450 - dice_coef_1: 0.5148 - dice_coef_01: 0.3799 - categorical_crossentropy: 0.3297 - val_loss: 0.7684 - val_dice_coef_0: 0.2200 - val_dice_coef_1: 0.5587 - val_dice_coef_01: 0.3894 - val_categorical_crossentropy: 0.2661
Epoch 34/200
319/320 [============================>.] - ETA: 0s - loss: 0.8592 - dice_coef_0: 0.2622 - dice_coef_1: 0.4894 - dice_coef_01: 0.3758 - categorical_crossentropy: 0.3443Epoch 00033: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8594 - dice_coef_0: 0.2620 - dice_coef_1: 0.4894 - dice_coef_01: 0.3757 - categorical_crossentropy: 0.3444 - val_loss: 0.7916 - val_dice_coef_0: 0.2161 - val_dice_coef_1: 0.5532 - val_dice_coef_01: 0.3846 - val_categorical_crossentropy: 0.2784
Epoch 35/200
319/320 [============================>.] - ETA: 0s - loss: 0.8194 - dice_coef_0: 0.2379 - dice_coef_1: 0.5409 - dice_coef_01: 0.3894 - categorical_crossentropy: 0.3222Epoch 00034: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8184 - dice_coef_0: 0.2378 - dice_coef_1: 0.5415 - dice_coef_01: 0.3896 - categorical_crossentropy: 0.3217 - val_loss: 0.9067 - val_dice_coef_0: 0.1882 - val_dice_coef_1: 0.4944 - val_dice_coef_01: 0.3413 - val_categorical_crossentropy: 0.3079
Epoch 36/200
319/320 [============================>.] - ETA: 0s - loss: 0.8379 - dice_coef_0: 0.2508 - dice_coef_1: 0.5102 - dice_coef_01: 0.3805 - categorical_crossentropy: 0.3303Epoch 00035: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8370 - dice_coef_0: 0.2513 - dice_coef_1: 0.5103 - dice_coef_01: 0.3808 - categorical_crossentropy: 0.3298 - val_loss: 1.4464 - val_dice_coef_0: 0.1623 - val_dice_coef_1: 0.2960 - val_dice_coef_01: 0.2291 - val_categorical_crossentropy: 0.4415
Epoch 37/200
319/320 [============================>.] - ETA: 0s - loss: 0.8094 - dice_coef_0: 0.2714 - dice_coef_1: 0.5234 - dice_coef_01: 0.3974 - categorical_crossentropy: 0.3223Epoch 00036: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8104 - dice_coef_0: 0.2718 - dice_coef_1: 0.5225 - dice_coef_01: 0.3972 - categorical_crossentropy: 0.3231 - val_loss: 1.1018 - val_dice_coef_0: 0.1936 - val_dice_coef_1: 0.4945 - val_dice_coef_01: 0.3441 - val_categorical_crossentropy: 0.2760
Epoch 38/200
319/320 [============================>.] - ETA: 0s - loss: 0.8117 - dice_coef_0: 0.2693 - dice_coef_1: 0.5199 - dice_coef_01: 0.3946 - categorical_crossentropy: 0.3161Epoch 00037: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8111 - dice_coef_0: 0.2693 - dice_coef_1: 0.5204 - dice_coef_01: 0.3948 - categorical_crossentropy: 0.3159 - val_loss: 1.1943 - val_dice_coef_0: 0.2285 - val_dice_coef_1: 0.2820 - val_dice_coef_01: 0.2553 - val_categorical_crossentropy: 0.4741
Epoch 39/200
319/320 [============================>.] - ETA: 0s - loss: 0.8331 - dice_coef_0: 0.2622 - dice_coef_1: 0.5232 - dice_coef_01: 0.3927 - categorical_crossentropy: 0.3406Epoch 00038: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8343 - dice_coef_0: 0.2617 - dice_coef_1: 0.5227 - dice_coef_01: 0.3922 - categorical_crossentropy: 0.3411 - val_loss: 0.8198 - val_dice_coef_0: 0.2161 - val_dice_coef_1: 0.5003 - val_dice_coef_01: 0.3582 - val_categorical_crossentropy: 0.2438
Epoch 40/200
319/320 [============================>.] - ETA: 0s - loss: 0.8749 - dice_coef_0: 0.2582 - dice_coef_1: 0.4816 - dice_coef_01: 0.3699 - categorical_crossentropy: 0.3488Epoch 00039: val_dice_coef_01 improved from 0.39637 to 0.40239, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 0.8744 - dice_coef_0: 0.2581 - dice_coef_1: 0.4815 - dice_coef_01: 0.3698 - categorical_crossentropy: 0.3482 - val_loss: 0.7137 - val_dice_coef_0: 0.2160 - val_dice_coef_1: 0.5888 - val_dice_coef_01: 0.4024 - val_categorical_crossentropy: 0.2301
Epoch 41/200
319/320 [============================>.] - ETA: 0s - loss: 0.8260 - dice_coef_0: 0.2592 - dice_coef_1: 0.5128 - dice_coef_01: 0.3860 - categorical_crossentropy: 0.3264Epoch 00040: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8259 - dice_coef_0: 0.2597 - dice_coef_1: 0.5126 - dice_coef_01: 0.3862 - categorical_crossentropy: 0.3265 - val_loss: 0.8097 - val_dice_coef_0: 0.2095 - val_dice_coef_1: 0.5305 - val_dice_coef_01: 0.3700 - val_categorical_crossentropy: 0.2836
Epoch 42/200
319/320 [============================>.] - ETA: 0s - loss: 0.8473 - dice_coef_0: 0.2622 - dice_coef_1: 0.4986 - dice_coef_01: 0.3804 - categorical_crossentropy: 0.3358Epoch 00041: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8474 - dice_coef_0: 0.2620 - dice_coef_1: 0.4988 - dice_coef_01: 0.3804 - categorical_crossentropy: 0.3360 - val_loss: 0.7402 - val_dice_coef_0: 0.2359 - val_dice_coef_1: 0.5641 - val_dice_coef_01: 0.4000 - val_categorical_crossentropy: 0.2498
Epoch 43/200
319/320 [============================>.] - ETA: 0s - loss: 0.8093 - dice_coef_0: 0.2612 - dice_coef_1: 0.5258 - dice_coef_01: 0.3935 - categorical_crossentropy: 0.3168Epoch 00042: val_dice_coef_01 improved from 0.40239 to 0.41088, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 0.8092 - dice_coef_0: 0.2610 - dice_coef_1: 0.5260 - dice_coef_01: 0.3935 - categorical_crossentropy: 0.3168 - val_loss: 0.7292 - val_dice_coef_0: 0.2359 - val_dice_coef_1: 0.5859 - val_dice_coef_01: 0.4109 - val_categorical_crossentropy: 0.2504
Epoch 44/200
319/320 [============================>.] - ETA: 0s - loss: 0.8195 - dice_coef_0: 0.2763 - dice_coef_1: 0.5162 - dice_coef_01: 0.3963 - categorical_crossentropy: 0.3321Epoch 00043: val_dice_coef_01 improved from 0.41088 to 0.41790, saving model to ./tb_logs/20171013_183551/model
320/320 [==============================] - ETA: 0s - loss: 0.8221 - dice_coef_0: 0.2763 - dice_coef_1: 0.5151 - dice_coef_01: 0.3957 - categorical_crossentropy: 0.3338 - val_loss: 0.7028 - val_dice_coef_0: 0.2619 - val_dice_coef_1: 0.5739 - val_dice_coef_01: 0.4179 - val_categorical_crossentropy: 0.2408
Epoch 45/200
319/320 [============================>.] - ETA: 0s - loss: 0.8211 - dice_coef_0: 0.2610 - dice_coef_1: 0.5205 - dice_coef_01: 0.3908 - categorical_crossentropy: 0.3266Epoch 00044: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8199 - dice_coef_0: 0.2610 - dice_coef_1: 0.5212 - dice_coef_01: 0.3911 - categorical_crossentropy: 0.3259 - val_loss: 0.8968 - val_dice_coef_0: 0.1878 - val_dice_coef_1: 0.5041 - val_dice_coef_01: 0.3460 - val_categorical_crossentropy: 0.3282
Epoch 46/200
319/320 [============================>.] - ETA: 0s - loss: 0.8449 - dice_coef_0: 0.2626 - dice_coef_1: 0.5018 - dice_coef_01: 0.3822 - categorical_crossentropy: 0.3340Epoch 00045: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8443 - dice_coef_0: 0.2630 - dice_coef_1: 0.5014 - dice_coef_01: 0.3822 - categorical_crossentropy: 0.3337 - val_loss: 0.7130 - val_dice_coef_0: 0.2334 - val_dice_coef_1: 0.5808 - val_dice_coef_01: 0.4071 - val_categorical_crossentropy: 0.2324
Epoch 47/200
319/320 [============================>.] - ETA: 0s - loss: 0.8157 - dice_coef_0: 0.2692 - dice_coef_1: 0.5062 - dice_coef_01: 0.3877 - categorical_crossentropy: 0.3182Epoch 00046: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8154 - dice_coef_0: 0.2687 - dice_coef_1: 0.5064 - dice_coef_01: 0.3876 - categorical_crossentropy: 0.3177 - val_loss: 0.8762 - val_dice_coef_0: 0.2032 - val_dice_coef_1: 0.4841 - val_dice_coef_01: 0.3436 - val_categorical_crossentropy: 0.2851
Epoch 48/200
319/320 [============================>.] - ETA: 0s - loss: 0.8135 - dice_coef_0: 0.2645 - dice_coef_1: 0.5240 - dice_coef_01: 0.3943 - categorical_crossentropy: 0.3259Epoch 00047: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8126 - dice_coef_0: 0.2644 - dice_coef_1: 0.5245 - dice_coef_01: 0.3944 - categorical_crossentropy: 0.3252 - val_loss: 0.7821 - val_dice_coef_0: 0.2232 - val_dice_coef_1: 0.5622 - val_dice_coef_01: 0.3927 - val_categorical_crossentropy: 0.2643
Epoch 49/200
319/320 [============================>.] - ETA: 0s - loss: 0.7994 - dice_coef_0: 0.2727 - dice_coef_1: 0.5278 - dice_coef_01: 0.4003 - categorical_crossentropy: 0.3141Epoch 00048: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.7985 - dice_coef_0: 0.2729 - dice_coef_1: 0.5278 - dice_coef_01: 0.4004 - categorical_crossentropy: 0.3135 - val_loss: 0.7838 - val_dice_coef_0: 0.2266 - val_dice_coef_1: 0.5428 - val_dice_coef_01: 0.3847 - val_categorical_crossentropy: 0.2689
Epoch 50/200
319/320 [============================>.] - ETA: 0s - loss: 0.8192 - dice_coef_0: 0.2797 - dice_coef_1: 0.5055 - dice_coef_01: 0.3926 - categorical_crossentropy: 0.3288Epoch 00049: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8200 - dice_coef_0: 0.2798 - dice_coef_1: 0.5054 - dice_coef_01: 0.3926 - categorical_crossentropy: 0.3297 - val_loss: 0.8655 - val_dice_coef_0: 0.1695 - val_dice_coef_1: 0.5058 - val_dice_coef_01: 0.3376 - val_categorical_crossentropy: 0.2792
Epoch 51/200
319/320 [============================>.] - ETA: 0s - loss: 0.8382 - dice_coef_0: 0.2617 - dice_coef_1: 0.5028 - dice_coef_01: 0.3822 - categorical_crossentropy: 0.3352Epoch 00050: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8385 - dice_coef_0: 0.2619 - dice_coef_1: 0.5027 - dice_coef_01: 0.3823 - categorical_crossentropy: 0.3356 - val_loss: 0.7498 - val_dice_coef_0: 0.2049 - val_dice_coef_1: 0.5439 - val_dice_coef_01: 0.3744 - val_categorical_crossentropy: 0.2249
Epoch 52/200
319/320 [============================>.] - ETA: 0s - loss: 0.7934 - dice_coef_0: 0.2777 - dice_coef_1: 0.5210 - dice_coef_01: 0.3993 - categorical_crossentropy: 0.3094Epoch 00051: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.7935 - dice_coef_0: 0.2776 - dice_coef_1: 0.5204 - dice_coef_01: 0.3990 - categorical_crossentropy: 0.3092 - val_loss: 0.7812 - val_dice_coef_0: 0.2313 - val_dice_coef_1: 0.5843 - val_dice_coef_01: 0.4078 - val_categorical_crossentropy: 0.2723
Epoch 53/200
319/320 [============================>.] - ETA: 0s - loss: 0.8005 - dice_coef_0: 0.2825 - dice_coef_1: 0.5224 - dice_coef_01: 0.4024 - categorical_crossentropy: 0.3237Epoch 00052: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8016 - dice_coef_0: 0.2823 - dice_coef_1: 0.5218 - dice_coef_01: 0.4021 - categorical_crossentropy: 0.3244 - val_loss: 0.7745 - val_dice_coef_0: 0.2100 - val_dice_coef_1: 0.5329 - val_dice_coef_01: 0.3715 - val_categorical_crossentropy: 0.2452
Epoch 54/200
319/320 [============================>.] - ETA: 0s - loss: 0.7903 - dice_coef_0: 0.2761 - dice_coef_1: 0.5300 - dice_coef_01: 0.4031 - categorical_crossentropy: 0.3126Epoch 00053: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.7952 - dice_coef_0: 0.2760 - dice_coef_1: 0.5287 - dice_coef_01: 0.4023 - categorical_crossentropy: 0.3162 - val_loss: 0.8254 - val_dice_coef_0: 0.1960 - val_dice_coef_1: 0.5341 - val_dice_coef_01: 0.3650 - val_categorical_crossentropy: 0.2706
Epoch 55/200
319/320 [============================>.] - ETA: 0s - loss: 0.8767 - dice_coef_0: 0.2689 - dice_coef_1: 0.5095 - dice_coef_01: 0.3892 - categorical_crossentropy: 0.3189Epoch 00054: val_dice_coef_01 did not improve
320/320 [==============================] - ETA: 0s - loss: 0.8769 - dice_coef_0: 0.2688 - dice_coef_1: 0.5094 - dice_coef_01: 0.3891 - categorical_crossentropy: 0.3192 - val_loss: 0.7990 - val_dice_coef_0: 0.1925 - val_dice_coef_1: 0.5684 - val_dice_coef_01: 0.3805 - val_categorical_crossentropy: 0.2686
Epoch 00054: early stopping

Train on cross-validation folds


In [28]:
# for fold in glob.glob('/notebook/data/6_eval/*/'):
#     fold_train_h5 = h5py.File(os.path.join(fold, 'train.hdf'), 'r')
#     fold_val_h5 = h5py.File(os.path.join(fold, 'val.hdf'), 'r')

#     fold_train_in = fold_train_h5['in_data']
#     fold_train_out = fold_train_h5['out_data']

#     fold_val_in = fold_val_h5['in_data']
#     fold_val_out = fold_val_h5['out_data']

#     fold_model_fname = os.path.join(fold, 'model')
#     if os.path.exists(fold_model_fname):
#         os.remove(fold_model_fname)

#     history = History()
#     model = get_unet(fold_train_in.shape[1:], fold_val_out.shape[-1])
#     model.fit(fold_train_in,
#               fold_train_out,
#               batch_size=8,
#               epochs=20,
#               callbacks=[
#                          EarlyStopping(verbose=1, patience=10, monitor='val_dice_coef_01', mode='max'),
#                          ModelCheckpoint(filepath=fold_model_fname, verbose=1, monitor='val_dice_coef_01', mode='max', save_best_only=True),
#                          history
# #                          ReduceLROnPlateau(factor=0.3, patience=10, epsilon=1e-3, verbose=1, cooldown=5, min_lr=1e-6)
# #                          LearningRateScheduler(lambda epoch: 1e-1 * (0.93 ** epoch))
#                          ],
#               validation_data=[fold_val_in, fold_val_out],
#               shuffle='batch',
#               verbose=1)
#     with open(os.path.join(fold, 'train_history.js'), 'w') as f:
#         json.dump(history.history, f)

Results Visualization


In [34]:
# cv_hist = pandas.concat([pandas.DataFrame(json.load(open(os.path.join(fold, 'train_history.js'), 'r')))
#                          for fold in glob.glob('/notebook/data/6_eval/*/')],
#                         keys=[os.path.basename(fold) for fold in glob.glob('/notebook/data/6_eval/*')],
#                         names=['fold'],
#                         axis=1)
# cv_hist.xs('val_dice_coef_01', level=1, axis=1).plot()

In [35]:
# ax = pandas.DataFrame(history.history)[[k for k in history.history.keys()]].plot(figsize=(16, 10))
# ax.set_ylim(0, 1)

In [ ]:
def make_layer_vis(model, layer_i):
    """Build a visualizer for the activations of ``model.layers[layer_i]``.

    Returns a callable mapping a 2D grayscale image to a PIL image in
    which the layer's feature maps are tiled horizontally, min-max
    normalized to the 0-255 range.
    """
    activation_fn = K.function([model.layers[0].input, K.learning_phase()],
                               [model.layers[layer_i].output])

    def _impl(img):
        # Add channel and batch axes: (H, W) -> (1, H, W, 1).
        batch = numpy.expand_dims(numpy.expand_dims(numpy.array(img), -1), 0)
        # Run in inference mode (learning_phase = False).
        activations = activation_fn([batch, False])[0]
        n_rows = activations.shape[1]
        n_cols = activations.shape[2]
        n_maps = activations.shape[-1]
        # Move channels before columns, then flatten so each feature map
        # occupies its own horizontal band: (1, H, C, W) -> (H, W * C).
        tiled = numpy.rollaxis(activations, -1, 2).reshape((n_rows, n_cols * n_maps))
        display((tiled.min(), tiled.max(), tiled.mean()))
        # Min-max normalize; the epsilon guards an all-constant map.
        tiled -= tiled.min()
        tiled /= (tiled.max() + 1e-4)
        return Image.fromarray((tiled * 255).astype('uint8'), mode='L')
    return _impl

def vis_layer(img, model):
    """Display the activations of each layer of the model for one image.

    Dropout and BatchNormalization layers are skipped; other layers get
    their name and a tiled activation image displayed, with a short pause
    between layers so the frontend can render each one.
    """
    skipped_types = (Dropout, BatchNormalization)
    visualizers = [(layer, make_layer_vis(model, idx))
                   for idx, layer in enumerate(model.layers)
                   if not isinstance(layer, skipped_types)]
    for layer, render in visualizers:
        display(layer.name)
        display(render(img))
        time.sleep(1.0)  # give the notebook frontend time to render

# vis_img = read_images_to_tensor(['./data/5_ready/test_tiny/12147373-0005_2_in.png'])[0]
# Pick one 2D grayscale slice (sample index 2, channel 0) out of the batch;
# presumably fake_batch[0] is an input tensor of shape (batch, H, W, channels)
# produced by a generator earlier in the notebook — TODO confirm.
vis_img = fake_batch[0][2, :, :, 0]
# Walk through the model and display every layer's activations for this image.
vis_layer(vis_img, model)

In [ ]: