In [1]:
import numpy as np
import pandas as pd
import tifffile as tif
import cv2
import matplotlib.pyplot as plt

from importlib import reload
import gc, os, inspect, glob

from shapely import wkt
from keras.backend.tensorflow_backend import clear_session

import utils
import dilated_nets
import global_vars


Using TensorFlow backend.

In [2]:
def get_train_sample(n_samp_per_im, buff, label_size):
    train_wkt = pd.read_csv(os.path.join(global_vars.DATA_DIR,'train_wkt_v4.csv'), index_col=0)
    train_names = np.array(sorted(train_wkt.index.unique()))
    # 25 training images; inputs are (label_size + 2*buff) patches with 8 M-band channels,
    # targets are label_size patches with 9 channels (background + 8 merged classes)
    x_train = np.zeros((25*n_samp_per_im, 2*buff+label_size, 2*buff+label_size, 8), dtype=np.float32)
    y_train = np.zeros((25*n_samp_per_im, label_size, label_size, 9), dtype=np.float32)
    for e, name in enumerate(train_names):
        
        # scale the 11-bit M-band image to [-1, 1] and resize with nearest-neighbour interpolation
        im_m = utils.load_m(name)
        im_m = ((im_m/((2.0**11)-1)) - 0.5)*2
        im_m = cv2.resize(im_m, (835, 835), interpolation=cv2.INTER_NEAREST)
    
        lab = utils.load_all_lab(name, 835)
        
        # merge channel pairs 6/7 and 8/9 into single classes, dropping from 10 to 8 channels
        lab[:,:,6] = lab[:,:,[6,7]].sum(axis=-1).clip(0,1)
        lab[:,:,8] = lab[:,:,[8,9]].sum(axis=-1).clip(0,1)
        lab = lab[:,:,[0,1,2,3,4,5,6,8]]
        # prepend a background channel wherever no class is present
        back_ground = (lab.sum(axis=-1).clip(0,1) == 0).astype(np.uint8).reshape(list(lab.shape[:2])+[1])
        lab = np.concatenate((back_ground, lab), axis=-1)
        
        patches, labels = utils.get_train_patches(im_m, lab, n_samp_per_im, label_size, buff)
        del im_m, lab, back_ground
        gc.collect()

        x_train[e*n_samp_per_im:(e+1)*n_samp_per_im,:,:,:] = np.array(patches, dtype=np.float32)
        y_train[e*n_samp_per_im:(e+1)*n_samp_per_im,:,:,:] = np.array(labels, dtype=np.float32)
        
        gc.collect()
    
    return x_train, y_train
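
utils.get_train_patches is not shown in this notebook. As a rough sketch of what it is assumed to do, the hypothetical helper below draws n_samples random crops where the input patch is label_size + 2*buff pixels on a side and the target is the central label_size window; the name and details are assumptions, not the actual utils code.

def get_train_patches_sketch(im, lab, n_samples, label_size, buff):
    """Hypothetical stand-in for utils.get_train_patches: random aligned crops."""
    patch_size = label_size + 2*buff                  # e.g. 16 + 2*60 = 136
    h, w = lab.shape[:2]
    patches, labels = [], []
    for _ in range(n_samples):
        # top-left corner of the label window, kept far enough from the image border
        y = np.random.randint(buff, h - buff - label_size + 1)
        x = np.random.randint(buff, w - buff - label_size + 1)
        patches.append(im[y-buff:y-buff+patch_size, x-buff:x-buff+patch_size, :])
        labels.append(lab[y:y+label_size, x:x+label_size, :])
    return patches, labels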

In [8]:
dilated_nets = reload(dilated_nets)
model = dilated_nets.atr_tiny_top(60, 16, 8, 9)  # presumably (buff, label_size, input channels, classes)
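
dilated_nets.atr_tiny_top itself is not reproduced in this notebook. Reading the call together with get_train_sample, the arguments look like (buff, label_size, input channels, output classes), i.e. a 136x136x8 patch mapped to a 16x16x9 softmax. The block below is only a minimal sketch of that kind of dilated-convolution head in the Keras 2 API, under the made-up name atr_tiny_top_sketch; it is not the author's architecture.

from keras.models import Model
from keras.layers import Input, Conv2D, Cropping2D

def atr_tiny_top_sketch(buff, label_size, n_channels, n_classes):
    """Hypothetical sketch: stacked dilated 3x3 convs, then crop away the context buffer."""
    size = label_size + 2*buff                        # 136 for buff=60, label_size=16
    inp = Input(shape=(size, size, n_channels))
    x = inp
    for rate in (1, 2, 4, 8, 16):                     # growing receptive field
        x = Conv2D(32, (3, 3), dilation_rate=rate,
                   padding='same', activation='relu')(x)
    x = Conv2D(n_classes, (1, 1), activation='softmax')(x)
    out = Cropping2D(cropping=buff)(x)                # 136x136 -> 16x16 label window
    model = Model(inputs=inp, outputs=out)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model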

In [4]:
exp_name = '_dilated16x16_debug'
start_epoch = 0
num_epochs = 1

# Save the source code every time this cell runs, to prevent accidental loss of architectures.
source = inspect.findsource(dilated_nets)
with open(os.path.join(global_vars.DATA_DIR, 'source'+exp_name+'.py'), 'w') as f:
    for line in source[0]:
        f.write(line)

for j in range(start_epoch, start_epoch + num_epochs):
    print('\n', 'starting epoch ', j)
    
    x_tr, y_tr = get_train_sample(100, 60, 16)
    x_tr, y_tr = utils.augment_ims(x_tr, y_tr)
    
    
    model.fit(x_tr, y_tr, batch_size=64,
              epochs=1, verbose=1, 
              shuffle=True)
    
    # score every 10th training patch as a quick sanity check
    preds = model.predict(x_tr[::10])
    iou_50 = utils.print_scores(preds, y_tr[::10], [1,2,3,4,5,6,7,8], False)
          
    model.save_weights(os.path.join(global_vars.DATA_DIR,'weights', str(j)+ '_'+ exp_name +'_'+ str(iou_50)))
          
    del x_tr, y_tr
    gc.collect()


 starting epoch  0
Epoch 1/1
2500/2500 [==============================] - 24s - loss: 0.6603 - acc: 0.6549      


scores for class 1
iou for 0.4 thresh val images  0.039265625
iou for 0.5 thresh val images  0.0
iou for 0.6 thresh val images  0.0


scores for class 2
iou for 0.4 thresh val images  0.0215625
iou for 0.5 thresh val images  0.0
iou for 0.6 thresh val images  0.0


scores for class 3
iou for 0.4 thresh val images  0.015125
iou for 0.5 thresh val images  0.000126294518818
iou for 0.6 thresh val images  0.0


scores for class 4
iou for 0.4 thresh val images  0.031171875
iou for 0.5 thresh val images  0.0
iou for 0.6 thresh val images  0.0


scores for class 5
iou for 0.4 thresh val images  0.11871875
iou for 0.5 thresh val images  0.0
iou for 0.6 thresh val images  0.0


scores for class 6
iou for 0.4 thresh val images  0.294109375
iou for 0.5 thresh val images  0.0
iou for 0.6 thresh val images  0.0


scores for class 7
iou for 0.4 thresh val images  0.0085625
iou for 0.5 thresh val images  0.0
iou for 0.6 thresh val images  0.0


scores for class 8
iou for 0.4 thresh val images  0.000390625
iou for 0.5 thresh val images  0.000390625
iou for 0.6 thresh val images  0.0
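
For reference, the thresholded IoU that utils.print_scores appears to report (one value per class at probability thresholds 0.4, 0.5 and 0.6) could be computed along the lines of the hypothetical helper below; this is a guess at the metric, not the actual utility.

def iou_at_threshold(preds, truth, class_idx, thresh):
    """Hypothetical per-class IoU: binarise predictions at thresh and compare masks."""
    p = preds[..., class_idx] > thresh
    t = truth[..., class_idx] > 0.5
    union = np.logical_or(p, t).sum()
    if union == 0:
        return 0.0
    return np.logical_and(p, t).sum() / float(union)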

In [ ]: