In [1]:
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras.callbacks import TensorBoard
from keras.models import model_from_json
from keras.models import load_model
from keras import regularizers

import os
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
import  cv2
import scipy.misc
from scipy import spatial
from PIL import Image
import heapq
%matplotlib inline


Using TensorFlow backend.

In [2]:
th = 100
mul = 2
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_new/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
masks = None
for filen1 in files1:
    img1 = cv2.imread(mypath1+filen1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img1 = cv2.resize(img1, (int(img1.shape[0] * mul), int(img1.shape[1] * mul)))
    img1[img1<th] = 1
    img1[img1>=th] = 0
    if masks == None:
        masks = np.zeros(img1.shape)
    masks = masks + img1
img1[masks>20] = 0
print np.average(masks)
plt.imshow(img1)


23.8044732541
-c:13: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
Out[2]:
<matplotlib.image.AxesImage at 0x7fad4421ead0>

In [3]:
masks[masks>50] = 1
masks[masks!=1] = 0
plt.imshow(masks)


Out[3]:
<matplotlib.image.AxesImage at 0x7fad44120910>

In [4]:
input_img = Input(shape=(int(img1.shape[0]), int(img1.shape[1]),1))
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same', input_shape=(224,224,1))(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same', activity_regularizer=regularizers.activity_l1(10e-5))(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x)
model = Model(input_img, encoded)
model = Model(input_img, encoded)
model.compile(loss='binary_crossentropy', optimizer='adagrad')

In [21]:
model.load_weights('../allmods/ae_tries_mods/model_right2.h5', by_name=True)

In [22]:
def push_pqueue(queue, priority, value):
    if len(queue)>10:
       heapq.heappushpop(queue, (priority, value))
    else:
        heapq.heappush(queue, (priority, value))

In [24]:
v = 20
img = cv2.imread('/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_new/op_U372_A.jpg.tif')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (int(img.shape[0] * mul), int(img.shape[1] * mul)))
img[img<th] = v
img[masks==1] = 0
img[img>=th] = 0
X = np.array([img])
X = X.astype('float32')#/ float(np.max(X))
X = np.reshape(X, (len(X),  int(img.shape[0]), int(img.shape[1]), 1))
pred = model.predict(X, verbose=0)[0]
plt.imshow(img)
plt.show()
plt.imshow(np.sum(pred.reshape((int(28 * mul),int(28 * mul),8)), axis=2))


Out[24]:
<matplotlib.image.AxesImage at 0x7fad1078a150>

In [ ]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_new/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
for filen1 in files1:
    img1 = cv2.imread(mypath1+filen1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img1 = cv2.resize(img1, (int(img1.shape[0] * mul), int(img1.shape[1] * mul)))
    img1[img1<th] = v
   # img1[masks==1] = 0
    img1[img1>=th] = 0
    X_test.append(np.array([img1]))
X_test = np.array(X_test).astype('float32')#/ float(np.max(X))
X_test = np.reshape(X_test, (len(X_test),  int(img1.shape[0]), int(img1.shape[1]), 1))
X_test_pred = model.predict(X_test, verbose=0)

In [ ]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_new/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_train = []
for filen1 in files1:
    img1 = cv2.imread(mypath1+filen1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img1 = cv2.resize(img1, (int(img1.shape[0] * mul), int(img1.shape[1] * mul)))
    img1[img1<th] = v
    #img1[masks==1] = 0
    img1[img1>=th] = 0
    X_train.append(np.array([img1]))
X_train = np.array(X_train).astype('float32')#/ float(np.max(X))
X_train = np.reshape(X_train, (len(X_train),   int(img1.shape[0]), int(img1.shape[1]), 1))
X_train_pred = model.predict(X_train, verbose=0)

In [ ]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_new/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
top10_correct = 0
top5_correct = 0
top1_correct = 0
run_count = 0
mp = {}
for i in np.arange(0, len(files1)):
    filen1 = files1[i]
    pred = X_test_pred[i]
    mypath = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_new/'
    files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    pqueue = []
    count = 0
    for j in np.arange(0, len(files)):
            filen = files[j]
            tpred = X_train_pred[j]
            score = 1 - spatial.distance.cosine(tpred.flatten(), pred.flatten())
            push_pqueue(pqueue, score, filen)
#     print len(files), count
   
    for top10 in pqueue:
        if top10[1].split('_')[1].split('.')[0] == filen1.split('_')[1].split('.')[0]:
            top10_correct+=1;
            break
    for top5 in pqueue[:5]:
        if top5[1].split('_')[1].split('.')[0] == filen1.split('_')[1].split('.')[0]:
            top5_correct+=1;
            break
    if pqueue[0][1].split('_')[1].split('.')[0] == filen1.split('_')[1].split('.')[0]:
        top1_correct += 1
    mp[filen1] = pqueue[0][1]
    run_count+=1
    #print filen1, max_file, max_confidence

In [ ]:
top10_correct/float(len(files1))

In [15]:
top5_correct/float(len(files1))


Out[15]:
0.00819672131147541

In [16]:
top1_correct


Out[16]:
0

In [46]:
run_count


Out[46]:
122

In [15]:
for i in np.arange(0, len(files)):
    pred = X_train_pred[i]
    if(np.sum(pred) == 0 and i > 1040):
        print 'sacdasvnd'
        print i
        break

In [16]:
print files[722], files[851] , files[1040]


op_U3264.jpg.tif op_U3007.jpg.tif op_U3605.jpg.tif

In [86]:
model.save('/home/arvind/Desktop/model31.h5')

In [91]:
model.to_json()


Out[91]:
'{"class_name": "Model", "keras_version": "1.2.2", "config": {"layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 224, 224, 1], "input_dtype": "float32", "sparse": false, "name": "input_4"}, "inbound_nodes": [], "name": "input_4"}, {"class_name": "Convolution2D", "config": {"b_regularizer": null, "W_constraint": null, "b_constraint": null, "name": "convolution2d_10", "activity_regularizer": null, "trainable": true, "dim_ordering": "tf", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 16, "input_dtype": "float32", "border_mode": "same", "batch_input_shape": [null, 224, 224, 1], "W_regularizer": null, "activation": "relu", "nb_row": 3}, "inbound_nodes": [[["input_4", 0, 0]]], "name": "convolution2d_10"}, {"class_name": "MaxPooling2D", "config": {"name": "maxpooling2d_10", "trainable": true, "dim_ordering": "tf", "pool_size": [2, 2], "strides": [2, 2], "border_mode": "same"}, "inbound_nodes": [[["convolution2d_10", 0, 0]]], "name": "maxpooling2d_10"}, {"class_name": "Convolution2D", "config": {"W_constraint": null, "b_constraint": null, "name": "convolution2d_11", "activity_regularizer": null, "trainable": true, "dim_ordering": "tf", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 8, "border_mode": "same", "b_regularizer": null, "W_regularizer": null, "activation": "relu", "nb_row": 3}, "inbound_nodes": [[["maxpooling2d_10", 0, 0]]], "name": "convolution2d_11"}, {"class_name": "MaxPooling2D", "config": {"name": "maxpooling2d_11", "trainable": true, "dim_ordering": "tf", "pool_size": [2, 2], "strides": [2, 2], "border_mode": "same"}, "inbound_nodes": [[["convolution2d_11", 0, 0]]], "name": "maxpooling2d_11"}, {"class_name": "Convolution2D", "config": {"W_constraint": null, "b_constraint": null, "name": "convolution2d_12", "activity_regularizer": {"l2": 0.0, "name": "L1L2Regularizer", "l1": 9.999999747378752e-05}, "trainable": true, "dim_ordering": "tf", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 8, "border_mode": "same", "b_regularizer": null, "W_regularizer": null, "activation": "relu", "nb_row": 3}, "inbound_nodes": [[["maxpooling2d_11", 0, 0]]], "name": "convolution2d_12"}, {"class_name": "MaxPooling2D", "config": {"name": "maxpooling2d_12", "trainable": true, "dim_ordering": "tf", "pool_size": [2, 2], "strides": [2, 2], "border_mode": "same"}, "inbound_nodes": [[["convolution2d_12", 0, 0]]], "name": "maxpooling2d_12"}], "input_layers": [["input_4", 0, 0]], "output_layers": [["maxpooling2d_12", 0, 0]], "name": "model_8"}}'

In [86]:
def gen_img():
    for k in mp.keys():
        img = cv2.imread('/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_new/'+k)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, (int(img.shape[0] * mul), int(img.shape[1] * mul)))
        img1 = cv2.imread('/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_new/'+mp[k])
        img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        img1 = cv2.resize(img1, (int(img.shape[0] * mul), int(img.shape[1] * mul)))
        yield (img, img1)

In [87]:
gen = gen_img()

In [92]:
for ip in gen:
    plt.imshow(ip[0])
    plt.show()
    plt.imshow(ip[1])
    break



In [91]:
import cv2
import numpy as np

img = cv2.imread('/home/arvind/MyStuff/Desktop/Manatee_dataset/clean_better/op/op_U3375_A.tif.tif')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
minLineLength = 70
maxLineGap = 10
lines = cv2.HoughLinesP(edges,10,np.pi/180,100,minLineLength,maxLineGap)
print len(lines)
for x1,y1,x2,y2 in lines[0]:
    cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
plt.imshow(img)


2
Out[91]:
<matplotlib.image.AxesImage at 0x7faa66d4a4d0>

In [87]:
plt.imshow(edges)


Out[87]:
<matplotlib.image.AxesImage at 0x7faa66ff4d90>

In [ ]: