In [1]:
%load_ext autoreload
%autoreload 2

from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt

%matplotlib inline
import numpy as np
import sys
sys.path.append('../')
import gp


Using gpu device 0: GeForce GTX TITAN (CNMeM is disabled, CuDNN 4007)
/home/d/nolearn/local/lib/python2.7/site-packages/theano/tensor/signal/downsample.py:6: UserWarning: downsample module has been moved to the theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool module.")

In [2]:
e_p = []
p = []

In [3]:
import cPickle as pickle
with open('../nets/IPMLB_FULL.p', 'rb') as f:
    cnn = pickle.load(f)
cnn.uuid = 'IPMLB'
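
A quick check that the pickled classifier loaded correctly; this only inspects the object and the uuid attribute set above, no additional API is assumed.
In [ ]:
# confirm the network deserialized and carries the uuid we just assigned
print type(cnn)
print cnn.uuid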

In [4]:
# here we just load e_p and p
import cPickle as pickle
with open('/home/d/data/CYLPATCHES/e_p.p', 'rb') as f:
    e_p = pickle.load(f)
    
with open('/home/d/data/CYLPATCHES/p.p', 'rb') as f:
    p = pickle.load(f)
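
A small sanity check of the structure the evaluation loops below rely on: e_p and p are assumed to be per-slice lists of patch dicts that carry at least the 'l', 'n' and 'prob' keys used later (and the first slice is assumed to be non-empty).
In [ ]:
# number of slices per list and the keys of an example patch dict
print len(e_p), len(p)
print sorted(e_p[0][0].keys())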

In [ ]:
#
# FOCUSED PROOFREADING
#

In [3]:
# convert to Focused Proofreading (FP) format with target
import neuroproof
import neuroproof.FocusedProofreading as fp

In [4]:
graphs = []
for z in range(250,299):
    
    g = fp.Graph('/home/d/FP/cylinder/graph_'+str(z)+'.json')
    graphs.append(g)

In [5]:
fp_y_test_proba = []
fp_y_test = []
gt_y_test = []
for z in range(49):

    cur_e_p = e_p[z]
    cur_p = p[z]

    g = graphs[z]

    # pairs from e_p (ground truth label 1)
    for pa in cur_e_p:

        label1 = pa['l']
        label2 = pa['n']

        # find_close_bodies yields (neighbor_label, probability) pairs for label1;
        # keep the probability FP assigns to merging label1 with label2
        graph_neighbors = sorted(g.find_close_bodies(label1,0,0.))

        pred = [ne[1] for ne in graph_neighbors if ne[0] == label2]

        fp_y_test.append(np.round(pred))
        fp_y_test_proba.append(pred)
        gt_y_test.append(1)

    # pairs from p (ground truth label 0)
    for pa in cur_p:

        label1 = pa['l']
        label2 = pa['n']

        graph_neighbors = sorted(g.find_close_bodies(label1,0,0.))

        pred = [ne[1] for ne in graph_neighbors if ne[0] == label2]

        fp_y_test.append(np.round(pred))
        fp_y_test_proba.append(pred)
        gt_y_test.append(0)

    print z


0
1
2
...
47
48
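
The lookup of the FP probability for a (label1, label2) pair is identical in both inner loops above, so it could be factored into a helper; this is only a sketch, and fp_merge_probability is a hypothetical name, not part of NeuroProof.
In [ ]:
# hypothetical helper: probability that Focused Proofreading assigns to merging label1 and label2
def fp_merge_probability(graph, label1, label2):
    graph_neighbors = sorted(graph.find_close_bodies(label1, 0, 0.))
    return [ne[1] for ne in graph_neighbors if ne[0] == label2]

# example usage on the first stored pair (assumes e_p[0] is non-empty)
pa = e_p[0][0]
print fp_merge_probability(graphs[0], pa['l'], pa['n'])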

In [7]:
from sklearn.metrics import (classification_report, accuracy_score, roc_curve, auc,
                             precision_recall_fscore_support, f1_score, precision_recall_curve,
                             average_precision_score, zero_one_loss)

In [8]:
print 'Precision/Recall:'
print classification_report(np.array(gt_y_test), np.array(fp_y_test))


Precision/Recall:
             precision    recall  f1-score   support

          0       0.93      0.31      0.47     81184
          1       0.11      0.78      0.19      8780

avg / total       0.85      0.36      0.44     89964


In [9]:
acc_score = accuracy_score(np.array(gt_y_test), np.array(fp_y_test))
print 'Accuracy Score:', acc_score


Accuracy Score: 0.356942777111
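
Beyond accuracy, the already imported roc_curve and auc can summarize the FP probabilities; a sketch, assuming every entry of fp_y_test_proba is a one-element list (as the inspection further below suggests).
In [ ]:
# flatten the one-element probability lists and compute ROC AUC for Focused Proofreading
fp_proba_flat = np.array([pr[0] for pr in fp_y_test_proba])
fp_fpr, fp_tpr, _ = roc_curve(np.array(gt_y_test), fp_proba_flat, pos_label=1)
print 'FP ROC AUC:', auc(fp_fpr, fp_tpr)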

In [11]:
with open('/home/d/GPSTUDY/fp_y_test_proba_cyl.p','wb') as f:
    pickle.dump(fp_y_test_proba, f)

In [ ]:


In [ ]:
#
# GUIDED PROOFREADING
#

In [5]:
gp_y_test_proba = []
gp_y_test = []
gt_y_test = []
for z in range(len(e_p)):

    cur_e_p = e_p[z]
    cur_p = p[z]

    # pairs from e_p (ground truth label 1)
    for pa in cur_e_p:

        label1 = pa['l']
        label2 = pa['n']
        # invert the stored probability in place (note: re-running this cell inverts it again)
        pa['prob'] = 1.-pa['prob']
        pred = gp.Patch.test_and_unify([pa], cnn)

        gp_y_test.append(np.round(pred))
        gp_y_test_proba.append(pred)
        gt_y_test.append(1)

    # pairs from p (ground truth label 0)
    for pa in cur_p:

        label1 = pa['l']
        label2 = pa['n']
        pa['prob'] = 1.-pa['prob']
        pred = gp.Patch.test_and_unify([pa], cnn)

        gp_y_test.append(np.round(pred))
        gp_y_test_proba.append(pred)
        gt_y_test.append(0)

    print z


0
1
2
...
48
49
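
Note that the loop above inverts pa['prob'] in place, so e_p and p are modified and re-running the cell would invert the values again. A minimal sketch of a non-destructive alternative, assuming each patch is a plain dict:
In [ ]:
# non-destructive variant: work on a shallow copy so the stored patches keep their values
# (illustration of the pattern only; e_p was already flipped in place by the loop above)
pa_flipped = dict(e_p[0][0])
pa_flipped['prob'] = 1. - pa_flipped['prob']
print gp.Patch.test_and_unify([pa_flipped], cnn)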

In [6]:
from sklearn.metrics import (classification_report, accuracy_score, roc_curve, auc,
                             precision_recall_fscore_support, f1_score, precision_recall_curve,
                             average_precision_score, zero_one_loss)

In [7]:
print classification_report(np.array(gt_y_test), np.array(gp_y_test))


             precision    recall  f1-score   support

          0       1.00      0.93      0.96     81184
          1       0.61      0.96      0.74      8780

avg / total       0.96      0.94      0.94     89964


In [8]:
acc_score = accuracy_score(np.array(gt_y_test), np.array(gp_y_test))
print 'Accuracy Score:', acc_score


Accuracy Score: 0.935262994087
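
For comparison with the Focused Proofreading numbers above, the same ROC AUC summary for the guided proofreading probabilities; a sketch assuming gp_y_test_proba holds one scalar per pair (as the inspection further below suggests).
In [ ]:
# ROC AUC for Guided Proofreading
gp_fpr, gp_tpr, _ = roc_curve(np.array(gt_y_test), np.array(gp_y_test_proba), pos_label=1)
print 'GP ROC AUC:', auc(gp_fpr, gp_tpr)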

In [9]:
with open('/home/d/GPSTUDY/gp_y_test_proba_cyl.p','wb') as f:
    pickle.dump(gp_y_test_proba, f)

In [10]:
with open('/home/d/GPSTUDY/gt_y_test_cyl.p','wb') as f:
    pickle.dump(gt_y_test, f)

In [ ]:


In [ ]:


In [ ]:
#
# combined plots
#

In [15]:
# load the saved FP and GP probabilities and the ground truth labels
with open('/home/d/GPSTUDY/gp_y_test_proba_cyl.p', 'rb') as f:
    gp_y_test_proba = pickle.load(f)
with open('/home/d/GPSTUDY/fp_y_test_proba_cyl.p', 'rb') as f:
    fp_y_test_proba = pickle.load(f)
with open('/home/d/GPSTUDY/gt_y_test_cyl.p', 'rb') as f:
    gt_y_test = pickle.load(f)

In [16]:
gp_precision, gp_recall, gp_thresholds = precision_recall_curve(gt_y_test, gp_y_test_proba, pos_label=1)
fp_precision, fp_recall, fp_thresholds = precision_recall_curve(gt_y_test, fp_y_test_proba, pos_label=1)
plt.plot(gp_recall, gp_precision, color='blue')
plt.plot(fp_recall, fp_precision, color='red')


Out[16]:
[<matplotlib.lines.Line2D at 0x7fa621c75890>]
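
A slightly more complete version of the plot above; a sketch that adds axis labels, a legend and the average precision of each method, assuming the fp_y_test_proba entries are one-element lists as shown below.
In [ ]:
# average precision for both methods plus a labeled precision-recall plot
from sklearn.metrics import average_precision_score
fp_proba_flat = np.array([pr[0] for pr in fp_y_test_proba])
gp_ap = average_precision_score(gt_y_test, gp_y_test_proba)
fp_ap = average_precision_score(gt_y_test, fp_proba_flat)
plt.plot(gp_recall, gp_precision, color='blue', label='Guided Proofreading (AP=%.2f)' % gp_ap)
plt.plot(fp_recall, fp_precision, color='red', label='Focused Proofreading (AP=%.2f)' % fp_ap)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend(loc='lower left')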

In [20]:
fp_y_test_proba[0:20]


Out[20]:
[[0.8196078389883041],
 [0.8196078389883041],
 [0.5511418576822149],
 [0.5511418576822149],
 [0.7098039090633392],
 [0.7098039090633392],
 [0.5511418576822149],
 [0.5511418576822149],
 [0.3513725279943616],
 [0.3513725279943616],
 [0.7411764562129974],
 [0.7411764562129974],
 [0.8784313723444939],
 [0.8784313723444939],
 [0.3647058606147766],
 [0.3647058606147766],
 [0.37647056579589844],
 [0.37647056579589844],
 [0.4470587968826294],
 [0.4470587968826294]]

In [21]:
gt_y_test[0:20]


Out[21]:
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

In [22]:
gp_y_test_proba[0:20]


Out[22]:
[0.82309329509735107,
 0.82309329509735107,
 0.85707062482833862,
 0.85707062482833862,
 0.99994933605194092,
 0.99994933605194092,
 0.85707062482833862,
 0.85707062482833862,
 0.9991874098777771,
 0.9991874098777771,
 0.99998998641967773,
 0.99998998641967773,
 0.99999856948852539,
 0.99999856948852539,
 0.91307896375656128,
 0.91307896375656128,
 0.9999924898147583,
 0.9999924898147583,
 0.37725311517715454,
 0.37725311517715454]

In [ ]: