In [1]:
%load_ext autoreload
%autoreload 2

from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt

%matplotlib inline
import numpy as np
import sys
sys.path.append('../')
import gp


Using gpu device 0: GeForce GTX TITAN (CNMeM is disabled, CuDNN 4007)
/home/d/nolearn/local/lib/python2.7/site-packages/theano/tensor/signal/downsample.py:6: UserWarning: downsample module has been moved to the theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool module.")

In [7]:
#
#
# this needs to be run against commit 959c5e7233044f27932b41c0d311adfeaeb1fc7d
#
#

In [2]:
e_p = []
p = []

In [2]:
import cPickle as pickle
with open('../nets/IPMLB_FULL.p', 'rb') as f:
    cnn = pickle.load(f)
cnn.uuid = 'IPMLB'

In [3]:
# for z in range(0,1):
e_p = []
p = []
image, prob, gold, rhoana, bb = gp.Legacy.read_dojo_data()

for z in range(0,10):

    error_patches, patches = gp.Patch.patchify_maxoverlap(image[z], prob[z], np.zeros((image[0].shape[0], image[0].shape[1])), rhoana[z], gold[z], sample_rate=1, clamp_result=False)
    
    e_p.append(error_patches)
    p.append(patches)
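
In [ ]:
# Hedged sanity check (not part of the original run): report how many
# split-error patches vs. correct patches were collected per slice. The
# totals should line up with the class support in the reports further down.
for z in range(10):
    print 'slice', z, ':', len(e_p[z]), 'error patches,', len(p[z]), 'correct patches'
print 'total:', sum(len(x) for x in e_p), 'error patches,', sum(len(x) for x in p), 'correct patches'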


In [11]:
from sklearn.metrics import classification_report, accuracy_score, roc_curve, auc, precision_recall_fscore_support
from sklearn.metrics import f1_score, precision_recall_curve, average_precision_score, zero_one_loss

In [ ]:
#
# guided proofreading
#

In [4]:
gp_y_test_proba = []
gp_y_test = []
gt_y_test = []
for z in range(10):
    
    cur_e_p = e_p[z]
    cur_p = p[z]
    
    for pa in cur_e_p:
#         print pa['l'], pa['n']
        
        label1 = pa['l']
        label2 = pa['n']

        pred = gp.Patch.test_and_unify([pa], cnn)
        
        gp_y_test.append(np.round(pred))
        gp_y_test_proba.append(pred)
        gt_y_test.append(1)
        
    for pa in cur_p:
#         print pa['l'], pa['n']
        
        label1 = pa['l']
        label2 = pa['n']

        pred = gp.Patch.test_and_unify([pa], cnn)        
        
        gp_y_test.append(np.round(pred))
        gp_y_test_proba.append(pred)
        gt_y_test.append(0)
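
In [ ]:
# Hedged sketch, not from the original run: quick summary of the collected
# guided-proofreading predictions -- sample counts and overall accuracy.
from sklearn.metrics import accuracy_score
print 'samples:', len(gt_y_test), '(', sum(gt_y_test), 'true split errors )'
print 'accuracy:', accuracy_score(np.array(gt_y_test), np.array(gp_y_test))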

In [28]:
print 'Gold', gt_y_test[0:20]
print 'GP', gp_y_test[0:20]
print 'GP prob', gp_y_test_proba[0:20]


Gold [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
GP [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
GP prob [0.99998939037322998, 0.99998939037322998, 0.88544589281082153, 0.88544589281082153, 0.99904423952102661, 0.99904423952102661, 0.99998939037322998, 0.99998939037322998, 0.85833513736724854, 0.85833513736724854, 0.91311854124069214, 0.91311854124069214, 0.90064680576324463, 0.90064680576324463, 0.98917317390441895, 0.98917317390441895, 0.73506379127502441, 0.73506379127502441, 0.93973934650421143, 0.93973934650421143]

In [9]:
print classification_report(np.array(gt_y_test), np.array(gp_y_test))


             precision    recall  f1-score   support

          0       1.00      0.92      0.96      3488
          1       0.54      0.95      0.69       332

avg / total       0.96      0.92      0.93      3820


In [29]:
precision_recall_fscore_support(np.array(gt_y_test), np.array(gp_y_test))


Out[29]:
(array([ 0.9950495 ,  0.53741497]),
 array([ 0.92201835,  0.95180723]),
 array([ 0.95714286,  0.68695652]),
 array([3488,  332]))

In [14]:
# use binary and only report for label==1
precision_recall_fscore_support(np.array(gt_y_test), np.array(gp_y_test), average='binary')


Out[14]:
(0.5374149659863946, 0.95180722891566261, 0.68695652173913047, None)
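
In [ ]:
# Hedged sketch: average precision (area under the PR curve) computed from
# the continuous GP scores instead of the thresholded labels, as a single
# summary number; average_precision_score is already imported above.
print 'GP average precision:', average_precision_score(np.array(gt_y_test), np.array(gp_y_test_proba))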

In [18]:
# note: this curve is computed from the thresholded GP labels and plotted with
# precision on x; the next cell uses the continuous scores and plots
# recall on x and precision on y, matching the later figures
precision, recall, thresholds = precision_recall_curve(gt_y_test, gp_y_test)
plt.plot(precision, recall)


Out[18]:
[<matplotlib.lines.Line2D at 0x7fe7de529c10>]

In [ ]:
precision, recall, thresholds = precision_recall_curve(gt_y_test, gp_y_test_proba)
plt.plot(recall, precision)

In [32]:
#
# store gp_y_test_proba
#
with open('/home/d/GPSTUDY/gp_y_test_proba_dojo.p', 'wb') as f:
    pickle.dump(gp_y_test_proba, f)

In [2]:
# convert to FP format with target
import neuroproof
import neuroproof.FocusedProofreading as fp

In [3]:
e_p = []
p = []
image, prob, gold, rhoana, bb = gp.Legacy.read_dojo_data()

for z in range(0,10):

    error_patches, patches = gp.Patch.patchify_maxoverlap(image[z], prob[z], np.zeros((image[0].shape[0], image[0].shape[1])), rhoana[z], gold[z], sample_rate=1, clamp_result=False)
    
    e_p.append(error_patches)
    p.append(patches)


In [4]:
graphs = []
for z in range(10):
    
    g = fp.Graph('/home/d/FP/dojoNEW/graph_'+str(z)+'.json')
    graphs.append(g)

In [5]:
fp_y_test_proba = []
fp_y_test = []
gt_y_test = []
for z in range(10):
    
    cur_e_p = e_p[z]
    cur_p = p[z]
    
    g = graphs[z]
    
    for pa in cur_e_p:
#         print pa['l'], pa['n']
        
        label1 = pa['l']
        label2 = pa['n']

        graph_neighbors = sorted(g.find_close_bodies(label1,0,0.))

        pred = [ne[1] for ne in graph_neighbors if ne[0] == label2]
        
        fp_y_test.append(np.round(pred))
        fp_y_test_proba.append(pred)
        gt_y_test.append(1)
        
    for pa in cur_p:
#         print pa['l'], pa['n']
        
        label1 = pa['l']
        label2 = pa['n']

        graph_neighbors = sorted(g.find_close_bodies(label1,0,0.))

        pred = [ne[1] for ne in graph_neighbors if ne[0] == label2]
        
        fp_y_test.append(np.round(pred))
        fp_y_test_proba.append(pred)
        gt_y_test.append(0)
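
In [ ]:
# Hedged sketch: the predictions above are one-element lists (and could be
# empty if label2 is not among the graph neighbors of label1). Flattening
# them to plain floats makes the sklearn metrics easier to apply; the names
# fp_y_test_proba_flat / fp_y_test_flat and the 0.0 fallback for a missing
# edge are introduced here, not part of the original run.
fp_y_test_proba_flat = [float(pr[0]) if len(pr) > 0 else 0.0 for pr in fp_y_test_proba]
fp_y_test_flat = [round(pr) for pr in fp_y_test_proba_flat]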

In [6]:
from sklearn.metrics import classification_report, accuracy_score, roc_curve, auc, precision_recall_fscore_support
from sklearn.metrics import f1_score, precision_recall_curve, average_precision_score, zero_one_loss

In [7]:
print 'Gold', gt_y_test[0:20]
print 'FP', fp_y_test[0:20]
print 'FP prob', fp_y_test_proba[0:20]


Gold [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
FP [array([ 1.]), array([ 1.]), array([ 0.]), array([ 0.]), array([ 0.]), array([ 0.]), array([ 1.]), array([ 1.]), array([ 0.]), array([ 0.]), array([ 1.]), array([ 1.]), array([ 0.]), array([ 0.]), array([ 0.]), array([ 0.]), array([ 1.]), array([ 1.]), array([ 0.]), array([ 0.])]
FP prob [[0.7137254774570465], [0.7137254774570465], [0.30980390310287476], [0.30980390310287476], [0.45490193367004395], [0.45490193367004395], [0.7137254774570465], [0.7137254774570465], [0.40525950547733114], [0.40525950547733114], [0.7176470458507538], [0.7176470458507538], [0.4470587968826294], [0.4470587968826294], [0.498039186000824], [0.498039186000824], [0.6941176354885101], [0.6941176354885101], [0.427450954914093], [0.427450954914093]]

In [8]:
print 'Precision/Recall:'
print classification_report(np.array(gt_y_test), np.array(fp_y_test))


Precision/Recall:
             precision    recall  f1-score   support

          0       0.94      0.69      0.80      3488
          1       0.14      0.51      0.21       332

avg / total       0.87      0.68      0.75      3820


In [9]:
precision_recall_fscore_support(np.array(gt_y_test), np.array(fp_y_test))


Out[9]:
(array([ 0.93653251,  0.13592233]),
 array([ 0.69380734,  0.5060241 ]),
 array([ 0.79710145,  0.21428571]),
 array([3488,  332]))

In [10]:
# use binary and only report for label==1
precision_recall_fscore_support(np.array(gt_y_test), np.array(fp_y_test), average='binary')


Out[10]:
(0.13592233009708737, 0.50602409638554213, 0.21428571428571425, None)

In [11]:
precision, recall, thresholds = precision_recall_curve(gt_y_test, fp_y_test_proba, pos_label=1)
plt.plot(recall, precision)


Out[11]:
[<matplotlib.lines.Line2D at 0x7f8313fce950>]
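
In [ ]:
# Hedged sketch: FP average precision from the flattened scores defined in
# the sketch above, for a single-number comparison against the GP value.
print 'FP average precision:', average_precision_score(np.array(gt_y_test), np.array(fp_y_test_proba_flat))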

In [12]:
print gt_y_test[0:100]
print fp_y_test_proba[0:100]
print precision
print recall
print thresholds


[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[[0.7137254774570465], [0.7137254774570465], [0.30980390310287476], [0.30980390310287476], [0.45490193367004395], [0.45490193367004395], [0.7137254774570465], [0.7137254774570465], [0.40525950547733114], [0.40525950547733114], [0.7176470458507538], [0.7176470458507538], [0.4470587968826294], [0.4470587968826294], [0.498039186000824], [0.498039186000824], [0.6941176354885101], [0.6941176354885101], [0.427450954914093], [0.427450954914093], [0.427450954914093], [0.427450954914093], [0.4470587968826294], [0.4470587968826294], [0.498039186000824], [0.498039186000824], [0.56470587849617], [0.56470587849617], [0.7176470458507538], [0.7176470458507538], [0.45490193367004395], [0.45490193367004395], [0.6941176354885101], [0.6941176354885101], [0.56470587849617], [0.56470587849617], [0.40525950547733114], [0.40525950547733114], [0.30980390310287476], [0.30980390310287476], [0.43529409170150757], [0.43529409170150757], [0.41960781812667847], [0.41960781812667847], [0.37647056579589844], [0.37647056579589844], [0.18431371450424194], [0.18431371450424194], [0.4784313440322876], [0.4784313440322876], [0.5490196049213409], [0.5490196049213409], [0.4470587968826294], [0.4470587968826294], [0.22745096683502197], [0.22745096683502197], [0.26274508237838745], [0.26274508237838745], [0.32941174507141113], [0.32941174507141113], [0.3106804834338899], [0.3106804834338899], [0.22745096683502197], [0.22745096683502197], [0.37254899740219116], [0.37254899740219116], [0.18431371450424194], [0.18431371450424194], [0.5450980365276337], [0.5450980365276337], [0.47058820724487305], [0.47058820724487305], [0.32549017667770386], [0.32549017667770386], [0.5568627417087555], [0.5568627417087555], [0.42352938652038574], [0.42352938652038574], [0.43529409170150757], [0.43529409170150757], [0.42352938652038574], [0.42352938652038574], [0.42352938652038574], [0.42352938652038574], [0.5333333313465118], [0.5333333313465118], [0.37647056579589844], [0.37647056579589844], [0.4745097756385803], [0.4745097756385803], [0.4470587968826294], [0.4470587968826294], [0.3647058606147766], [0.3647058606147766], [0.4745097756385803], [0.4745097756385803], [0.45098036527633667], [0.45098036527633667], [0.4431372284889221], [0.4431372284889221]]
[ 0.08691099  0.08595388  0.08499475  0.08412198  0.08315789  0.08324552
  0.08333333  0.08342133  0.08253968  0.08262712  0.08271474  0.08280255
  0.08289054  0.08297872  0.08306709  0.08315565  0.0832444   0.08333333
  0.08342246  0.08351178  0.08360129  0.08369099  0.08378088  0.08387097
  0.08396125  0.08297414  0.08315335  0.08342362  0.0835141   0.08360478
  0.08369565  0.08378672  0.083878    0.08424508  0.08433735  0.08442982
  0.0845225   0.08461538  0.08470847  0.08480176  0.08498896  0.08508287
  0.08517699  0.08527132  0.08435072  0.08351893  0.08361204  0.08379888
  0.08277405  0.0832396   0.08342728  0.08352144  0.08361582  0.08371041
  0.08380521  0.08418658  0.08437856  0.08476518  0.08486239  0.08304498
  0.08314088  0.08323699  0.08333333  0.08362369  0.08381839  0.08430913
  0.08440797  0.08432304  0.08442331  0.08482676  0.08513189  0.08564536
  0.08574879  0.08606061  0.08616505  0.08626974  0.08711656  0.08722359
  0.08733087  0.08787129  0.08798017  0.08706468  0.08739076  0.0875
  0.08760951  0.08838384  0.09032258  0.08996089  0.09007833  0.09078947
  0.09236948  0.09349593  0.09366391  0.09483961  0.09497207  0.0951049
  0.09585122  0.09598854  0.09649123  0.0966325   0.09777778  0.09792285
  0.10030395  0.09893455  0.10108865  0.10124611  0.10416667  0.10673235
  0.10690789  0.10958904  0.11170213  0.11111111  0.11502783  0.11809524
  0.1208577   0.11919192  0.11691023  0.11688312  0.11308204  0.11860465
  0.11904762  0.1225      0.1202046   0.1227154   0.12303665  0.12432432
  0.12640449  0.1300578   0.13432836  0.13580247  0.13592233  0.1409396
  0.14335664  0.14130435  0.14615385  0.14285714  0.14        0.14345992
  0.1460177   0.14611872  0.15311005  0.16        0.16145833  0.16756757
  0.1734104   0.18072289  0.18181818  0.1910828   0.19463087  0.18881119
  0.18705036  0.19548872  0.20155039  0.208       0.2173913   0.2293578
  0.23076923  0.23469388  0.22916667  0.22580645  0.23529412  0.23809524
  0.24691358  0.26315789  0.2739726   0.28571429  0.3030303   0.28813559
  0.28571429  0.25925926  0.26415094  0.2745098   0.28571429  0.27272727
  0.275       0.28947368  0.27777778  0.28571429  0.32258065  0.34482759
  0.35714286  0.37037037  0.34782609  0.38095238  0.4         0.41176471
  0.4375      0.42857143  0.41666667  0.4         0.5         0.42857143
  0.5         0.6         0.33333333  0.5         1.          1.        ]
[ 1.          0.98795181  0.97590361  0.96385542  0.95180723  0.95180723
  0.95180723  0.95180723  0.93975904  0.93975904  0.93975904  0.93975904
  0.93975904  0.93975904  0.93975904  0.93975904  0.93975904  0.93975904
  0.93975904  0.93975904  0.93975904  0.93975904  0.93975904  0.93975904
  0.93975904  0.92771084  0.92771084  0.92771084  0.92771084  0.92771084
  0.92771084  0.92771084  0.92771084  0.92771084  0.92771084  0.92771084
  0.92771084  0.92771084  0.92771084  0.92771084  0.92771084  0.92771084
  0.92771084  0.92771084  0.91566265  0.90361446  0.90361446  0.90361446
  0.89156627  0.89156627  0.89156627  0.89156627  0.89156627  0.89156627
  0.89156627  0.89156627  0.89156627  0.89156627  0.89156627  0.86746988
  0.86746988  0.86746988  0.86746988  0.86746988  0.86746988  0.86746988
  0.86746988  0.85542169  0.85542169  0.85542169  0.85542169  0.85542169
  0.85542169  0.85542169  0.85542169  0.85542169  0.85542169  0.85542169
  0.85542169  0.85542169  0.85542169  0.84337349  0.84337349  0.84337349
  0.84337349  0.84337349  0.84337349  0.8313253   0.8313253   0.8313253
  0.8313253   0.8313253   0.81927711  0.81927711  0.81927711  0.81927711
  0.80722892  0.80722892  0.79518072  0.79518072  0.79518072  0.79518072
  0.79518072  0.78313253  0.78313253  0.78313253  0.78313253  0.78313253
  0.78313253  0.77108434  0.75903614  0.74698795  0.74698795  0.74698795
  0.74698795  0.71084337  0.6746988   0.65060241  0.61445783  0.61445783
  0.60240964  0.59036145  0.56626506  0.56626506  0.56626506  0.55421687
  0.54216867  0.54216867  0.54216867  0.53012048  0.5060241   0.5060241
  0.4939759   0.46987952  0.45783133  0.44578313  0.42168675  0.40963855
  0.39759036  0.38554217  0.38554217  0.38554217  0.37349398  0.37349398
  0.36144578  0.36144578  0.36144578  0.36144578  0.34939759  0.3253012
  0.31325301  0.31325301  0.31325301  0.31325301  0.30120482  0.30120482
  0.28915663  0.27710843  0.26506024  0.25301205  0.24096386  0.24096386
  0.24096386  0.24096386  0.24096386  0.24096386  0.24096386  0.20481928
  0.19277108  0.1686747   0.1686747   0.1686747   0.1686747   0.14457831
  0.13253012  0.13253012  0.12048193  0.12048193  0.12048193  0.12048193
  0.12048193  0.12048193  0.09638554  0.09638554  0.09638554  0.08433735
  0.08433735  0.07228916  0.06024096  0.04819277  0.04819277  0.03614458
  0.03614458  0.03614458  0.01204819  0.01204819  0.01204819  0.        ]
[ 0.14449825  0.15812378  0.17254901  0.17396384  0.17685503  0.18039215
  0.18429832  0.18431371  0.18435984  0.18948095  0.19252593  0.20336791
  0.20429063  0.20705881  0.20761243  0.20761244  0.20996537  0.21093423
  0.21176469  0.21505573  0.21850056  0.21960783  0.22126873  0.22139175
  0.22283736  0.2235294   0.22745097  0.22791233  0.23137254  0.23460206
  0.2352941   0.23921567  0.24313724  0.24796615  0.2519031   0.25743943
  0.25819298  0.25882351  0.26045365  0.26274508  0.26666665  0.2693733
  0.27049595  0.27058822  0.27450978  0.27607842  0.27843136  0.2802153
  0.28235292  0.28627449  0.28785849  0.29019606  0.29056516  0.29496347
  0.2980392   0.30196077  0.30588233  0.30758937  0.3098039   0.31068048
  0.31111109  0.31163398  0.31372547  0.31764704  0.3215686   0.3220915
  0.32549018  0.32575161  0.32941175  0.33333331  0.33725488  0.34091503
  0.34117645  0.34334484  0.34449826  0.34509802  0.34509803  0.34841983
  0.34901959  0.35211072  0.35294116  0.35686272  0.35856977  0.35907727
  0.36078429  0.36470586  0.36862743  0.37081122  0.372549    0.37647057
  0.38039213  0.3843137   0.38823527  0.39092656  0.39123413  0.39215684
  0.39375624  0.39607841  0.39615532  0.39999998  0.40284505  0.40392154
  0.40525951  0.40784311  0.40853517  0.41176468  0.41568625  0.41822375
  0.41960782  0.42352939  0.42745095  0.43137252  0.43529409  0.43921566
  0.44313723  0.4470588   0.45098037  0.45490193  0.4588235   0.46274507
  0.46666664  0.47058821  0.47450978  0.47833909  0.47843134  0.48235291
  0.48627448  0.49019605  0.49411762  0.49803919  0.50196078  0.50588235
  0.50980392  0.51372549  0.51764704  0.51764706  0.52156863  0.52549019
  0.52941176  0.53333333  0.5372549   0.54117647  0.54509804  0.5490196
  0.55294117  0.55670895  0.55686274  0.56078431  0.56470588  0.56862745
  0.57254902  0.57647058  0.58039215  0.58431372  0.58823529  0.59215686
  0.59607843  0.59999999  0.60392156  0.60784313  0.6117647   0.61568627
  0.61960784  0.6235294   0.62745097  0.63137254  0.63529411  0.63921568
  0.64313725  0.64705881  0.65098038  0.65490195  0.65882352  0.66274509
  0.66666666  0.67058823  0.67450979  0.67843136  0.68235293  0.6862745
  0.69019607  0.69411764  0.6980392   0.70196077  0.70588234  0.70980391
  0.71372548  0.71764705  0.72156861  0.72549018  0.73333332  0.73725489
  0.74509802  0.74901959  0.75294118  0.75686274  0.76862745]

In [14]:
#
# store fp_y_test_proba
#
import cPickle as pickle
with open('/home/d/GPSTUDY/fp_y_test_proba_dojo.p', 'wb') as f:
    pickle.dump(fp_y_test_proba, f)

In [5]:
with open('/home/d/GPSTUDY/gt_y_test_dojo.p', 'wb') as f:
    pickle.dump(gt_y_test, f)

In [ ]:
#
# combined plots
#

In [18]:
# load fp and gp
with open('/home/d/GPSTUDY/gp_y_test_proba_dojo.p', 'rb') as f:
    gp_y_test_proba = pickle.load(f)
with open('/home/d/GPSTUDY/fp_y_test_proba_dojo.p', 'rb') as f:
    fp_y_test_proba = pickle.load(f)

In [20]:
gp_precision, gp_recall, gp_thresholds = precision_recall_curve(gt_y_test, gp_y_test_proba, pos_label=1)
fp_precision, fp_recall, fp_thresholds = precision_recall_curve(gt_y_test, fp_y_test_proba, pos_label=1)
plt.plot(gp_recall, gp_precision, color='blue')
plt.plot(fp_recall, fp_precision, color='red')


Out[20]:
[<matplotlib.lines.Line2D at 0x7f830f6b9ed0>]
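
In [ ]:
# Hedged sketch: the same combined PR curves with axis labels and a legend,
# so the guided (blue) vs. focused (red) proofreading comparison reads directly.
plt.figure()
plt.plot(gp_recall, gp_precision, color='blue', label='Guided Proofreading')
plt.plot(fp_recall, fp_precision, color='red', label='Focused Proofreading')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend(loc='best')
plt.show()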

In [ ]: