In [1]:
%load_ext autoreload
%autoreload 2

import cPickle as pickle
import os
import sys
sys.path.append('..')
import gp
import gp.nets as nets

from nolearn.lasagne.visualize import plot_loss
from nolearn.lasagne.visualize import plot_conv_weights
from nolearn.lasagne.visualize import plot_conv_activity
from nolearn.lasagne.visualize import plot_occlusion

from sklearn.metrics import (classification_report, accuracy_score, roc_curve, auc,
                             precision_recall_fscore_support, f1_score,
                             precision_recall_curve, average_precision_score,
                             zero_one_loss)


from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt

%matplotlib inline


Using gpu device 0: GeForce GTX TITAN (CNMeM is disabled, CuDNN 4007)
/home/d/nolearn/local/lib/python2.7/site-packages/theano/tensor/signal/downsample.py:6: UserWarning: downsample module has been moved to the theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool module.")

In [2]:
NETS = []
NETS.append('../nets/IP_FULL.p') # image + prob
NETS.append('../nets/IPLB_FULL.p') # image + large border
NETS.append('../nets/IPM_FULL.p') # image + prob + binary
NETS.append('../nets/IPMB_FULL.p') # image + prob + binary + border
NETS.append('../nets/IPMLB_FULL.p') # image + prob + binary + large border

network_path = NETS[2]

with open(network_path, 'rb') as f:
    net = pickle.load(f)

In [4]:
X_test, y_test = gp.Patch.load_rgb_test_only('ipm')


Loaded /home/d/patches//ipm/ in 0.000324964523315 seconds.

In [5]:
test_prediction = net.predict(X_test)
test_prediction_prob = net.predict_proba(X_test)
print
print 'Precision/Recall:'
print classification_report(y_test, test_prediction)


Precision/Recall:
             precision    recall  f1-score   support

          0       0.96      0.93      0.95      8780
          1       0.94      0.96      0.95      8780

avg / total       0.95      0.95      0.95     17560

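The same per-class numbers can also be pulled out programmatically with precision_recall_fscore_support, which is already imported above (a minimal sketch using the y_test and test_prediction variables from the previous cell):

prec, rec, f1, support = precision_recall_fscore_support(y_test, test_prediction)
print 'F1 per class:', f1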

In [6]:
test_acc = net.score(X_test, y_test)
acc_score = accuracy_score(y_test, test_prediction)
print 'Test Accuracy:', test_acc
print 'Accuracy Score:', acc_score


Test Accuracy: 0.94589977221
Accuracy Score: 0.94589977221

In [7]:
plot_loss(net)


Out[7]:
<module 'matplotlib.pyplot' from '/home/d/nolearn/local/lib/python2.7/site-packages/matplotlib/pyplot.pyc'>

In [8]:
# ROC/AUC
fpr, tpr, _ = roc_curve(y_test, test_prediction_prob[:,1])
roc_auc = auc(fpr, tpr)
data = {}
data['CNN'] = (fpr, tpr, roc_auc)
gp.Legacy.plot_roc(data)


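If gp.Legacy.plot_roc is not available, an equivalent curve can be drawn directly with matplotlib (a minimal sketch using the fpr, tpr, and roc_auc values computed above):

plt.figure()
plt.plot(fpr, tpr, label='CNN (AUC = %0.3f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')  # chance level
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend(loc='lower right')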

In [9]:
net.uuid = 'IPM'

In [30]:
gp.Stats.run_dojo_xp(net)


Finding Top 5 merge errors..
4  merge errors found.
Creating dojo bigM..

Correcting merge errors with p < .05
   Mean VI improvement 0.00459787437594
   Median VI improvement 0.0
Correcting split errors with p > .95
   Mean VI improvement 0.0584495224222
   Median VI improvement 0.0321204128706

Correcting merge errors with p < .01
   Mean VI improvement 0.00459787437594
   Median VI improvement 0.0
Correcting split errors with p > .99
   Mean VI improvement 0.0280045225696
   Median VI improvement 0.0191918271091

Correcting merge errors by simulated user (er=0)
   Mean VI improvement 0.00459787437594
   Median VI improvement 0.0
Correcting split errors by simulated user (er=0)
   Mean VI improvement 0.124773965903
   Median VI improvement 0.0914241674724

   Re-running simulated user with er=0 .. 0.2
/home/d/nolearn/local/lib/python2.7/site-packages/matplotlib/font_manager.py:1288: UserWarning: findfont: Font family [u'normal'] not found. Falling back to Bitstream Vera Sans
  (prop.get_family(), self.defaultFamily[fontext]))

In [31]:
gp.Stats.run_cylinder_xp(net)


Original median VI 0.379410649496
Creating cylinder bigM..
4772 generated in 452.891104937 seconds.
Grouped into 1159 patches in 0.0224509239197 seconds.
4568 generated in 504.386394978 seconds.
Grouped into 1115 patches in 0.0217649936676 seconds.
4624 generated in 532.711021185 seconds.
Grouped into 1123 patches in 0.0214860439301 seconds.
4276 generated in 335.807290077 seconds.
Grouped into 1054 patches in 0.0198149681091 seconds.
4220 generated in 387.971992016 seconds.
Grouped into 1040 patches in 0.0197379589081 seconds.
4348 generated in 390.036458969 seconds.
Grouped into 1061 patches in 0.0172111988068 seconds.
4372 generated in 427.490860224 seconds.
Grouped into 1065 patches in 0.019552230835 seconds.
4528 generated in 513.515380144 seconds.
Grouped into 1106 patches in 0.020968914032 seconds.
4392 generated in 546.434421062 seconds.
Grouped into 1070 patches in 0.0185980796814 seconds.
4232 generated in 522.400212049 seconds.
Grouped into 1031 patches in 0.0191111564636 seconds.
4492 generated in 558.701794147 seconds.
Grouped into 1100 patches in 0.0212180614471 seconds.
4256 generated in 467.334840059 seconds.
Grouped into 1050 patches in 0.0198171138763 seconds.
4076 generated in 582.018352032 seconds.
Grouped into 1006 patches in 0.0195128917694 seconds.
3784 generated in 526.151586056 seconds.
Grouped into 930 patches in 0.0181062221527 seconds.
3820 generated in 419.549533129 seconds.
Grouped into 935 patches in 0.0152068138123 seconds.
3544 generated in 289.70704484 seconds.
Grouped into 875 patches in 0.013867855072 seconds.
3256 generated in 205.535644054 seconds.
Grouped into 798 patches in 0.015576839447 seconds.
2800 generated in 195.358144045 seconds.
Grouped into 680 patches in 0.0112631320953 seconds.
3132 generated in 305.485315084 seconds.
Grouped into 772 patches in 0.014897108078 seconds.
2696 generated in 303.092609167 seconds.
Grouped into 657 patches in 0.0130341053009 seconds.
2556 generated in 281.450122118 seconds.
Grouped into 627 patches in 0.0121829509735 seconds.
2332 generated in 268.150767088 seconds.
Grouped into 567 patches in 0.0110051631927 seconds.
2168 generated in 252.869513035 seconds.
Grouped into 534 patches in 0.010106086731 seconds.
1964 generated in 243.976443052 seconds.
Grouped into 482 patches in 0.00932192802429 seconds.
1856 generated in 216.68571496 seconds.
Grouped into 454 patches in 0.00760507583618 seconds.
1712 generated in 215.85947299 seconds.
Grouped into 419 patches in 0.00822615623474 seconds.
1556 generated in 193.058613062 seconds.
Grouped into 380 patches in 0.00736117362976 seconds.
1420 generated in 190.535416126 seconds.
Grouped into 351 patches in 0.00682497024536 seconds.
1512 generated in 187.247636795 seconds.
Grouped into 378 patches in 0.00607895851135 seconds.
1452 generated in 195.081421852 seconds.
Grouped into 360 patches in 0.00699901580811 seconds.
1296 generated in 171.452905893 seconds.
Grouped into 320 patches in 0.00533103942871 seconds.
1028 generated in 131.18249011 seconds.
Grouped into 253 patches in 0.00496912002563 seconds.
996 generated in 76.522919178 seconds.
Grouped into 244 patches in 0.0046911239624 seconds.
836 generated in 59.6459159851 seconds.
Grouped into 203 patches in 0.00398993492126 seconds.
800 generated in 58.5655198097 seconds.
Grouped into 199 patches in 0.00384402275085 seconds.
744 generated in 53.9145228863 seconds.
Grouped into 186 patches in 0.00347900390625 seconds.
768 generated in 60.4598870277 seconds.
Grouped into 186 patches in 0.00371384620667 seconds.
616 generated in 54.8449919224 seconds.
Grouped into 154 patches in 0.00319600105286 seconds.
632 generated in 52.532282114 seconds.
Grouped into 158 patches in 0.00255107879639 seconds.
572 generated in 46.3321151733 seconds.
Grouped into 143 patches in 0.00267386436462 seconds.
512 generated in 60.4678668976 seconds.
Grouped into 128 patches in 0.0024209022522 seconds.
448 generated in 65.9324400425 seconds.
Grouped into 112 patches in 0.00224900245667 seconds.
372 generated in 60.023045063 seconds.
Grouped into 93 patches in 0.00178408622742 seconds.
316 generated in 56.7508671284 seconds.
Grouped into 79 patches in 0.00153303146362 seconds.
244 generated in 42.0286440849 seconds.
Grouped into 61 patches in 0.00119686126709 seconds.
176 generated in 35.7135658264 seconds.
Grouped into 44 patches in 0.000738143920898 seconds.
120 generated in 26.7850751877 seconds.
Grouped into 30 patches in 0.000726938247681 seconds.
56 generated in 18.2951769829 seconds.
Grouped into 14 patches in 0.000286102294922 seconds.
4 generated in 10.1509261131 seconds.
Grouped into 1 patches in 0.000175952911377 seconds.
0 generated in 0.732821941376 seconds.
Grouped into 0 patches in 3.2901763916e-05 seconds.

Correcting split errors with p > .95
30 minutes done bigM_max= 0.999721229076
60 minutes done bigM_max= 0.99816775322
90 minutes done bigM_max= 0.993361234665
120 minutes done bigM_max= 0.981953978539
150 minutes done bigM_max= 0.954852461815
   Mean VI improvement 0.041472421143
   Median VI improvement 0.0235039724767

Correcting split errors with p > .99
30 minutes done bigM_max= 0.999721229076
60 minutes done bigM_max= 0.99816775322
90 minutes done bigM_max= 0.993361234665
   Mean VI improvement 0.0308815608684
   Median VI improvement 0.0218376399118

Correcting split errors with p >= .0
30 minutes done bigM_max= 0.999721229076
60 minutes done bigM_max= 0.99816775322
90 minutes done bigM_max= 0.993361234665
120 minutes done bigM_max= 0.981953978539
150 minutes done bigM_max= 0.954852461815
180 minutes done bigM_max= 0.902934789658
210 minutes done bigM_max= 0.81784003973
240 minutes done bigM_max= 0.686863839626
270 minutes done bigM_max= 0.52119743824
300 minutes done bigM_max= 0.35077893734
330 minutes done bigM_max= 0.207722504402
360 minutes done bigM_max= 0.125022251067
390 minutes done bigM_max= 0.0780057758093
420 minutes done bigM_max= 0.0493769086897
450 minutes done bigM_max= 0.0312896221876
480 minutes done bigM_max= 0.471707431691
510 minutes done bigM_max= 0.131643637219
540 minutes done bigM_max= 0.00768568040803
570 minutes done bigM_max= 0.00491200154647
600 minutes done bigM_max= 0.237702138667
630 minutes done bigM_max= 0.00177373120096
660 minutes done bigM_max= 0.00140031054616
690 minutes done bigM_max= 0.159920871258
720 minutes done bigM_max= 0.145424574614
750 minutes done bigM_max= 0.000125587044749
780 minutes done bigM_max= 0.000599132786044
810 minutes done bigM_max= 8.36122671899e-06
840 minutes done bigM_max= 6.98070493854e-07
870 minutes done bigM_max= 4.37977876189e-09
   Mean VI improvement -2.89881670844
   Median VI improvement -2.96778073752

Correcting split errors by simulated user (er=0)
   Mean VI improvement 0.081602089592
   Median VI improvement 0.078013907317
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-31-92c57450d604> in <module>()
----> 1 gp.Stats.run_cylinder_xp(net)

/home/d/Projects/gp/gp/stats.pyc in run_cylinder_xp(cnn)
    587     # gp.Legacy.plot_vi_combined(vi_s_per_correction_auto, vi_s_per_correction, output_folder+'/cylinder_combined_vi.pdf')
    588 
--> 589     gp.Legacy.plot_vi_combined_no_interpolation(vi_s_per_correction_auto, vi_s_per_correction, output_folder+'/cylinder_combined_vi_no_interpolation.pdf', sweetspot=len(cylinder_auto_fixes_95))
    590 
    591 

TypeError: plot_vi_combined_no_interpolation() got an unexpected keyword argument 'sweetspot'

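The cylinder corrections themselves complete, but run_cylinder_xp fails at the final plotting step because the installed gp.Legacy.plot_vi_combined_no_interpolation does not accept a sweetspot keyword. A possible workaround (a sketch, assuming the three positional arguments shown in the traceback are otherwise valid) is to drop that keyword at gp/stats.py line 589:

# possible fix in gp/stats.py, line 589 -- drop the unsupported keyword
gp.Legacy.plot_vi_combined_no_interpolation(vi_s_per_correction_auto, vi_s_per_correction,
                                            output_folder + '/cylinder_combined_vi_no_interpolation.pdf')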
In [11]:
# load dojo data
input_image, input_prob, input_gold, input_rhoana, dojo_bbox = gp.Legacy.read_dojo_data()

In [12]:
input_image[0].max()


Out[12]:
225

In [13]:
imshow(input_prob[2], cmap='gray')


Out[13]:
<matplotlib.image.AxesImage at 0x7f75e8df7250>

In [41]:
imshow(input_rhoana[0])


Out[41]:
<matplotlib.image.AxesImage at 0x7fb72bd079d0>

In [53]:
import numpy as np
def seg_to_color(seg):
    # map integer segment labels to pseudo-random RGB colors for display
    colors = np.zeros(seg.shape + (3,), dtype=np.uint8)
    colors[:, :, 0] = np.mod(107 * seg[:, :], 700).astype(np.uint8)
    colors[:, :, 1] = np.mod(509 * seg[:, :], 900).astype(np.uint8)
    colors[:, :, 2] = np.mod(200 * seg[:, :], 777).astype(np.uint8)
    return colors

i_r = input_image[0]
# replicate the grayscale slice into the three RGB channels
rrr = np.zeros((i_r.shape[0], i_r.shape[1], 3), dtype=np.uint8)
for j in range(rrr.shape[0]):
    for i in range(rrr.shape[1]):
        rrr[j, i] = (i_r[j, i], i_r[j, i], i_r[j, i])

# highlight high-probability membrane pixels in green
rrr[input_prob[0] > 220] = (0, 255, 0)

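The pixel-by-pixel loop in the previous cell can be replaced by a vectorized equivalent (a minimal sketch; np.dstack stacks the grayscale slice into three identical RGB channels):

rrr = np.dstack((i_r, i_r, i_r))
rrr[input_prob[0] > 220] = (0, 255, 0)  # overlay high-probability membrane pixels in green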
In [57]:
plt.figure()
imshow(rrr)


Out[57]:
<matplotlib.image.AxesImage at 0x7fb6c687f450>

In [ ]:


In [55]:
imshow(input_gold[0], cmap='gray')


Out[55]:
<matplotlib.image.AxesImage at 0x7fb6c681e9d0>

In [56]:
imshow(input_rhoana[0], cmap='gray')


Out[56]:
<matplotlib.image.AxesImage at 0x7fb72bcf0f50>

In [14]:
original_mean_VI, original_median_VI, original_VI_s = gp.Legacy.VI(input_gold, input_rhoana)
original_adapted_rand = gp.metrics.adapted_rand(input_rhoana, input_gold)

In [64]:
merge_errors = gp.Legacy.get_top5_merge_errors(net, input_image, input_prob, input_rhoana)


---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-64-3647edb73200> in <module>()
----> 1 merge_errors = gp.Legacy.get_top5_merge_errors(net, input_image, input_prob, input_rhoana)

/home/d/Projects/gp/gp/legacy.py in get_top5_merge_errors(cnn, input_image, input_prob, input_rhoana, verbose)
    315                                               dilate=True,
    316                                               border_seeds=True,
--> 317                                               oversampling=False)
    318 
    319             if len(results) > 0:

/home/d/Projects/gp/gp/legacy.py in fix_single_merge(cnn, cropped_image, cropped_prob, cropped_binary, N, invert, dilate, border_seeds, erode, debug, before_merge_error, real_border, oversampling, crop)
    233 
    234           for i in range(5):
--> 235             eroded_ws2 = mh.erode(eroded_ws2)
    236 
    237           # Util.view(eroded_ws, large=True, color=False)

/home/d/nolearn/local/lib/python2.7/site-packages/mahotas/morph.pyc in erode(A, Bc, out, output)
    196     Bc = get_structuring_elem(A,Bc)
    197     output = _get_output(A, out, 'erode', output=output)
--> 198     return _morph.erode(A, Bc, output)
    199 
    200 

KeyboardInterrupt: 

In [66]:
first_rand = gp.metrics.adapted_rand(input_rhoana[0], input_gold[0])

In [78]:
net.uuid = 'IPMLB'

In [15]:
M = gp.Legacy.create_bigM_without_mask(net, input_image[0:1], input_prob[0:1], input_rhoana[0:1], verbose=False)

In [25]:
newM, new_rhoana, fixes, new_vi_s = gp.Legacy.splits_global_from_M_automatic(net, M, input_image[0:1], input_prob[0:1], 
                                                                             input_rhoana[0:1], input_gold[0:1], sureness_threshold=.95)

In [26]:
fixes


Out[26]:
[(1, 0.99975186586380005),
 (1, 0.99943643808364868),
 (1, 0.99432837963104248),
 (0, 0.98944807052612305),
 (1, 0.9682241678237915),
 (1, 0.95969843864440918),
 (1, 0.97200888395309448),
 (0, 0.95207822322845459)]

In [27]:
gp.metrics.adapted_rand(new_rhoana[0], input_gold[0])


Out[27]:
0.069209533050430649

In [89]:
first_rand


Out[89]:
0.073737703314086689

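To compare the adapted Rand error before and after the automatic split corrections directly (a small sketch using the variables from the cells above):

new_rand = gp.metrics.adapted_rand(new_rhoana[0], input_gold[0])
print 'adapted Rand before:', first_rand
print 'adapted Rand after:', new_rand
print 'improvement:', first_rand - new_rand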
In [ ]: