In [1]:
%load_ext autoreload
%autoreload 2

import cPickle as pickle
import os; import sys; sys.path.append('..')
import gp
import gp.nets as nets

from nolearn.lasagne.visualize import plot_loss
from nolearn.lasagne.visualize import plot_conv_weights
from nolearn.lasagne.visualize import plot_conv_activity
from nolearn.lasagne.visualize import plot_occlusion

from sklearn.metrics import classification_report, accuracy_score, roc_curve, auc, precision_recall_fscore_support, f1_score, precision_recall_curve, average_precision_score, zero_one_loss


from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt

%matplotlib inline


/home/d/nolearn/local/lib/python2.7/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
  warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
Using gpu device 0: GeForce GTX TITAN (CNMeM is disabled, CuDNN 4007)
/home/d/nolearn/local/lib/python2.7/site-packages/theano/tensor/signal/downsample.py:6: UserWarning: downsample module has been moved to the theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool module.")

In [2]:
# Candidate trained networks; each filename encodes the input channels it was
# trained on (I=image, P=prob, M=binary mask, B=border, LB=large border).
NETS = [
    '../nets/IP_FULL.p',     # image + prob
    '../nets/IPLB_FULL.p',   # image + large border
    '../nets/IPM_FULL.p',    # image + prob + binary
    '../nets/IPMB_FULL.p',   # image + prob + binary + border
    '../nets/IPMLB_FULL.p',  # image + prob + binary + large border
]

# Pick the last (most feature-rich) variant for this run.
network_path = NETS[-1]

# NOTE(review): pickle.load executes arbitrary code on load — only safe for
# trusted, locally produced model files.
with open(network_path, 'rb') as f:
    net = pickle.load(f)

In [4]:
# Load the Dojo benchmark volume: raw EM images, membrane probabilities,
# ground-truth labels, the Rhoana initial segmentation, and its bounding box.
input_image, input_prob, input_gold, input_rhoana, dojo_bbox = gp.Legacy.read_dojo_data()
# Baseline VI (variation of information) of the uncorrected segmentation.
# Fix: this line was spuriously indented in the original cell, which raises
# an IndentationError on a fresh kernel run.
original_mean_VI, original_median_VI, original_VI_s = gp.Legacy.VI(input_gold, input_rhoana)

In [5]:
# Tag the loaded network with its variant name so downstream gp helpers
# can identify which input-channel combination it was trained on.
net.uuid = 'IPMLB'

In [ ]:


In [41]:
# Build the merge-candidate score matrix for slice 2 only (the [2:3] slices
# keep the leading volume axis), with oversampling on and no mask channel.
# NOTE(review): exact matrix semantics come from gp.Legacy — verify there.
bigM_dojo = gp.Legacy.create_bigM_without_mask(net, input_image[2:3], input_prob[2:3], input_rhoana[2:3], verbose=False, oversampling=True)

In [ ]:


In [ ]:


In [43]:
# Spot-check a single entry of the score matrix for the first (only) slice:
# the network's score for the segment pair (5, 6) — prints ~0.940 below.
bigM_dojo[0][5,6]


Out[43]:
0.94007781893014908

In [46]:
# Run the simulated-user split-correction loop for 0.5 hours of simulated
# effort, driven by the score matrix bigM_dojo; returns the updated matrix,
# the corrected volume, the applied fixes, and per-step VI traces.
# NOTE(review): return-tuple semantics inferred from names — confirm in gp.Legacy.
bigM_dojo_after, out_dojo_volume_after_sim_user, dojo_sim_user_fixes, dojo_sim_user_vi_s, vi_s_per_step2 = gp.Legacy.splits_global_from_M(
    net, bigM_dojo, input_image[2:3], input_prob[2:3], input_rhoana[2:3], input_gold[2:3], hours=.5)


done

In [47]:
# Display the per-step VI trace: the output below shows VI improving
# monotonically from ~0.416 to ~0.335 over 17 correction steps.
vi_s_per_step2


Out[47]:
[(0.41596147559619201, 0.41596147559619201, [0.415961475596192]),
 (0.41550662413970052, 0.41550662413970052, [0.4155066241397005]),
 (0.41386227988968738, 0.41386227988968738, [0.4138622798896874]),
 (0.40783529127954399, 0.40783529127954399, [0.407835291279544]),
 (0.39681246325197428, 0.39681246325197428, [0.3968124632519743]),
 (0.38285135487078303, 0.38285135487078303, [0.38285135487078303]),
 (0.37640030542634673, 0.37640030542634673, [0.3764003054263467]),
 (0.37620673464582932, 0.37620673464582932, [0.3762067346458293]),
 (0.37617726387055139, 0.37617726387055139, [0.3761772638705514]),
 (0.3591082780815551, 0.3591082780815551, [0.3591082780815551]),
 (0.35841175297326799, 0.35841175297326799, [0.358411752973268]),
 (0.35752486408339301, 0.35752486408339301, [0.357524864083393]),
 (0.3568546487170341, 0.3568546487170341, [0.3568546487170341]),
 (0.35560799564815149, 0.35560799564815149, [0.3556079956481515]),
 (0.35546754595211283, 0.35546754595211283, [0.35546754595211283]),
 (0.35533534776649489, 0.35533534776649489, [0.3553353477664949]),
 (0.33504233898673164, 0.33504233898673164, [0.33504233898673164])]

In [ ]: