In [1]:
import matplotlib
matplotlib.use('agg')
import cPickle as pickle
import os; import sys; sys.path.append('../../')
import gp
import gp.nets as nets


/home/d/GP/local/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
WARNING (theano.sandbox.cuda): The cuda backend is deprecated and will be removed in the next release (v0.10).  Please switch to the gpuarray backend. You can get more information about how to switch at this URL:
 https://github.com/Theano/Theano/wiki/Converting-to-the-new-gpu-back-end%28gpuarray%29

Using gpu device 0: TITAN X (Pascal) (CNMeM is disabled, cuDNN not available)
/home/d/GP/local/lib/python2.7/site-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)

In [2]:
from sklearn.metrics import classification_report, accuracy_score, roc_curve, auc, precision_recall_fscore_support, f1_score, precision_recall_curve, average_precision_score, zero_one_loss
import numpy as np
import time

In [3]:
def r_d(cnn):
    """Run automatic split correction on the dojo volume with the given CNN
    and report the resulting variation of information (VI) improvement."""
    # load dojo data
    input_image, input_prob, input_gold, input_rhoana, dojo_bbox = gp.Legacy.read_dojo_data()

    # baseline VI of the uncorrected Rhoana segmentation against the gold standard
    original_mean_VI, original_median_VI, original_VI_s = gp.Legacy.VI(input_gold, input_rhoana)

    # output folder for cached intermediate results
    output_folder = '/home/d/netstatsCVPR2018/'+cnn.uuid+'/'
    if not os.path.exists(output_folder):
      os.makedirs(output_folder)

#     # find merge errors, if we did not generate them before
#     merge_error_file = output_folder+'/merges_new_cnn.p'
#     if os.path.exists(merge_error_file):
#       print 'Loading merge errors from file..'
#       with open(merge_error_file, 'rb') as f:
#         merge_errors = pickle.load(f)
#     else:
#       print 'Finding Top 5 merge errors..'
#       merge_errors = gp.Legacy.get_top5_merge_errors(cnn, input_image, input_prob, input_rhoana)
#       with open(merge_error_file, 'wb') as f:
#         pickle.dump(merge_errors, f)

#     print len(merge_errors), ' merge errors found.'

    # load a cached bigM matrix for the dojo volume, or create it if it does not exist yet
    bigM_dojo_file = output_folder + '/bigM_dojo_test2.p'
    if os.path.exists(bigM_dojo_file):
      print 'Loading dojo bigM from file..'
      with open(bigM_dojo_file, 'rb') as f:
        bigM_dojo = pickle.load(f)
    else:
      print 'Creating dojo bigM..'
      bigM_dojo = gp.Legacy.create_bigM_without_mask(cnn, input_image, input_prob, input_rhoana, verbose=False)
      with open(bigM_dojo_file, 'wb') as f:
        pickle.dump(bigM_dojo, f)    



    print
    dojo_vi_95_file = output_folder + '/dojo_vi_95_t6.p'

    dojo_merge_vis = output_folder + '/dojo_merge_auto95_vis.p'
    dojo_split_vis = output_folder + '/dojo_split_auto95_vis.p'

    dojo_merge_fixes = output_folder + '/dojo_merge_auto95_fixes.p'
    dojo_split_fixes = output_folder + '/dojo_split_auto95_fixes.p'

    dojo_output_95 = output_folder + '/dojo_auto95_output.p'

    if os.path.exists(dojo_vi_95_file):
      print 'Loading merge errors p < .05 and split errors p > .95 from file..'
      with open(dojo_vi_95_file, 'rb') as f:
        dojo_vi_95 = pickle.load(f)
    else:      
#       #
#       # perform merge correction with p < .05
#       #
#       print 'Correcting merge errors with p < .05'
#       bigM_dojo_05, corrected_rhoana_05, dojo_auto_merge_fixes, vi_s_per_step = gp.Legacy.perform_auto_merge_correction(cnn, bigM_dojo, input_image, input_prob, input_rhoana, merge_errors, .05, input_gold=input_gold)

#       print '   Mean VI improvement', original_mean_VI-gp.Legacy.VI(input_gold, corrected_rhoana_05)[0]
#       print '   Median VI improvement', original_median_VI-gp.Legacy.VI(input_gold, corrected_rhoana_05)[1]

#       with open(dojo_merge_vis, 'wb') as f:
#         pickle.dump(vi_s_per_step, f)


#       with open(dojo_merge_fixes, 'wb') as f:
#         pickle.dump(dojo_auto_merge_fixes, f) 

      #
      # perform split correction with p > .95
      #
      print 'Correcting split errors with p > .95'
      bigM_dojo_after_95, out_dojo_volume_after_auto_95, dojo_auto_fixes_95, dojo_auto_vi_s_95, vi_s_per_step2 = gp.Legacy.splits_global_from_M_automatic(cnn, bigM_dojo, input_image, input_prob, input_rhoana, input_gold, sureness_threshold=.95)

      dojo_vi_95 = gp.Legacy.VI(input_gold, out_dojo_volume_after_auto_95)

      with open(dojo_vi_95_file, 'wb') as f:
        pickle.dump(dojo_vi_95, f)

      with open(dojo_split_vis, 'wb') as f:
        pickle.dump(vi_s_per_step2, f)

      with open(dojo_split_fixes, 'wb') as f:
        pickle.dump(dojo_auto_fixes_95, f)       

      with open(dojo_output_95, 'wb') as f:
        pickle.dump(out_dojo_volume_after_auto_95, f) 

    print '   Mean VI improvement', original_mean_VI-dojo_vi_95[0]
    print '   Median VI improvement', original_median_VI-dojo_vi_95[1]
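
The per-slice numbers returned by gp.Legacy.VI are variation of information (VI) scores between the gold standard and the segmentation; lower is better. For reference, a minimal sketch of that metric for a single slice pair follows (the helper name and the use of sklearn.metrics.mutual_info_score are illustrative, not the library's own implementation).

In [ ]:
import numpy as np
from sklearn.metrics import mutual_info_score

def variation_of_information(gold_slice, rhoana_slice):
    # VI(X, Y) = H(X) + H(Y) - 2 I(X; Y), measured in nats
    x = gold_slice.ravel()
    y = rhoana_slice.ravel()
    def entropy(labels):
        _, counts = np.unique(labels, return_counts=True)
        p = counts / float(counts.sum())
        return -np.sum(p * np.log(p))
    return entropy(x) + entropy(y) - 2. * mutual_info_score(x, y)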

In [4]:
NETS = []
NETS.append('../../nets/MLB_FULL.p')
print NETS


['../../nets/MLB_FULL.p']
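
NETS is built as a list even though only one checkpoint is appended here. If more trained networks were added, the evaluation below could run in a loop; a minimal sketch reusing the same loading and scoring steps as the next cell:

In [ ]:
# hypothetical loop over several stored networks; mirrors the evaluation cell below
for network_path in NETS:
    with open(network_path, 'rb') as f:
        net = pickle.load(f)
    X_test, y_test = gp.Patch.load_rgba_test_only('ipmlb')
    X_test = np.delete(X_test, [0, 1], axis=1)
    print network_path, 'accuracy:', net.score(X_test, y_test)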

In [5]:
roc1 = []
roc2 = []

In [6]:
#
# binary + large_border
#
network_path = NETS[0]

with open(network_path, 'rb') as f:
    net = pickle.load(f)
X_test, y_test = gp.Patch.load_rgba_test_only('ipmlb')
# drop the first two channels of the RGBA patches before prediction
X_test = np.delete(X_test, [0, 1], axis=1)
test_prediction = net.predict(X_test)
test_prediction_prob = net.predict_proba(X_test)
print
print 'Precision/Recall:'
print classification_report(y_test, test_prediction)
test_acc = net.score(X_test, y_test)
acc_score = accuracy_score(y_test, test_prediction)
print 'Test Accuracy:', test_acc
print 'Accuracy Score:', acc_score

# ROC/AUC computed on the hard class predictions
fpr, tpr, thresholds = roc_curve(y_test, test_prediction)
area = auc(fpr, tpr)
print 'AUC', area

# ROC/AUC computed on the predicted probability of the positive class
fpr2, tpr2, thresholds = roc_curve(y_test, test_prediction_prob[:,1])
area2 = auc(fpr2, tpr2)
print 'AUC2', area2

roc1.append({'ip':[fpr,tpr,area]})
roc2.append({'ip':[fpr2,tpr2,area2]})

t0 = time.time()
net.uuid = 'MLB'

# run the automatic split correction pipeline on the dojo volume with this network
r_d(net)
print time.time()-t0, 'seconds'


Loaded /home/d/patches//ipmlb/ in 0.000350952148438 seconds.

Precision/Recall:
             precision    recall  f1-score   support

          0       0.87      0.93      0.90      8780
          1       0.93      0.87      0.89      8780

avg / total       0.90      0.90      0.90     17560

Test Accuracy: 0.898234624146
Accuracy Score: 0.8982346241457859
AUC 0.8982346241457859
AUC2 0.9598637927366505
Loading dojo bigM from file..

Loading merge errors p < .05 and split errors p > .95 from file..
   Mean VI improvement -0.007924602725132734
   Median VI improvement -0.008273382786244898
0.353229045868 seconds
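
The ROC data collected in roc1 and roc2 is not plotted in this notebook. A minimal sketch of writing both curves to disk with the Agg backend selected in the first cell (the output file name roc_ipmlb.png is arbitrary):

In [ ]:
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
for curves, label in [(roc1, 'hard labels'), (roc2, 'probabilities')]:
    fpr_, tpr_, area_ = curves[0]['ip']
    ax.plot(fpr_, tpr_, label='%s (AUC=%.3f)' % (label, area_))
ax.plot([0, 1], [0, 1], linestyle='--', color='gray')
ax.set_xlabel('False positive rate')
ax.set_ylabel('True positive rate')
ax.legend(loc='lower right')
fig.savefig('roc_ipmlb.png')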
