In [1]:
import matplotlib
matplotlib.use('agg')
import cPickle as pickle
import os; import sys; sys.path.append('../../')
import gp
import gp.nets as nets
In [2]:
from sklearn.metrics import classification_report, accuracy_score, roc_curve, auc, precision_recall_fscore_support, f1_score, precision_recall_curve, average_precision_score, zero_one_loss
import numpy as np
import time
In [3]:
def r_d(cnn):
# load dojo data
input_image, input_prob, input_gold, input_rhoana, dojo_bbox = gp.Legacy.read_dojo_data()
original_mean_VI, original_median_VI, original_VI_s = gp.Legacy.VI(input_gold, input_rhoana)
# output folder for anything to store
output_folder = '/home/d/netstatsCVPR2018/'+cnn.uuid+'/'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# # find merge errors, if we did not generate them before
# merge_error_file = output_folder+'/merges_new_cnn.p'
# if os.path.exists(merge_error_file):
# print 'Loading merge errors from file..'
# with open(merge_error_file, 'rb') as f:
# merge_errors = pickle.load(f)
# else:
# print 'Finding Top 5 merge errors..'
# merge_errors = gp.Legacy.get_top5_merge_errors(cnn, input_image, input_prob, input_rhoana)
# with open(merge_error_file, 'wb') as f:
# pickle.dump(merge_errors, f)
# print len(merge_errors), ' merge errors found.'
# we need to create a bigM for the dojo volume
bigM_dojo_file = output_folder + '/bigM_dojo_test2.p'
if os.path.exists(bigM_dojo_file):
print 'Loading dojo bigM from file..'
with open(bigM_dojo_file, 'rb') as f:
bigM_dojo = pickle.load(f)
else:
print 'Creating dojo bigM..'
bigM_dojo = gp.Legacy.create_bigM_without_mask(cnn, input_image, input_prob, input_rhoana, verbose=False)
with open(bigM_dojo_file, 'wb') as f:
pickle.dump(bigM_dojo, f)
print
dojo_vi_95_file = output_folder + '/dojo_vi_95_t6.p'
dojo_merge_vis = output_folder + '/dojo_merge_auto95_vis.p'
dojo_split_vis = output_folder + '/dojo_split_auto95_vis.p'
dojo_merge_fixes = output_folder + '/dojo_merge_auto95_fixes.p'
dojo_split_fixes = output_folder + '/dojo_split_auto95_fixes.p'
dojo_output_95 = output_folder + '/dojo_auto95_output.p'
if os.path.exists(dojo_vi_95_file):
print 'Loading merge errors p < .05 and split errors p > .95 from file..'
with open(dojo_vi_95_file, 'rb') as f:
dojo_vi_95 = pickle.load(f)
else:
# #
# # perform merge correction with p < .05
# #
# print 'Correcting merge errors with p < .05'
# bigM_dojo_05, corrected_rhoana_05, dojo_auto_merge_fixes, vi_s_per_step = gp.Legacy.perform_auto_merge_correction(cnn, bigM_dojo, input_image, input_prob, input_rhoana, merge_errors, .05, input_gold=input_gold)
# print ' Mean VI improvement', original_mean_VI-gp.Legacy.VI(input_gold, corrected_rhoana_05)[0]
# print ' Median VI improvement', original_median_VI-gp.Legacy.VI(input_gold, corrected_rhoana_05)[1]
# with open(dojo_merge_vis, 'wb') as f:
# pickle.dump(vi_s_per_step, f)
# with open(dojo_merge_fixes, 'wb') as f:
# pickle.dump(dojo_auto_merge_fixes, f)
#
# perform split correction with p > .95
#
print 'Correcting split errors with p > .95'
bigM_dojo_after_95, out_dojo_volume_after_auto_95, dojo_auto_fixes_95, dojo_auto_vi_s_95, vi_s_per_step2 = gp.Legacy.splits_global_from_M_automatic(cnn, bigM_dojo, input_image, input_prob, input_rhoana, input_gold, sureness_threshold=.95)
dojo_vi_95 = gp.Legacy.VI(input_gold, out_dojo_volume_after_auto_95)
with open(dojo_vi_95_file, 'wb') as f:
pickle.dump(dojo_vi_95, f)
with open(dojo_split_vis, 'wb') as f:
pickle.dump(vi_s_per_step2, f)
with open(dojo_split_fixes, 'wb') as f:
pickle.dump(dojo_auto_fixes_95, f)
with open(dojo_output_95, 'wb') as f:
pickle.dump(out_dojo_volume_after_auto_95, f)
print ' Mean VI improvement', original_mean_VI-dojo_vi_95[0]
print ' Median VI improvement', original_median_VI-dojo_vi_95[1]
In [ ]:
In [ ]:
In [ ]:
In [4]:
# Network snapshots to evaluate (currently just the full PMLB net).
NETS = ['../../nets/PMLB_FULL.p']
print(NETS)
In [5]:
# ROC result accumulators: roc1 collects curves built from hard label
# predictions, roc2 from the positive-class probabilities (see eval cell).
roc1, roc2 = [], []
In [6]:
#
# binary + large_border
#
network_path = NETS[0]
with open(network_path, 'rb') as f:
net = pickle.load(f)
X_test, y_test = gp.Patch.load_rgba_test_only('ipmlb')
X_test = np.delete(X_test, [0] , axis=1)
test_prediction = net.predict(X_test)
test_prediction_prob = net.predict_proba(X_test)
print
print 'Precision/Recall:'
print classification_report(y_test, test_prediction)
test_acc = net.score(X_test, y_test)
acc_score = accuracy_score(y_test, test_prediction)
print 'Test Accuracy:', test_acc
print 'Accuracy Score:', acc_score
fpr, tpr, thresholds = roc_curve(y_test, test_prediction)
area = auc(fpr, tpr)
print 'AUC', area
fpr2, tpr2, thresholds = roc_curve(y_test, test_prediction_prob[:,1])
area2 = auc(fpr2, tpr2)
print 'AUC2', area2
roc1.append({'ip':[fpr,tpr,area]})
roc2.append({'ip':[fpr2,tpr2,area2]})
t0 = time.time()
net.uuid = 'PMLB'
r_d(net)
print time.time()-t0, 'seconds'
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]: