In [1]:
%matplotlib inline
%pylab inline


Populating the interactive namespace from numpy and matplotlib

Load Results


In [2]:
from alabortijcv2015.utils import pickle_load

path = '/Users/joan/'

noise_std = [None, 0.0, 0.02, 0.04, 0.06]

global_pwa = []
global_tps = []
patch = []
linear_global_pwa = []
linear_global_tps = []
linear_patch = []
parts = []

base = path + 'PhD/Results/ijcv2015/exp1_'

for n in noise_std:
    global_pwa.append(pickle_load(base + 'global_pwa_int_fastsic' + str(n)))
    global_tps.append(pickle_load(base + 'global_tps_int_fastsic' + str(n)))
    patch.append(pickle_load(base + 'patch_int_fastsic' + str(n)))
    linear_global_pwa.append(pickle_load(base + 'linear_global_pwa_int_fastsic' + str(n)))
    linear_global_tps.append(pickle_load(base + 'linear_global_tps_int_fastsic' + str(n)))
    linear_patch.append(pickle_load(base + 'linear_patch_int_fastsic' + str(n)))
    parts.append(pickle_load(base + 'parts_int_fastsic' + str(n)))

results = [global_pwa, global_tps, patch, linear_global_pwa, linear_global_tps, linear_patch, parts]
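
Each pickled file appears to hold a list of per-fit result objects (presumably one per test image), so results is indexed as results[method][noise_level][fit]. A quick structural check (a minimal sketch; the only assumption is that these objects expose the initial_error()/final_error() methods used further below):

In [ ]:
# 7 fitting methods x 5 initializations (None, 0.0, 0.02, 0.04, 0.06)
print len(results), len(results[0])
# normalized point-to-point error of the first Global-PWA fit at noise level None
print results[0][0][0].final_error()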

Experiment 1


In [8]:
legend_entries = ['Global-PWA',
                  'Global-TPS',
                  'Patch',
                  'Linear-Global-PWA',
                  'Linear-Global-TPS',
                  'Linear-Patch',
                  'Parts']

In [9]:
# Mean final error for every fitter (rows of all_errors) at every noise level (columns).
all_errors = []

for fitter_results in results:
    mean_errors = []
    for frs in fitter_results:
        errors = [fr.final_error() for fr in frs]
        mean_error = np.mean(errors)
        mean_errors.append(mean_error)
    all_errors.append(mean_errors)
    
all_errors = np.asarray(all_errors)

plot([-0.01, 0.0, 0.02, 0.04, 0.06], all_errors[0], linewidth=2, color='b', marker='o', ms=10, mec='b', mfc='b')
plot([-0.01, 0.0, 0.02, 0.04, 0.06], all_errors[1], linewidth=2, color='g', marker='<', ms=10, mec='g', mfc='g')
plot([-0.01, 0.0, 0.02, 0.04, 0.06], all_errors[2], linewidth=2, color='y', marker='>', ms=10, mec='y', mfc='y')
plot([-0.01, 0.0, 0.02, 0.04, 0.06], all_errors[3], linewidth=2, color='m', marker='v', ms=10, mec='m', mfc='m')
plot([-0.01, 0.0, 0.02, 0.04, 0.06], all_errors[4], linewidth=2, color='c', marker='^', ms=10, mec='c', mfc='c')
plot([-0.01, 0.0, 0.02, 0.04, 0.06], all_errors[5], linewidth=2, color='black', marker='d', ms=10, mec='black', mfc='black')
plot([-0.01, 0.0, 0.02, 0.04, 0.06], all_errors[6], linewidth=2, color='r', marker='s', ms=10, mec='r', mfc='r')
legend(legend_entries, loc='best')
#xlim((0, 0.06))
#ylim((0, 0.1))
xlabel('Initialization')
ylabel('Mean Normalized Point-to-Point Error')
plt.gcf().set_size_inches(np.asanyarray((7, 5))*2)
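
The x-positions above are hard-coded; the leftmost value (-0.01) is just a nominal slot for the None entry of noise_std, which presumably corresponds to fitting from the ground-truth initialization (the 'gt' entry in the legend used later). A small sketch of deriving the same positions directly from noise_std:

In [ ]:
# Give the None (ground-truth) initialization a small negative offset so it
# sits to the left of the 0.0 noise level on the 'Initialization' axis.
x_positions = [-0.01 if n is None else n for n in noise_std]
print x_positions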


Explore Results

CED per Algorithm


In [58]:
j = 1  # select a fitter from results; 1 -> Global-TPS (same order as legend_entries above)

# errors[0]: initial errors of the ground-truth-projection initialization;
# errors[1:]: final errors for each entry of noise_std.
errors = [[fr.initial_error() for fr in results[j][0]]]
for fitter_results in results[j]:
    errors.append([fr.final_error() for fr in fitter_results])

In [59]:
from menpofit.visualize import plot_ced

legend_entries = ['gt-projection',
                  'gt',
                  '0.00',
                  '0.02',
                  '0.04',
                  '0.06']

plot_ced(errors[1:], legend_entries=legend_entries[1:])
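
plot_ced draws a cumulative error distribution: for each error threshold, the proportion of fits whose error lies below it. For reference, a minimal numpy sketch of one such curve (the threshold range is an arbitrary choice here; the menpofit helper above handles binning and styling itself):

In [ ]:
# Fraction of fits below each error threshold, i.e. a single CED curve.
thresholds = np.linspace(0.0, 0.1, 101)
err = np.asarray(errors[1])          # final errors of the 'gt' initialization
ced = [np.mean(err < t) for t in thresholds]
plot(thresholds, ced)
xlabel('Normalized Point-to-Point Error')
ylabel('Proportion of Fits')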


Statistics per Algorithm


In [60]:
from __future__ import division

# 'Convergence' is the fraction of fits whose final error falls below the
# corresponding ground-truth-projection initial error (errors[0]).
print '\t', 'Mean \t', 'STD \t', 'Median \t', 'Convergence \t'

for err, method in zip(errors, legend_entries):
    print method, '\t',
    print np.round(np.mean(err), decimals=4), '\t',
    print np.round(np.std(err), decimals=4), '\t',
    print np.round(np.median(err), decimals=4), '\t',

    c = 0
    for e, ini_e in zip(err, errors[0]):
        if e < ini_e:
            c += 1

    print np.round(c / len(err), decimals=4)


	Mean 	STD 	Median 	Convergence 	
gt-projection 	0.0247 	0.0066 	0.0238 	0.0
gt 	0.033 	0.0159 	0.0288 	0.2455
0.00 	0.0365 	0.0231 	0.0302 	0.2277
0.02 	0.0389 	0.0271 	0.0309 	0.2247
0.04 	0.046 	0.0357 	0.0344 	0.1845
0.06 	0.0616 	0.0501 	0.0412 	0.1503
