In [1]:
# Render matplotlib figures inline in the notebook.
# NOTE(review): %pylab is deprecated in favour of explicit imports, but the
# cells below rely on the names it injects into the namespace
# (np, plt, plot, legend, xlabel, ylabel, ...) -- keep it for now.
%matplotlib inline
%pylab inline


Populating the interactive namespace from numpy and matplotlib

Load Results


In [2]:
from alabortcvpr2015.utils import pickle_load

path = '/data/'
exp = 'afw_fast_dsift'
n_experiments = 8  # one pickled result set per training-set size


def _load_results(tag):
    """Load the pickled fitter results for one method.

    Returns a list with one entry per experiment (training-set size).
    NOTE(review): pickle executes arbitrary code -- only load trusted
    local files.
    """
    # `range` replaces Python-2-only `xrange`; identical behavior when
    # iterated, and forward-compatible with Python 3.
    return [pickle_load(path + 'PhD/Results/' + tag + '_' + exp + str(j))
            for j in range(n_experiments)]


aam_pic = _load_results('aam_pic')
aam_aic = _load_results('aam_aic')
clm_rlms = _load_results('clm_rlms')
unified_pic_rlms = _load_results('unified_picrlms')
unified_aic_rlms = _load_results('unified_aicrlms')

# Methods in a fixed order; later cells index into this list.
results = [aam_pic, aam_aic, clm_rlms, unified_pic_rlms, unified_aic_rlms]

Explore Results

CED per Algorithm


In [3]:
# Index of the method (within `results`) to analyse.
j = 2

# Initialisation errors, taken from one method's stored results.
# NOTE(review): here `j` indexes an experiment within unified_aic_rlms but a
# method within `results` -- presumably the initial error is the same either
# way; confirm against how the results were produced.
initial_errors = [fr.initial_error() for fr in unified_aic_rlms[j]]

# One list of final errors per experiment (training-set size).
final_errors = [[fr.final_error() for fr in frs] for frs in results[j]]

# First curve is the initialisation, then one curve per experiment.
errors = [initial_errors] + final_errors

In [4]:
from menpofit.visualize import plot_ced

# Curve labels: the initialisation, then the training-set sizes
# 32, 64, ..., 2048 (powers of two) and finally the full 2946-image set.
legend_entries = ['Ini'] + [str(2 ** k) for k in range(5, 12)] + ['2946']

plot_ced(errors, legend_entries=legend_entries)


Statistics per Algorithm


In [5]:
from __future__ import division  # true division, so c / len(err) is a float

# Per-curve summary table: mean / std / median error plus the fraction of
# images whose final error improved on the initialisation ("Convergence").
# Python 2 print statements; the trailing commas suppress the newline so each
# row stays on one line.
print '\t', 'Mean \t', 'STD \t', 'Median \t', 'Convergence \t'

for err, method in zip(errors, legend_entries):
    print method, '\t', 
    print np.round(np.mean(err), decimals=4), '\t', 
    print np.round(np.std(err), decimals=4), '\t', 
    print np.round(np.median(err), decimals=4), '\t',
    
    # Count fittings whose final error beats their initial error.
    # (For the 'Ini' row `err` IS errors[0], so the count is always 0.)
    c = 0
    for e, ini_e in zip(err, errors[0]):
        if e < ini_e:
            c+=1
        
    print np.round(c / len(err), decimals=4)


	Mean 	STD 	Median 	Convergence 	
Ini 	0.0996 	0.0354 	0.0942 	0.0
32 	0.053 	0.026 	0.0451 	0.9703
64 	0.0484 	0.0244 	0.0416 	0.9733
128 	0.0453 	0.0238 	0.0387 	0.9792
256 	0.0443 	0.0235 	0.0373 	0.9852
512 	0.0436 	0.0228 	0.0371 	0.9792
1024 	0.043 	0.023 	0.0364 	0.9822
2048 	0.0424 	0.0233 	0.0355 	0.9852
2946 	0.0421 	0.0229 	0.0349 	0.9852

Experiment 2


In [6]:
# Method labels, in the same order as the `results` list.
legend_entries = [
    'AAM-PIC', 'AAM-AIC', 'CLM-RLMS', 'UNI-PIC-RLMS', 'UNI-AIC-RLMS',
]

In [7]:
it_errors = []

# Mean final error per experiment (training-set size), one row per method.
for fitter_results in results:
    all_errors = np.asarray([[fr.final_error() for fr in frs]
                             for frs in fitter_results])
    it_errors.append(np.mean(all_errors, axis=1))

it_errors = np.asarray(it_errors)

# Training-set sizes: powers of two 32 ... 2048, plus the full 2946-image set
# (must match the order of the experiments loaded above).
train_sizes = [2 ** i for i in range(5, 12)] + [2946]

plot(train_sizes, it_errors.T, linewidth=2)
legend(legend_entries)
xlabel('Number of Training Images')  # fixed typo: was 'Training Image'
ylabel('Mean Normalized Point-to-Point Error')
plt.gcf().set_size_inches((7, 5))