In [1]:
import sys
sys.argv.append('--quiet')
sys.argv.append('--leave-mpl-backend-alone')
%pylab inline
from ibeis.all_imports import *
ibs = ibeis.opendb('PZ_Mothers')
print(ibs.get_infostr())
qaid_list = ibs.get_aids_with_groundtruth()
print('len(qaid_list) = %r' % len(qaid_list))


Populating the interactive namespace from numpy and matplotlib
[pt] LEAVE THE BACKEND ALONE !!! was specified
[pt] not changing mpl backend
[!plottool] matplotlib has already been initialized
[ibs._init_dirs] ibs.dbdir = '/media/Store/data/work/PZ_Mothers'

    workdir = '/media/Store/data/work'
    dbname = 'PZ_Mothers'
    num_images = 118
    num_annotations = 119
    num_names = 42
    
len(qaid_list) = 105
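
Conceptually, get_aids_with_groundtruth returns every annotation whose name has
at least one other annotation, so every query is guaranteed a correct match in
the database. A hedged pure-Python sketch of that selection (get_valid_aids and
get_annot_name_rowids are assumptions about the IBEIS controller API and may not
match the real helper exactly):

import collections
all_aids = ibs.get_valid_aids()
nids = ibs.get_annot_name_rowids(all_aids)
name_counts = collections.Counter(nids)
qaid_sketch = [aid for aid, nid in zip(all_aids, nids) if name_counts[nid] > 1]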

In [2]:
clamp_gt = utool.get_arg('--clamp-gt', int, 1)
clamp_gf = utool.get_arg('--clamp-gf', int, 1)
num_samp = utool.get_arg('--num-samples', int, 5)
#
# Determinism
seed_ = 143039
np.random.seed(seed_)
#
# List of database sizes to test
samp_min, samp_max = (2, ibs.get_num_names())
dbsamplesize_list = utool.sample_domain(samp_min, samp_max, num_samp)
#
# Sample true and false matches for every query annotation
qaid_trues_list = ibs.get_annot_groundtruth_sample(qaid_list, per_name=clamp_gt)
qaid_falses_list = ibs.get_annot_groundfalse_sample(qaid_list, per_name=clamp_gf)
#
# Vary the size of the falses
def generate_varied_falses():
    for false_aids in qaid_falses_list:
        false_sample_list = []
        for dbsize in dbsamplesize_list:
            if dbsize > len(false_aids):
                continue
            false_sample = np.random.choice(false_aids, dbsize, replace=False).tolist()
            false_sample_list.append(false_sample)
        yield false_sample_list
qaid_false_samples_list = list(generate_varied_falses())

false_checksum = sum(sum(map(sum, false_sample_list)) for false_sample_list in qaid_false_samples_list)
print('false sample checksum: %r' % false_checksum)
print('Ground Truth stats: ')
print(utool.dict_str(utool.mystats(list(map(len, qaid_trues_list)))))
print('Ground False stats: ')
print(utool.dict_str(utool.mystats(list(map(len, qaid_falses_list)))))


false sample checksum: 578650
Ground Truth stats: 
{
    'max': 1.0,
    'min': 1.0,
    'mean': 1.0,
    'std': 0.0,
    'nMin': 105,
    'nMax': 105,
    'shape': '(105,)',
}
Ground False stats: 
{
    'max': 40.0,
    'min': 40.0,
    'mean': 40.0,
    'std': 0.0,
    'nMin': 105,
    'nMax': 105,
    'shape': '(105,)',
}
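
The database sizes come from utool.sample_domain, which here picks num_samp
roughly evenly spaced sizes between samp_min and samp_max. A hedged stand-in
using numpy (the exact spacing and endpoint handling of the real helper may
differ):

import numpy as np
def sample_domain_sketch(samp_min, samp_max, num_samp):
    # Roughly evenly spaced integer sample sizes, duplicates removed.
    return sorted(set(np.linspace(samp_min, samp_max, num_samp).astype(int).tolist()))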

In [15]:
#
# Get a rough idea of how many queries will be run
nTotal = sum([len(false_aids_samples) * len(true_aids)
              for true_aids, false_aids_samples
              in zip(qaid_trues_list, qaid_false_samples_list)])
# Create a progress marking function
progkw = {'nTotal': nTotal, 'flushfreq': 20, 'approx': False}
mark_, end_ = utool.log_progress('[upscale] progress: ',  **progkw)
count = 0
# output containers
upscores_dict = utool.ddict(lambda: utool.ddict(list))


[upscale] progress:   0/525
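
Sanity check on the total: clamp_gt is 1, so each of the 105 queries has exactly
one ground-truth aid (the stats above show every length is 1.0), and each query
is run against 5 false-sample sizes, so nTotal = 105 * 1 * 5 = 525, matching the
progress counter.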

In [16]:
#
# Set up and run test iterations
input_iter = zip(qaid_list, qaid_trues_list, qaid_false_samples_list)
for qaid, true_aids, false_aids_samples in input_iter:
    #print('qaid = %r' % (qaid,))
    #print('true_aids=%r' % (true_aids,))
    # For each true match and false sample
    for gt_aid, false_sample in utool.iprod(true_aids, false_aids_samples):
        #print('  gt_aid=%r' % (gt_aid,))
        #print('  false_sample=%r' % (false_sample,))
        #mark_(count) 
        #print('')
        count += 1
        # Execute query
        daids = false_sample + [gt_aid]
        qres = ibs._query_chips([qaid], daids)[qaid]
        # Elicit information
        score = qres.get_gt_scores(gt_aids=[gt_aid])[0]
        # Append result
        upscores_dict[(qaid, gt_aid)]['dbsizes'].append(len(false_sample))
        upscores_dict[(qaid, gt_aid)]['score'].append(score)
end_()
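
After the loop, upscores_dict maps each (query aid, ground-truth aid) pair to
two parallel lists: the database size used for each run and the score that
ground-truth match received at that size. A small peek at one entry (the keys
are the ones populated above; the values will vary):

example_pair = next(iter(upscores_dict))
print(example_pair, dict(upscores_dict[example_pair]))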




In [5]:
colors = df2.distinct_colors(len(upscores_dict))
df2.figure(fnum=1, doclf=True, docla=True)
for ix, ((qaid, gt_aid), upscores) in enumerate(upscores_dict.items()):
    xdata = upscores['dbsizes']
    ydata = upscores['score']
    df2.plt.plot(xdata, ydata, 'o-', color=colors[ix])
figtitle = 'Effect of Database Size on Match Scores'
figtitle += '\n' + ibs.get_dbname()
#figtitle += '\n' + ibs.cfg.query_cfg.get_cfgstr()
df2.set_figtitle(figtitle, font='large')
df2.set_xlabel('# Annotations in database')
df2.set_ylabel('Groundtruth Match Scores (annot-vs-annot)')
df2.dark_background()
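
To complement the per-pair curves, a hedged sketch that aggregates the same data
into one mean score per database size, using only numpy and matplotlib (None
scores, if any, are skipped):

import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt

scores_by_dbsize = defaultdict(list)
for upscores in upscores_dict.values():
    for dbsize, score in zip(upscores['dbsizes'], upscores['score']):
        if score is not None:
            scores_by_dbsize[dbsize].append(score)
dbsizes = sorted(scores_by_dbsize)
mean_scores = [np.mean(scores_by_dbsize[size]) for size in dbsizes]
plt.plot(dbsizes, mean_scores, 's--')
plt.xlabel('# Annotations in database')
plt.ylabel('Mean groundtruth match score')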



In [17]:
dbsample_index = 0  # index into the database-size samples used for sorting
line_index = 0      # index into the sorted (score, pair) list to inspect

highscore = 0
highpair = None
none_pairs = []  # pairs where at least one database size produced no score
pair_list  = []
score_list = []
for pair, dict_ in six.iteritems(upscores_dict):
    scores = dict_['score']
    if any([s is None for s in scores]):
        none_pairs.append(pair)
    if dbsample_index >= len(scores):
        continue
    score = scores[dbsample_index]
    if score is None:
        continue
    score_list.append(score)
    pair_list.append(pair)

print('score_stats')
utool.print_mystats(score_list)

sorted_tups = sorted(list(zip(score_list, pair_list)))
print(sorted_tups[0])
print(sorted_tups[-1])

qaid, gt_aid = sorted_tups[line_index][1]
print('qaid = %r' % qaid)
print('gt_aid = %r' % gt_aid)
index = qaid_list.index(qaid)
print(index)
false_aids_samples = qaid_false_samples_list[index]
false_sample = false_aids_samples[dbsample_index]
print(false_sample)
daids = false_sample + [gt_aid]
qres = ibs._query_chips([qaid], daids)[qaid]


score_stats
{
    'max': 38889032.0,
    'min': 1458666.0,
    'mean': 7737964.5,
    'std': 5200598.0,
    'nMin': 1,
    'nMax': 1,
    'shape': '(104,)',
}
(1458666.0, (18, 17))
(38889032.0, (46, 47))
qaid = 18
gt_aid = 17
15
[40, 106]
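
sorted_tups is ordered by score, so with line_index = 0 the cells below re-run
and inspect the weakest ground-truth match at this database size (qaid 18 vs
gt_aid 17). A hypothetical variation to inspect the strongest pair instead:

# Pick the highest-scoring (qaid, gt_aid) pair rather than the lowest.
qaid, gt_aid = sorted_tups[-1][1]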

In [7]:
fig = qres.show_analysis(ibs, annote_mode=1, N=3)


[show_qres_analysis][show_qres] qres.show_analysis()
[show_qres_analysis][analysis] showing top aids

In [8]:
fig = qres.show_top(ibs, annote_mode=0, N=1)



In [9]:
fig = qres.show_top(ibs, annote_mode=0, N=6)



In [10]:
fig = qres.show_top(ibs, annote_mode=1, N=6)


