In [ ]:
import numpy as np
from keras.applications.resnet50 import preprocess_input
from keras.applications.resnet50 import ResNet50
from keras.models import Model
from keras.models import model_from_json
from keras.preprocessing import image

In [ ]:
from pelops.datasets.featuredataset import FeatureDataset
from pelops.datasets.veri import VeriDataset
import pelops.utils as utils
from itertools import product
from tqdm import tqdm
from pelops.experiment_api.experiment import ExperimentGenerator
from collections import defaultdict

In [ ]:
# load an image from disk, resize it, and preprocess it into a batch of one
def load_image(img_path, resizex=224, resizey=224):
    data = image.load_img(img_path, target_size=(resizex, resizey))
    x = image.img_to_array(data)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x

# rebuild a model from its saved json architecture and hdf5 weights
def load_model_workaround(model_file, weight_file):
    # load json and create model
    with open(model_file, 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(weight_file)
    return loaded_model
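
As a quick sanity check on the preprocessing, the sketch below uses a throwaway solid-color image written to a temporary file (not a VeRi chip) to confirm that load_image returns a single-image batch of shape (1, 224, 224, 3), ready for the ResNet50-style preprocess_input.

In [ ]:
# sanity check: load_image should yield a (1, 224, 224, 3) batch
# (throwaway image in a temporary file, not a chip from the dataset)
import tempfile
from PIL import Image

with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as tmp:
    Image.new('RGB', (640, 480), color=(128, 64, 32)).save(tmp.name)
    batch = load_image(tmp.name)

print(batch.shape)  # expected: (1, 224, 224, 3)
print(batch.dtype)  # typically float32 (the Keras default floatx)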

In [ ]:
def comp_siamese(feats):
    # the siamese model's output score for "same vehicle"
    same_vehicle = feats[0]
    return same_vehicle

# do the comparisons between chips
# predict - siamese model predict function taking [left, right] image batches
# cam1 - listing of chips seen at cam1
# cam2 - listing of chips seen at cam2
# comparison - function to score the model output for a pair of chips;
#              should return a large value when the chips are similar,
#              small otherwise
# verbose - return more info if true
def is_correct_match(predict,
                     cam1,
                     cam2,
                     comparison=comp_siamese, verbose=False):
    similarities = []
    for cam1_chip in cam1:
        left = cam1_chip.filepath
        left_image = load_image(left)
        for cam2_chip in cam2:
            right = cam2_chip.filepath
            right_image = load_image(right)
            feat = predict([left_image, right_image])
            similarity = comparison(feat.squeeze())
            similarities.append((similarity, cam1_chip, cam2_chip))
    # most similar pair first; sort on the score alone so ties never fall
    # through to comparing chip objects
    similarities.sort(key=lambda entry: entry[0], reverse=True)
    for i, (similarity, chip1, chip2) in enumerate(similarities):
        # the rank of the first correct pairing is the score for this experiment
        if chip1.car_id == chip2.car_id:
            if verbose:
                return i, similarities
            else:
                return i
    raise ValueError("no chip pair with a matching car_id was found")


# run EXPPERCMC experiments and tally the rank of the first correct match
# predict - siamese model predict function
# experimentGen - ExperimentGenerator used to create experiments
# EXPPERCMC - number of experiments to run for a single CMC
# comparison - function to score the model output for a pair of chips
def pre_cmc(predict, experimentGen,
            EXPPERCMC=100, comparison=comp_siamese):

    num_downs = defaultdict(int)
    for i in range(EXPPERCMC):
        a = experimentGen.generate()
        num_down = is_correct_match(predict, a[0], a[1],
                                    comparison=comparison)
        num_downs[num_down] += 1

    keys = sorted(num_downs)
    vals = [num_downs[key] for key in keys]
    return (keys, np.array(vals) / EXPPERCMC)


# Generate unprocessed CMC curves; the per-rank fractions still need to be
# cumulatively summed (done in make_cmc_stats) to make the correct CMC curves
# predict - siamese model predict function
# experimentGen - ExperimentGenerator
# NUMCMC - number of CMC curves to build
# EXPPERCMC - number of experiments run per CMC curve
# comparison - function to score the model output for a pair of chips;
#              large -> similar, small -> dissimilar
def repeat_pre_cmc(predict, experimentGen, NUMCMC=100,
                   EXPPERCMC=100, comparison=comp_siamese):
    experimentHolder = []
    for experiment in tqdm(range(NUMCMC)):
        experimentHolder.append(pre_cmc(predict, experimentGen,
                                        EXPPERCMC=EXPPERCMC,
                                        comparison=comparison))
    return experimentHolder


# finalize creation of the CMC curves: cumulatively sum each curve and
# compute mean and +/- one standard deviation statistics across curves
# experimentHolder - list of (keys, vals) pre-CMC tuples from repeat_pre_cmc
# itemsPerCamera - number of chips on a camera
def make_cmc_stats(experimentHolder, itemsPerCamera):
    comparisons = itemsPerCamera*itemsPerCamera
    stats = np.zeros((len(experimentHolder), comparisons))

    for index, (keys, vals) in enumerate(experimentHolder):
        for keyIndex in range(len(keys)):
            stats[index, keys[keyIndex]] = vals[keyIndex]

    # running sum along each row turns per-rank fractions into a CMC curve
    for index in range(len(stats)):
        total_sum = 0.0
        for sample in range(comparisons):
            total_sum += stats[index, sample]
            stats[index, sample] = total_sum

    # gdata rows: 0 -> mean - stddev, 1 -> mean, 2 -> mean + stddev
    gdata = np.zeros((3, comparisons))

    for i in range(comparisons):
        gdata[1, i] = np.average(stats[:, i])
    for i in range(comparisons):
        stddev = np.std(stats[:, i])
        gdata[0, i] = gdata[1, i] - stddev
        gdata[2, i] = gdata[1, i] + stddev

    return (stats, gdata)
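
To see what make_cmc_stats produces without running the model, here is a toy example on hand-made pre-CMC tuples (synthetic numbers, not VeRi results) for a hypothetical 2-chips-per-camera setup: each row of stats is cumulatively summed into a curve that rises to 1.0, and gdata holds the mean and +/- one stddev bands.

In [ ]:
# toy illustration of make_cmc_stats on hand-made pre-CMC tuples
# 2 chips per camera -> 4 comparisons per experiment
toy_holder = [
    ([0, 1], np.array([0.7, 0.3])),  # 70% of experiments matched at rank 0, 30% at rank 1
    ([0, 2], np.array([0.9, 0.1])),  # 90% at rank 0, 10% at rank 2
]
toy_stats, toy_gdata = make_cmc_stats(toy_holder, 2)
print(toy_stats)     # each row is now a cumulative (CMC) curve ending at 1.0
print(toy_gdata[1])  # mean curve across the two toy experiments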

In [ ]:
model = load_model_workaround('/local_data/dgrossman/model_save_dir/VeRi-siamese-weekend.model.json', 
                              '/local_data/dgrossman/model_save_dir/VeRi-siamese-weekend.weights.hdf5')
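
A quick structural check on the reloaded network; this assumes, as the predict([left_image, right_image]) calls in is_correct_match do, that the siamese graph exposes two image inputs (reachable through the standard Keras model.inputs list).

In [ ]:
# quick look at the reloaded siamese network; it should expose two image inputs
model.summary()
print('number of inputs:', len(model.inputs))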

In [ ]:
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

In [ ]:
ITEMSPERCAMERA = 10  # chips per camera in each experiment
YRANDOM = 1024       # seed for the experiment generator
CAMERAS = 2          # cameras per experiment
DROPPED = 0          # drop parameter for the ExperimentGenerator (none dropped)
CMC = 10             # number of CMC curves to build
EXPERIMENTS = 100    # experiments per CMC curve

In [ ]:
veri_train = VeriDataset('/local_data/dgrossman/VeRi', set_type=utils.SetType.TRAIN.value)
expGen_validate = ExperimentGenerator(veri_train, CAMERAS, ITEMSPERCAMERA, DROPPED, YRANDOM)
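
Before kicking off the full run, it can help to peek at one generated experiment. The sketch below leans on what is_correct_match already assumes: generate() returns one list of chips per camera, and each chip carries car_id and filepath attributes.

In [ ]:
# peek at one experiment: one list of chips per camera
# (note: this advances the generator's random state)
sample_experiment = expGen_validate.generate()
print('cameras:', len(sample_experiment))
print('chips at camera 0:', len(sample_experiment[0]))
for chip in sample_experiment[0][:3]:
    print(chip.car_id, chip.filepath)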

In [ ]:
experimentHolder = repeat_pre_cmc(model.predict, expGen_validate, NUMCMC=CMC, EXPPERCMC=EXPERIMENTS)
stats, gdata = make_cmc_stats(experimentHolder, ITEMSPERCAMERA)

In [ ]:
%matplotlib inline
import matplotlib.pyplot as plt

# make the plots
fig = plt.figure()
ax = plt.subplot(111)

ax.plot(gdata.transpose())
plt.title('{} CMC curves with {} experiments per curve'.format(CMC, EXPERIMENTS))
ax.legend(('-stddev', 'avg', '+stddev'), bbox_to_anchor=(1, -0.05),
          fancybox=True, shadow=True, ncol=3)
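
Since the mean curve in gdata row 1 is indexed by rank, the usual CMC summary numbers can be read straight off it; the snippet below (run after the cells above) prints the averaged rank-1, rank-5, and rank-10 match rates.

In [ ]:
# read summary points off the averaged CMC curve (gdata row 1 is the mean)
print('rank-1  match rate: {:.3f}'.format(gdata[1, 0]))
print('rank-5  match rate: {:.3f}'.format(gdata[1, 4]))
print('rank-10 match rate: {:.3f}'.format(gdata[1, 9]))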