In [ ]:
%load_ext autoreload
%autoreload 2
model_filepath='hog_svm_daimler_model.pkl'
print("Loading model from %s..." % model_filepath)
from sklearn.externals import joblib
model = joblib.load(model_filepath) 
print("Model loaded: "+str(model))

Read the ground-truth metadata for the Daimler test dataset.


In [ ]:
import daimler
import os

# Root of the Daimler Mono Pedestrian benchmark (adjust for your machine).
basepath = "/media/data/datasets/pedestrian/daimler_mono/DaimlerBenchmark/"
metadata_filepath = os.path.join(basepath, "GroundTruth/GroundTruth2D.db")
test_images_filepath = os.path.join(basepath, "Data/TestData")
metadata, object_names = daimler.read_image_metadata(metadata_filepath)

# Select a subset of the dataset (the full dataset takes hours to predict on).
# Use SUBSET_START = 0 and SUBSET_END = None to evaluate on every test image.
SUBSET_START, SUBSET_END = 2850, 2900
metadata = metadata[SUBSET_START:SUBSET_END]

print("Loaded metadata for %d test images." % len(metadata))

Detect pedestrians in the test images by sliding a window over each image and classifying every window with the previously trained SVM.


In [69]:
import skimage.io as io
import numpy as np
import experiment

# Descriptor computed for every sliding window: the same HOG configuration
# the SVM was trained with (see experiment.my_hog).
descriptor_function = experiment.my_hog

n = len(metadata)
predictions = {}
print("Evaluating on test set...")

def classify_window(window):
    """Classify one (bounding_box, descriptor) pair with the trained SVM.

    Returns (bounding_box, predicted_class); class 1 is treated as
    "pedestrian" by the filter below -- TODO confirm against training labels.
    """
    bbox, descriptor = window
    # predict() expects a 2-D (n_samples, n_features) array.
    klass = model.predict(descriptor.reshape(1, -1))
    return (bbox, klass)

def keep_pedestrian_bbs(prediction):
    """Keep only windows the SVM labelled as pedestrian (class == 1)."""
    bb, r = prediction
    return r == 1

for i, image_metadata in enumerate(metadata, start=1):
    image_filepath = os.path.join(test_images_filepath, image_metadata.filename)
    image = io.imread(image_filepath)
    # Compute the HOG descriptor for every sliding window in the image.
    hog_windows = experiment.calculate_descriptor_windows(
        image, descriptor_function,
        window_scales=[(96, 48)], window_strides=(12, 12))
    # Classify each window, then keep only the pedestrian detections.
    image_predictions = map(classify_window, hog_windows)
    image_predictions = filter(keep_pedestrian_bbs, image_predictions)
    predictions[image_metadata.filename] = list(image_predictions)
    # Coarse progress report, roughly every 5% of the images.
    if i % (n // 20 + 1) == 0:
        print("  %f .." % (i / n * 100))

print("Done.")


  90.000000 ..
  96.000000 ..
Done.

In [ ]:
# save the predictions for future analysis
import pickle
pickle.dump( predictions, open( "predictions.p", "wb" ) )

Visualize the detection results for all (or a subset of) the test images.


In [ ]:
import matplotlib.pyplot as plt

plt.rcParams['image.cmap'] = 'gray'
import matplotlib.patches as patches
import daimler

for image_index in range(0, n, 1):
    image_metadata = metadata[image_index]
    image_filepath = os.path.join(test_images_filepath, image_metadata.filename)
    # Images are single-channel; read them directly.  The original expanded
    # to 3-D with np.expand_dims only to slice channel 0 back out again.
    image = io.imread(image_filepath)
    prediction = predictions[image_metadata.filename]

    # Helper draws the image with its ground-truth boxes; detections go on top.
    daimler.display_image_with_bounding_boxes(image, image_metadata)
    print(prediction)
    # `klass` is the SVM's predicted class label, not a confidence score
    # (the original name `confidence` was misleading).
    for (bb, klass) in prediction:
        p = patches.Rectangle((bb.c, bb.r), bb.w, bb.h, fill=False, color="green")
        plt.gca().add_patch(p)

    plt.title(image_metadata.filename + " detected pedestrians in green")
    plt.show()

In [ ]: