Descriptors


In [1]:
%pylab inline

from tools import *
from cpp_wrapper.descriptors import *


Populating the interactive namespace from numpy and matplotlib

Preprocessing

Crop the already aligned images (LFW-a dataset) as done in high-dim LBP; the crop below yields 152x82 pixel images.


In [2]:
import cv2

lfw_img = np.load("../data/lfw/lfw.npy")[2]
lfwa_img = np.load("../data/lfw/lfwa.npy")[2]

# crop the aligned image and overlay grid lines every 10 pixels on a copy of the crop
cropped_img = lfwa_img[49:201, 84:166]
grid_img = np.copy(cropped_img)
for i in range(11, 81, 10):
    grid_img[:, i] = 0
for i in range(11, 151, 10):
    grid_img[i, :] = 0

imgs = [lfw_img, lfwa_img, cropped_img, grid_img]
for i, img in enumerate(imgs):
    cv2.imwrite("img_%d.png" % i, img)
showMosaic(imgs, ncols=4, color="gray")



In [3]:
raw_imgs = np.load("../data/lfw/lfwa.npy")[:7]

In [4]:
imgs = [img[49:201,84:166] for img in raw_imgs]
showMosaic(imgs, ncols=7, color="gray")
print imgs[0].shape


(152, 82)

In [5]:
imgs2 = [img[44:206,62:188] for img in raw_imgs]
showMosaic(imgs2, ncols=7, color="gray")
print imgs2[0].shape


(162, 126)

LBP descriptors overview


In [14]:
def descriptorsTest(descriptor_type):
    # compute the (unnormalized) descriptor for each cropped image; all have the same length
    descriptors = [descriptor_type.compute(np.asarray(img), normalize=False) for img in imgs]

    print "Total descriptor length : %i" % descriptors[0].shape[0]

Simple LBP

The basic LBP descriptor: a 256-bin histogram of LBP codes per cell, concatenated over all cells of the cropped image.


In [15]:
descriptor_type = LbpDescriptor("lbp")
descriptorsTest(descriptor_type)


Total descriptor length : 30720
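
This length is consistent with a 10x10-pixel cell layout: over the roughly 150x80 usable region there are 15 x 8 = 120 cells, each contributing a 256-bin histogram, hence 120 * 256 = 30720 values. Below is a minimal numpy sketch of such a descriptor; `lbp_codes` and `lbp_descriptor` are illustrative stand-ins, not the cpp_wrapper implementation, which may differ in neighbour ordering, thresholding convention and border handling.

import numpy as np

def lbp_codes(img):
    """8-neighbour LBP code for each interior pixel (naive numpy version)."""
    img = np.asarray(img, dtype=np.int32)
    c = img[1:-1, 1:-1]
    # neighbours in a fixed circular order; each contributes one bit of the code
    neighbours = [img[:-2, :-2], img[:-2, 1:-1], img[:-2, 2:],
                  img[1:-1, 2:], img[2:, 2:], img[2:, 1:-1],
                  img[2:, :-2], img[1:-1, :-2]]
    codes = np.zeros_like(c)
    for bit, n in enumerate(neighbours):
        codes |= (n >= c).astype(np.int32) << bit
    return codes

def lbp_descriptor(img, cell=10, bins=256):
    """Concatenate one histogram of LBP codes per non-overlapping cell."""
    codes = lbp_codes(img)
    h, w = codes.shape
    hists = []
    for y in range(0, h - h % cell, cell):
        for x in range(0, w - w % cell, cell):
            block = codes[y:y+cell, x:x+cell]
            hists.append(np.bincount(block.ravel(), minlength=bins))
    return np.concatenate(hists)

# on a 152x82 crop the code map is 150x80, i.e. 15x8 cells of 10x10 pixels,
# hence 120 * 256 = 30720 histogram bins
print lbp_descriptor(np.random.randint(0, 256, (152, 82))).shape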

Uniform LBP

The uniform LBP descriptor keeps only 59 bins in each cell histogram instead of 256.


In [16]:
descriptor_type = LbpDescriptor("ulbp")
descriptorsTest(descriptor_type)


Total descriptor length : 7080
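
The 59 bins come from the 58 "uniform" 8-bit patterns (at most two circular 0/1 transitions) plus one shared bin for all non-uniform codes, so the descriptor shrinks to 120 * 59 = 7080 values. A small sketch of the corresponding lookup table (illustrative only, not the cpp_wrapper code):

import numpy as np

def uniform_lookup(bits=8):
    """Map each 8-bit LBP code to one of 59 bins: 58 uniform patterns plus one
    shared bin for all non-uniform codes (more than 2 circular transitions)."""
    table = np.zeros(2 ** bits, dtype=np.int32)
    next_bin = 0
    for code in range(2 ** bits):
        # count circular 0->1 / 1->0 transitions by comparing the code with its rotation
        rotated = (code >> 1) | ((code & 1) << (bits - 1))
        transitions = bin(code ^ rotated).count("1")
        if transitions <= 2:
            table[code] = next_bin
            next_bin += 1
        else:
            table[code] = 58  # shared "non-uniform" bin
    return table

table = uniform_lookup()
print len(np.unique(table))   # 59 bins -> 120 cells * 59 = 7080 dimensions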

Uniform LBP + PCA

Adding a PCA space transformation after computing the uniform LBP descriptor.


In [17]:
pca = Pca(filename="../benchmarks/lfw/models/PCA/ulbp_lfwa/set_1.txt")

In [18]:
descriptor_type = LbpDescriptor("ulbp_pca", pca=pca)
descriptorsTest(descriptor_type)


Total descriptor length : 200
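
The Pca class loads a model trained offline; as a reminder of what the step does, the 7080-dimensional uniform LBP descriptor is centred and projected onto the top 200 principal components learned on a training set. The ToyPca below is only a hypothetical illustration of that projection, not the actual wrapper.

import numpy as np

class ToyPca:
    """Hypothetical stand-in for the Pca wrapper used above."""
    def __init__(self, X, k=200):
        self.mean = X.mean(axis=0)
        # principal axes via SVD of the centred training matrix
        _, s, Vt = np.linalg.svd(X - self.mean, full_matrices=False)
        self.components = Vt[:k]                       # (k, 7080)
        self.eigenvalues = (s[:k] ** 2) / (len(X) - 1)

    def project(self, desc):
        # 7080-dimensional descriptor -> k-dimensional projection
        return np.dot(self.components, desc - self.mean)

X_train = np.random.rand(300, 7080).astype(np.float32)   # fake training descriptors
toy = ToyPca(X_train, k=200)
print toy.project(X_train[0]).shape                       # (200,)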

Uniform LBP + WPCA

Adding a whitening step after the PCA (=> WPCA)


In [19]:
descriptor_type = LbpDescriptor("ulbp_wpca", pca=pca)
descriptorsTest(descriptor_type)


Total descriptor length : 200
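
The whitening step rescales each PCA coordinate by the inverse square root of its eigenvalue, so that all components have comparable variance before the inner-product comparison; the same operation appears explicitly in the "Output dimension effect" cell below. A minimal sketch, assuming getEigenvalues() returns the variances of the kept components:

import numpy as np

def whiten(pca_desc, eigenvalues):
    # divide each PCA coordinate by the standard deviation of that component
    return pca_desc * np.power(eigenvalues, -0.5)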

Uniform LBP + PCA + LDA

Adding a supervised learning step (LDA) after the PCA.


In [20]:
lda = Lda("../benchmarks/lfw/models/LDA/ulbp_pca_lfwa/set_1.txt")

In [21]:
descriptor_type = LbpDescriptor("ulbp_pca_lda", pca=pca, lda=lda)
descriptorsTest(descriptor_type)


Total descriptor length : 50
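
The Lda model is also trained offline. As a rough, hypothetical sketch of the idea: LDA learns from labelled training descriptors a projection that maximises between-identity scatter relative to within-identity scatter, here reducing 200 PCA dimensions to 50.

import numpy as np

def fit_lda(X, y, k):
    """Toy LDA fit: X is (n, d) PCA descriptors, y identity labels, k output dims."""
    mean = X.mean(axis=0)
    d = X.shape[1]
    Sw = np.zeros((d, d))   # within-class scatter
    Sb = np.zeros((d, d))   # between-class scatter
    for c in np.unique(y):
        Xc = X[y == c]
        mc = Xc.mean(axis=0)
        Sw += np.dot((Xc - mc).T, Xc - mc)
        diff = (mc - mean)[:, np.newaxis]
        Sb += len(Xc) * np.dot(diff, diff.T)
    # leading eigenvectors of pinv(Sw) * Sb give the most discriminative directions
    eigvals, eigvecs = np.linalg.eig(np.dot(np.linalg.pinv(Sw), Sb))
    order = np.argsort(eigvals.real)[::-1]
    return eigvecs.real[:, order[:k]]   # (d, k) projection matrix

# usage sketch with hypothetical training data:
# W = fit_lda(X_train, y_train, 50); desc_lda = np.dot(desc_pca, W)   # 200 -> 50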

Uniform LBP + PCA + JB

Replacing the LDA step with a Joint Bayesian (JB) model.


In [25]:
class LbpJbDescriptor:
    """Uniform LBP + PCA descriptor followed by the Joint Bayesian (JB) transform."""

    def __init__(self, pca, jb):
        self.descriptor = LbpDescriptor("ulbp_pca", pca=pca)
        self.jb = jb

    def compute(self, x, normalize=False):
        desc = self.descriptor.compute(x, normalize=False)
        # the JB transform works on batches, hence the added/removed leading axis
        return self.jb.transform(desc[np.newaxis, :])[0]

In [26]:
from utils.file_manager import pickleLoad

jb = pickleLoad("../benchmarks/lfw/models/JB/ulbp_pca_lfwa/set_1.txt")

In [27]:
descriptor_type = LbpJbDescriptor(pca, jb)
descriptorsTest(descriptor_type)


Total descriptor length : 201
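
For reference, the Joint Bayesian verification score used further down (jointBayesianDistance) is, in the formulation of Chen et al., "Bayesian Face Revisited", a log-likelihood ratio of the same-identity versus different-identity hypotheses. A sketch of that score, with hypothetical matrices A and G derived from the learned within- and between-identity covariances:

import numpy as np

def joint_bayesian_score(x1, x2, A, G):
    # r(x1, x2) = x1' A x1 + x2' A x2 - 2 x1' G x2  (higher = more likely same identity)
    return (np.dot(x1, np.dot(A, x1))
            + np.dot(x2, np.dot(A, x2))
            - 2 * np.dot(x1, np.dot(G, x2)))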

Results


In [28]:
descriptor_types = [
    LbpDescriptor("lbp"),
    LbpDescriptor("ulbp"),
    LbpDescriptor("ulbp_pca", pca=pca),
    LbpDescriptor("ulbp_wpca", pca=pca),
    LbpDescriptor("ulbp_pca_lda", pca=pca, lda=lda),
    LbpJbDescriptor(pca, jb),
]

Speed comparison


In [29]:
for descriptor_type in descriptor_types:
    %timeit -n 1000 descriptor_type.compute(imgs[0])


1000 loops, best of 3: 132 µs per loop
1000 loops, best of 3: 87.8 µs per loop
1000 loops, best of 3: 265 µs per loop
1000 loops, best of 3: 261 µs per loop
1000 loops, best of 3: 263 µs per loop
1000 loops, best of 3: 336 µs per loop

ROC curves


In [24]:
from itertools import imap
from stats import *
from datasets import lfw
from benchmarks import lfw as lfw_bench
from learning.joint_bayesian import jointBayesianDistance


sets_ground_truth = lfw.loadSetsGroundTruth()

descs_files = ["lbp_lfwa", "ulbp_lfwa", "ulbp_pca_lfwa", "ulbp_wpca_lfwa", "ulbp_pca_lda_lfwa"]
scores = [lfw_bench.computeDistanceMatrix(descs, sets_ground_truth, distance=np.inner) for descs in imap(lfw_bench.loadDescriptors, descs_files)]
rocs = [lfw_bench.computeMeanROC(score) for score in scores]

jb_descs = lfw_bench.loadDescriptors("ulbp_pca_jb_not_normalized_lfwa")
scores.append(lfw_bench.computeDistanceMatrix(jb_descs, sets_ground_truth, distance=jointBayesianDistance))
rocs.append(lfw_bench.computeMeanROC(scores[-1]))

In [27]:
labels = ["LBP", "Uniform LBP", "Uniform LBP with PCA", "Uniform LBP with WPCA", "Uniform LBP with PCA + LDA", "Uniform LBP with PCA + JB"]

plotROC(rocs, labels, "Comparative ROC of different methods")



In [21]:
for label, score in zip(labels, scores):
    mean, std = lfw_bench.computeMeanAccuracy(score)
    print "%s: %0.4f +/- %0.4f"%(label, mean, std)


LBP: 0.7215 +/- 0.0048
Uniform LBP: 0.7208 +/- 0.0040
Uniform LBP with PCA: 0.7322 +/- 0.0035
Uniform LBP with WPCA: 0.7805 +/- 0.0024
Uniform LBP with PCA + LDA: 0.8203 +/- 0.0072
Uniform LBP with PCA + JB: 0.8265 +/- 0.0052

Score distributions


In [22]:
prev_figsize = pylab.rcParams['figure.figsize']
pylab.rcParams['figure.figsize'] = (14, 10)

for i,((matches, mismatches), label) in enumerate(zip(scores, labels)):
    if i != 5:
        r = (-0.6, 1.0)
    else:
        r = (-140, 60)
        
    subplot(len(scores)/2, 3, i+1)
    
    array = np.asarray(matches).ravel()
    weights = np.ones_like(array)/len(array)
    positive, _ ,_ = hist(array, weights=weights, bins=150, range=r, histtype="step", color="g")
    
    array = np.asarray(mismatches).ravel()
    weights = np.ones_like(array)/len(array)
    negative, _, _ = hist(array, weights=weights, bins=150, range=r, histtype="step", color="r")

    # percentage overlap between the match and mismatch histograms (lower is better)
    print np.sum(np.min([positive, negative], axis=0))*100
    title(label)
    xlim(*r)

pylab.rcParams['figure.figsize'] = prev_figsize


56.8659871758
57.132640481
54.8983454704
44.5656329393
36.4655822515
34.9333333333

In [23]:
from scipy import stats

prev_figsize = pylab.rcParams['figure.figsize']
pylab.rcParams['figure.figsize'] = (16, 10)

for i,((matches, mismatches), label) in enumerate(zip(scores, labels)):
    if i != 5:
        xx = np.linspace(-0.6, 1.0, num=100)
    else:
        xx = np.linspace(-140, 60, num=100)
    subplot(len(scores)/2, 3, i+1)

    kde = stats.gaussian_kde(np.asarray(matches).ravel())
    plot(xx, kde(xx), "g")
    kde = stats.gaussian_kde(np.asarray(mismatches).ravel())
    plot(xx, kde(xx), "r")
    
    title(label)
    xlim(xx.min(), xx.max())
    
pylab.rcParams['figure.figsize'] = prev_figsize



In [24]:
from scipy import stats


prev_figsize = pylab.rcParams['figure.figsize']
pylab.rcParams['figure.figsize'] = (14, 6)

for i,((matches, mismatches), label) in enumerate(zip(scores, labels)):
    if i != 5:
        xx = np.linspace(-0.6, 1.0, num=500)
    else:
        xx = np.linspace(-140, 60, num=500)
    subplot(len(scores)/3, 3, i+1)

    mu1, sigma1 = stats.norm.fit(np.asarray(matches).ravel())
    pdf1 = stats.norm.pdf(xx, mu1, sigma1) #/ (sqrt(2*np.pi) * sigma1)
    plot(xx, pdf1, "g")
    mu2, sigma2 = stats.norm.fit(np.asarray(mismatches).ravel())
    pdf2 = stats.norm.pdf(xx, mu2, sigma2) #/ (sqrt(2*np.pi) * sigma2)
    plot(xx, pdf2, "r")

    fill_between(xx, 0, pdf1, where=pdf1<=pdf2, color="none", edgecolor="b", hatch="x", linewidth=0.0)
    fill_between(xx, 0, pdf2, where=pdf1>pdf2, color="none", edgecolor="b", hatch="x", linewidth=0.0)
    
    title(label)
    xlim(xx.min(), xx.max())

pylab.rcParams['figure.figsize'] = prev_figsize


BLUFR


In [25]:
from itertools import imap
from stats import *
from datasets import lfw
from benchmarks import lfw as lfw_bench
from learning.joint_bayesian import jointBayesianDistance
from benchmarks import blufr


labels = blufr.loadLabels()
descriptors, gallery_indexes, probe_indexes = blufr.loadTestDescriptors("ulbp_wpca_lfwa")


---------------------------------------------------------------------------
IOError                                   Traceback (most recent call last)
<ipython-input-25-853b38f2f8fe> in <module>()
      8 
      9 labels = blufr.loadLabels()
---> 10 descriptors, gallery_indexes, probe_indexes = blufr.loadTestDescriptors("ulbp_wpca_lfwa")

/home/tlorieul/Dev/Snoop/src/lib/Python/lib/benchmarks/blufr.py in loadTestDescriptors(descriptor_type)
     47 
     48     for i in range(sets_number):
---> 49         descriptors.append(np.load(os.path.join(config.blufr_benchmark_path, "test", descriptor_type, "set_%d.npy" % (i+1))))
     50         gallery_indexes.append(globalToLocalIndex(blufr["indexes"][blufr["gallery"][i]-1], test_sets[i]))
     51         probe_indexes.append(globalToLocalIndex(blufr["indexes"][blufr["probe_images"][i]-1], test_sets[i]))

/usr/lib64/python2.7/site-packages/numpy/lib/npyio.pyc in load(file, mmap_mode)
    367     own_fid = False
    368     if isinstance(file, basestring):
--> 369         fid = open(file, "rb")
    370         own_fid = True
    371     elif isinstance(file, gzip.GzipFile):

IOError: [Errno 2] No such file or directory: '/home/tlorieul/Dev/Snoop/src/lib/Python/benchmarks/blufr/test/ulbp_wpca_lfwa/set_1.npy'

In [ ]:
roc = blufr.computeOpenSetIdentificationROC(descriptors[0], labels, gallery_indexes[0], probe_indexes[0])

In [26]:
print roc[0]
print roc[1]


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-26-001a2c2dd7ed> in <module>()
----> 1 print roc[0]
      2 print roc[1]

NameError: name 'roc' is not defined

In [5]:
plotROC([roc], ["Uniform LBP with WPCA"], "BLUFR open-set identification ROC")



In [ ]:
from benchmarks import blufr


sets_ground_truth = lfw.loadSetsGroundTruth()

#descs_files = ["lbp_lfwa", "ulbp_lfwa", "ulbp_pca_lfwa", "ulbp_wpca_lfwa", "ulbp_pca_lda_lfwa"]
descs_files = ["ulbp_wpca_lfwa"]
scores = [lfw_bench.computeDistanceMatrix(descs, sets_ground_truth, distance=np.inner) for descs in imap(blufr.loadTestDescriptors, descs_files)]
rocs = [lfw_bench.computeMeanROC(score) for score in scores]

"""
jb_descs = lfw_bench.loadDescriptors("ulbp_pca_jb_not_normalized_lfwa")
scores.append(lfw_bench.computeDistanceMatrix(jb_descs, sets_ground_truth, distance=jointBayesianDistance))
rocs.append(lfw_bench.computeMeanROC(scores[-1]))
"""

PCA analysis

Variance distribution


In [30]:
import config

pca = Pca(filename=os.path.join(config.lfw_benchmark_path, "models", "PCA", "ulbp_7080_dim.txt"))
eigenvalues = pca.getEigenvalues()
plot(np.cumsum(eigenvalues) / np.sum(eigenvalues))
xlabel("Number of dimensions")
ylabel("Cumulative variance distribution")
xlim(xmax=7080)
grid()



In [31]:
ratio = np.cumsum(eigenvalues) / np.sum(eigenvalues)
print ratio[200-1], ratio[2000-1]


0.692465 0.954837

Output dimension effect


In [20]:
import config

ulbp_descs = lfw_bench.loadDescriptors("ulbp_not_normalized_lfwa")
pca = Pca(filename=os.path.join(config.lfw_benchmark_path, "models", "PCA", "ulbp_7080_dim.txt"))

labels = []
pca_results = []
pca_rocs = []
wpca_results = []
wpca_rocs = []

descs = None
#dims = [50, 100, 200, 400, 800, 2000][::-1]
dims = range(20, 3000, 20)[::-1]
    
for dim in dims:
    if descs is None:
        # project once at the largest dimension; smaller dimensions are then
        # obtained by truncating these projections (this assumes the components
        # are sorted by decreasing eigenvalue, as is usual)
        pca.reduceDimension(dim)
        descs = np.empty((len(ulbp_descs), dim), dtype=np.float32)
        for i in range(len(ulbp_descs)):
            descs[i] = pca.project(ulbp_descs[i])

        # whitening: divide each component by the square root of its eigenvalue
        whitening = np.power(pca.getEigenvalues(), -0.5)
        wpca_descs = np.empty((len(ulbp_descs), dim), dtype=np.float32)
        for i in range(len(ulbp_descs)):
            wpca_descs[i] = whitening * descs[i]

    else:
        descs = descs[:, :dim]
        wpca_descs = wpca_descs[:, :dim]
            
    labels.append("%d dimensions" % dim)
    
    scores = lfw_bench.computeDistanceMatrix(descs, sets_ground_truth)
    pca_results.append(lfw_bench.computeMeanAccuracy(scores))
    pca_rocs.append(lfw_bench.computeMeanROC(scores))
    
    scores = lfw_bench.computeDistanceMatrix(wpca_descs, sets_ground_truth)
    wpca_results.append(lfw_bench.computeMeanAccuracy(scores))
    wpca_rocs.append(lfw_bench.computeMeanROC(scores))

In [15]:
for label,(mean,std) in zip(labels, pca_results):
    print "%s: %0.4f +/- %0.4f"%(label, mean, std)
    
plotROC(pca_rocs, labels=labels, title="ROC with PCA compression")


2000 dimensions: 0.7393 +/- 0.0038
800 dimensions: 0.7352 +/- 0.0042
400 dimensions: 0.7322 +/- 0.0047
200 dimensions: 0.7233 +/- 0.0050
100 dimensions: 0.7098 +/- 0.0050
50 dimensions: 0.6940 +/- 0.0046

In [16]:
for label,(mean,std) in zip(labels, wpca_results):
    print "%s: %0.4f +/- %0.4f"%(label, mean, std)
    
plotROC(wpca_rocs, labels=labels, title="ROC with WPCA compression")


2000 dimensions: 0.7663 +/- 0.0063
800 dimensions: 0.7773 +/- 0.0064
400 dimensions: 0.7788 +/- 0.0060
200 dimensions: 0.7630 +/- 0.0053
100 dimensions: 0.7442 +/- 0.0050
50 dimensions: 0.7273 +/- 0.0052

In [28]:
plot(dims, zip(*pca_results)[0], label="Uniform LBP + PCA")
plot(dims, zip(*wpca_results)[0], label="Uniform LBP + WPCA")
xlabel("Dimensions")
ylabel("Mean accuracy")
legend(loc="lower right")


