Faces recognition using LLE and SVMs

The dataset used in this example is a preprocessed excerpt of the "Labeled Faces in the Wild", aka LFW_:

http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)

LFW: http://vis-www.cs.umass.edu/lfw/


In [1]:
%matplotlib inline
from time import time
import logging
import matplotlib.pyplot as plt

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn import manifold

print(__doc__)

# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')

lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)

# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape

# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]

# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]

print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)

# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42)


2017-03-19 22:28:27,557 Loading LFW people faces from /home/chandu/scikit_learn_data/lfw_home
Automatically created module for IPython interactive environment
Total dataset size:
n_samples: 1288
n_features: 1850
n_classes: 7

In [2]:
methods = ['standard', 'ltsa', 'hessian', 'modified']
accuracies = []
components = []
neighbors = []
n_components = 26
n_neighbors = 27

lle = manifold.LocallyLinearEmbedding(n_neighbors, n_components,eigen_solver='auto',method=methods[0])

X_train_changed = lle.fit_transform(X_train)
X_test_changed = lle.fit_transform(X_test)
param_grid = {'C': [1,1e1,1e2,5e2,1e3, 5e3, 1e4, 5e4, 1e5],
                              'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_changed, y_train)
y_pred = clf.predict(X_test_changed)

accuracies.append(float(np.sum(y_test==y_pred))/len(y_pred))
components.append(n_components)
neighbors.append(n_neighbors)

print('For '+str(n_components)+' components '+str(n_neighbors)+' neighbors'+', accuracy is '+str(float(np.sum(y_test==y_pred))/len(y_pred))+' confusion matrix is: ')
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
print(classification_report(y_test, y_pred, target_names=target_names))


For 26 components 27 neighbors, accuracy is 0.33850931677 confusion matrix is: 
[[ 1  1  1  8  1  0  1]
 [ 2  8  3 39  5  1  2]
 [ 0  3  3 12  5  0  4]
 [ 4 14  1 91 27  3  6]
 [ 0  0  0 22  2  1  0]
 [ 0  1  0  8  5  1  0]
 [ 1  0  2 21  9  0  3]]
                   precision    recall  f1-score   support

     Ariel Sharon       0.12      0.08      0.10        13
     Colin Powell       0.30      0.13      0.18        60
  Donald Rumsfeld       0.30      0.11      0.16        27
    George W Bush       0.45      0.62      0.52       146
Gerhard Schroeder       0.04      0.08      0.05        25
      Hugo Chavez       0.17      0.07      0.10        15
       Tony Blair       0.19      0.08      0.12        36

      avg / total       0.32      0.34      0.31       322


In [ ]: