In [1]:
%matplotlib inline
import cPickle
import pickle
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from termcolor import colored

In [2]:
def plot_confusion_matrix(y_test, y_pred, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    from sklearn.metrics import confusion_matrix
    cm = confusion_matrix(y_test, y_pred)
    
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    pd.DataFrame(cm).to_csv("confusion_2.csv", sep=",",header = list(classes))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
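
A minimal sanity check of plot_confusion_matrix on toy labels (the labels below are illustrative only, not part of the face data):

In [ ]:
y_true = np.array(['a', 'b', 'a', 'b'])
y_pred = np.array(['a', 'b', 'b', 'b'])
plot_confusion_matrix(y_true, y_pred, classes=np.unique(y_true),
                      normalize=True, title='Toy confusion matrix')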

In [3]:
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading

def transform_matrix_offset_center(matrix, x, y):
    o_x = float(x) / 2 + 0.5
    o_y = float(y) / 2 + 0.5
    offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
    reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
    transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
    return transform_matrix

def apply_transform(x, transform_matrix, channel_index=2, fill_mode='nearest', cval=0.):
    x = np.rollaxis(x, channel_index, 0)
    final_affine_matrix = transform_matrix[:2, :2]
    final_offset = transform_matrix[:2, 2]
    channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix,
                      final_offset, order=0, mode=fill_mode, cval=cval) for x_channel in x]
    x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, channel_index+1)
    return x


def image_rotation(x, rg,step, row_index=0, col_index=1, channel_index=2,
                    fill_mode='nearest', cval=0.):
    image_set = np.zeros((np.expand_dims(x,axis=0).shape),dtype=np.uint8)
    for i in range(-rg,rg,step):
        theta = (np.pi  * i)/ 180
        rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                [np.sin(theta), np.cos(theta), 0],
                                [0, 0, 1]])

        h, w = x.shape[row_index], x.shape[col_index]
        transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
        new = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
        image_set = np.vstack((image_set,np.expand_dims(new,axis=0)))
    return image_set[1:]

def image_shear(x,  row_index=0, col_index=1, channel_index=2,
                 fill_mode='nearest', cval=0.):
    shear = [-0.25,0.25,6.3,9.5,-9.5,2.95,-2.95,0]
    image_set = np.zeros((np.expand_dims(x,axis=0).shape),dtype=np.uint8)
    for i in shear:
        shear_matrix = np.array([[1, -np.sin(i), 0],
                             [0, np.cos(i), 0],
                             [0, 0, 1]])
        h, w = x.shape[row_index], x.shape[col_index]
        transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
        new = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
        image_set = np.vstack((image_set,np.expand_dims(new,axis=0)))
    return image_set[1:]



def image_zoom(x, zoom_range, step, row_index=0, col_index=1, channel_index=2,
                fill_mode='nearest', cval=0.):
    if len(zoom_range) != 2:
        raise Exception('zoom_range should be a tuple or list of two floats. '
                        'Received arg: ', zoom_range)

    image_set = np.zeros((np.expand_dims(x, axis=0).shape), dtype=np.uint8)
    if zoom_range[0] == 1 and zoom_range[1] == 1:
        # no zoom requested: keep the original image
        image_set = np.vstack((image_set, np.expand_dims(x, axis=0)))
    else:
        for zx in np.arange(zoom_range[0], zoom_range[1], step):
            for zy in np.arange(zoom_range[0], zoom_range[1], step):
                zoom_matrix = np.array([[zx, 0, 0],
                                        [0, zy, 0],
                                        [0, 0, 1]])

                h, w = x.shape[row_index], x.shape[col_index]
                transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
                new = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
                image_set = np.vstack((image_set, np.expand_dims(new, axis=0)))
    return image_set[1:]

def image_shift(x, wrg, hrg, step, row_index=0, col_index=1, channel_index=2,
                 fill_mode='nearest', cval=0.):
    h, w = x.shape[row_index], x.shape[col_index]
    image_set = np.zeros((np.expand_dims(x, axis=0).shape), dtype=np.uint8)
    for tx in np.arange(-hrg*h, hrg*h, step):
        for ty in np.arange(-wrg*w, wrg*w, step):
            translation_matrix = np.array([[1, 0, tx],
                                           [0, 1, ty],
                                           [0, 0, 1]])

            transform_matrix = translation_matrix  # no offset needed for a pure translation
            new = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
            image_set = np.vstack((image_set, np.expand_dims(new, axis=0)))
    return image_set[1:]
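
A quick sketch of how these helpers can be exercised; the array below is random noise and the parameter values are arbitrary choices made only to illustrate the call signatures:

In [ ]:
# synthetic 96x96 single-channel image (channels last, as the helpers expect)
sample = np.random.randint(0, 255, (96, 96, 1)).astype(np.uint8)
rotated_set = image_rotation(sample, rg=15, step=5)               # rotations of -15..10 degrees
sheared_set = image_shear(sample)                                 # fixed shear values from the list above
zoomed_set  = image_zoom(sample, zoom_range=(0.8, 1.2), step=0.2)
shifted_set = image_shift(sample, wrg=0.1, hrg=0.1, step=5)       # +/-10% translations in 5-pixel steps
print(rotated_set.shape, sheared_set.shape, zoomed_set.shape, shifted_set.shape)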

In [4]:
def load_data():
    import pandas as pd
    import numpy as np
    from PIL import Image
    from cv2 import resize
    #from skimage.transform import resize
    from skimage import exposure
    
    train = pd.read_csv('/home/mckc/new/train.csv')
    test = pd.read_csv('/home/mckc/new/test.csv')
    
    #train = train.query('subject in ("Dhruva Murari","Ponraj S","Naresh Raj","Raashi Chhalani","naveen","Praba")')
    #test = test.query('subject in ("Dhruva Murari","Ponraj S","Naresh Raj","Raashi Chhalani","naveen","Praba")')
    print 'the training data shape is ',train.shape
    print 'the test data shape is ', test.shape
    
    X_tr = []
    Y_tr = []
    iteration = 0
    for i in train.values[:,0]:
        image = exposure.equalize_hist(resize(np.array(Image.open(i).convert('L')),(96,96)))
        #print image.shape
        X_tr.append(image)
        Y_tr.append(train.values[iteration,1])
        iteration+=1
        if iteration % 500==0:
            print colored((float(iteration)/len(train.values[:,0])*100 ,' Percentage complete'), 'green')
            
            
    X_ts = []
    Y_ts = []
    iteration = 0
    for i in test.values[:,0]:
        image = exposure.equalize_hist(resize(np.array(Image.open(i).convert('L')),(96,96)))
        X_ts.append(image)
        Y_ts.append(test.values[iteration,1])
        iteration+=1
        if iteration % 500==0:
            print colored((float(iteration)/len(test.values[:,0])*100 ,' Percentage complete'), 'green')
    X_tr,X_ts,Y_tr,Y_ts = np.array(X_tr),np.array(X_ts),np.array(Y_tr),np.array(Y_ts)
    print 'the training file shape',X_tr.shape,Y_tr.shape
    print 'the testing file shape',X_ts.shape,Y_ts.shape
    
    return X_tr,X_ts,Y_tr,Y_ts

In [5]:
def simulate(X,Y):
    import scipy as sp
    import scipy.ndimage
    complete = np.zeros((1,96,96),dtype=np.uint8)
    Y_complete = []
    for i in range(len(X)):
        #complete = np.vstack((complete,X[i,:,:].reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = 5,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = 10,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = 15,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = -5,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = -15,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = -10,reshape=False,cval=255).reshape(1,96,96)))
        rotated = np.fliplr(X[i,:,:])
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = 5,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = 10,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = 15,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = -5,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = -10,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = -15,reshape=False,cval=255).reshape(1,96,96)))
        #complete = np.vstack((complete,rotated.reshape(1,96,96)))
        Y_complete = np.append(Y_complete,([Y[i]]*12))
        if i % 500==0:
            print colored((float(i)/len(X)*100 ,' Percentage complete'),'green')
    complete = complete[1:,:,:]
    return complete,Y_complete
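
A sketch of what simulate produces for a couple of random 96x96 inputs, only to show the 12x expansion of the sample count (the data here is noise, not faces):

In [ ]:
X_demo = np.random.randint(0, 255, (2, 96, 96)).astype(np.uint8)
Y_demo = np.array(['a', 'b'])
X_sim, Y_sim = simulate(X_demo, Y_demo)
print(X_sim.shape, Y_sim.shape)   # (24, 96, 96) and (24,): 12 rotated/flipped copies per image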

In [6]:
def augment(X,Y):
    import skimage.transform as tf
    import scipy as sp
    import scipy.ndimage
    complete = np.zeros((1,96,96),dtype=np.uint8)
    Y_complete = []
    for i in range(len(X)):
        #complete = np.vstack((complete,X[i,:,:].reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1,translation=(0,-5))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1,translation=(0,5))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1,translation=(5,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1,translation=(-5,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1.2,translation=(0,-5))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1.2,translation=(0,5))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1.2,translation=(5,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1.2,translation=(-5,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=0.8,translation=(0,-5))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=0.8,translation=(0,5))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=0.8,translation=(5,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=0.8,translation=(-5,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        
        tform = tf.SimilarityTransform(scale=1,translation=(0,-3))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1,translation=(0,3))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1,translation=(3,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1,translation=(-3,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1.2,translation=(0,-3))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1.2,translation=(0,3))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1.2,translation=(3,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=1.2,translation=(-3,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=0.8,translation=(0,-3))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=0.8,translation=(0,3))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=0.8,translation=(3,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.SimilarityTransform(scale=0.8,translation=(-3,0))
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))

        
        tform = tf.AffineTransform(scale=(1,1),shear=0.25)
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.AffineTransform(scale=(1,1),shear=0.2)
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.AffineTransform(scale=(1,1),shear=0.15)
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.AffineTransform(scale=(1,1),shear=0.1)
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.AffineTransform(scale=(1,1),shear=-0.25)
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.AffineTransform(scale=(1,1),shear=-0.2)
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.AffineTransform(scale=(1,1),shear=-0.15)
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        tform = tf.AffineTransform(scale=(1,1),shear=-0.1)
        complete = np.vstack((complete,tf.warp(X[i,:,:], tform).reshape(1,96,96)))
        
        rotated = np.fliplr(X[i,:,:])
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = 5,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = 10,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = 15,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = -5,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = -15,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:], angle = -10,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = 5,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = 10,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = 15,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = -5,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = -10,reshape=False,cval=255).reshape(1,96,96)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = -15,reshape=False,cval=255).reshape(1,96,96)))
        
        
        Y_complete = np.append(Y_complete,([Y[i]]*44))
        if i % 50==0:
            print colored((float(i)/len(X)*100 ,' Percentage complete'),'green')
    complete = complete[1:,:,:]
    return complete,Y_complete

In [7]:
def Save_data(X,Y,data):
    for i in range(len(X)):
        file_name = '/home/mckc/face_'+data+'/'+Y[i]+'_'+str(i)+'.npy'
        np.save(file_name,X[i,:,:])

In [8]:
def load(data):
    import os
    import numpy as np
    files = os.listdir('/home/mckc/face_'+data+'/')    
    X = np.zeros((1,96,96),dtype=np.float64)
    Y = []
    iter = 0
    for i in files:
        X = np.vstack((X,np.load('/home/mckc/face_'+data+'/'+i).reshape(1,96,96)))
        index = i.index('_')
        Y = np.append(Y,i[:index])
        iter = iter+1
        if iter % 800 ==0:
            print colored((float(iter)/len(files)*100 ,' Percentage complete'), 'green')
            
    print X[1:,:,:].shape,Y.shape
    return X[1:,:,:],Y

In [ ]:
#from cv2 import resize
from skimage import exposure
import os
import cv2
import numpy as np

images_geq = []
images = []
label=[]
files = os.listdir('/home/mckc/aug/face_labels/All')
for i in files:
    image =cv2.resize(cv2.imread('/home/mckc/aug/face_labels/All/'+i,0),(96,96))
    image_geq = exposure.equalize_hist(image)

    # Equalization
    images.append(image)
    images_geq.append(image_geq)
    label.append(i)
image_data = np.array(images).reshape(-1,9216)
images_geq = np.array(images_geq).reshape(-1,9216)

In [9]:
X_train,X_ts,Y_train,Y_ts    = load_data()
#X_tr,X_ts,Y_tr,Y_ts    = load_data()


the training data shape is  (2852, 2)
the test data shape is  (969, 2)
(17.53155680224404, ' Percentage complete')
(35.06311360448808, ' Percentage complete')
(52.594670406732114, ' Percentage complete')
(70.12622720897616, ' Percentage complete')
(87.6577840112202, ' Percentage complete')
(51.59958720330238, ' Percentage complete')
the training file shape (2852, 96, 96) (2852,)
the testing file shape (969, 96, 96) (969,)

In [ ]:
a = X_train[40]
plt.imshow(a,cmap=cm.Greys_r)

In [ ]:
cv2.ellipse(a, (48,48), (50,80), 0, 0, 360, 0, 20)
plt.imshow(a,cmap=cm.Greys_r)

In [10]:
%%time
#X_tr,Y_tr = simulate(X_train,Y_train)
#X_tr,Y_tr = augment(X_train,Y_train)
X_tr,Y_tr = (X_train,Y_train)
print X_tr.shape,Y_tr.shape


(2852, 96, 96) (2852,)
CPU times: user 4 ms, sys: 0 ns, total: 4 ms
Wall time: 226 µs

In [ ]:
Save_data(X_tr,Y_tr,'train')
Save_data(X_ts,Y_ts,'test')

In [ ]:
%%time
X_tr,Y_tr = load('train')
X_ts,Y_ts = load('test')

In [15]:
from sklearn.utils import shuffle
X_tr,Y_tr = shuffle(X_tr,Y_tr)
X_normal = X_tr.reshape(-1,9216)
X_test_normal = X_ts.reshape(-1,9216)
map, Y_number = np.unique(Y_tr, return_inverse=True)
# index the test labels against the (sorted) training class list so the ids line up with `map`
Y_test_number = np.searchsorted(map, Y_ts)
#Y_number = Y_number.astype("|S6")
#Y_test_number = Y_test_number.astype("|S6")
np.save('/home/mckc/map',map)
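
The `map` array from np.unique is the lookup table from integer class ids back to subject names; a small round-trip check (purely illustrative):

In [ ]:
# np.unique guarantees that indexing the sorted unique values with the
# inverse indices reconstructs the original label array
assert (map[Y_number] == Y_tr).all()
print(map[:5])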

In [16]:
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import pickle

In [ ]:
map = np.load('/home/mckc/map.npy')
loaded_model = pickle.load(open('/home/mckc/Face_code/logistic.pkl', 'rb'))
Y_prob = loaded_model.predict_proba(images_geq)
# sort class probabilities in descending order and keep the top 4 guesses per image
a = np.array([map[i] for i in np.argsort(Y_prob, axis=1)[:, ::-1]])[:, :4]
zip(label, a[:, 0])

In [ ]:
clf = LogisticRegression(verbose=0,n_jobs=-1,multi_class='multinomial',solver='lbfgs',max_iter=50,warm_start=True)
clf.fit(X_normal,Y_number)
Y_logistic = clf.predict(X_ts.reshape(-1,9216))
#confusion_matrix(map[Y_logistic],Y_ts,labels=map)
print 'Accuracy of the model is ',accuracy_score(map[Y_logistic],Y_ts)
plot_confusion_matrix(map[Y_logistic],Y_ts, classes=map,normalize=True,title='Confusion matrix')

In [ ]:
pickle.dump(clf, open('/home/mckc/Face_code/logistic_1.pkl', 'wb'))

In [ ]:
print map[0]
plt.imshow(clf.coef_[0,:].reshape(96,96),cmap=cm.Greys_r)

In [ ]:
print map[1]
plt.imshow(clf.coef_[1,:].reshape(96,96),cmap=cm.Greys_r)

In [ ]:
print map[2]
plt.imshow(clf.coef_[2,:].reshape(96,96),cmap=cm.Greys_r)

In [ ]:
print map[3]
plt.imshow(clf.coef_[3,:].reshape(96,96),cmap=cm.Greys_r)

In [ ]:
print map[4]
plt.imshow(clf.coef_[4,:].reshape(96,96),cmap=cm.Greys_r)

In [ ]:
print map[5]
plt.imshow(clf.coef_[5,:].reshape(96,96),cmap=cm.Greys_r)

In [ ]:
recognizer = RandomForestClassifier(100,verbose=0,oob_score=True,n_jobs=-1,warm_start=True)
recognizer.fit(X_normal,Y_number)

Y_rf= recognizer.predict(X_ts.reshape(-1,9216))
Y_rf_vales = map[Y_rf]

print 'Accuracy of the model is ',accuracy_score(Y_ts,Y_rf_vales)
plt.figure(10)
plot_confusion_matrix(Y_rf_vales,Y_ts, classes=map,normalize=True,title='Confusion matrix')

In [ ]:
pickle.dump(recognizer, open('/home/mckc/Face_code/rf.pkl', 'wb'))

In [ ]:
with open('/home/mckc/Face_code/rf.pkl','rb') as f:
    object_file = pickle.load(f)

In [ ]:
with open('/home/mckc/Face_code/rf.pickle', 'wb') as f:
    cPickle.dump(object_file, f)

with open('/home/mckc/Face_code/rf.pickle', 'rb') as f:
    forest2 = cPickle.load(f)

In [ ]:
importances = recognizer.feature_importances_
importance_image = importances.reshape(96,96)
#plt.figure(figsize=(7,7))
plt.imshow(importance_image,cmap=cm.Greys_r)

In [16]:
# LBPH
recognizer = cv2.createLBPHFaceRecognizer()
recognizer.train(X_tr, Y_number.astype(int))

Y_LBP = [recognizer.predict(X_ts[i])[0] for i in range(X_ts.shape[0])]
print 'Accuracy of the model is ',accuracy_score(map[Y_LBP],Y_ts)
plot_confusion_matrix(map[Y_LBP],Y_ts, classes=map,normalize=True,title='Confusion matrix')


Accuracy of the model is  0.723163841808
Normalized confusion matrix
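
cv2.createLBPHFaceRecognizer is the OpenCV 2.4 constructor; if this notebook is run against OpenCV 3.x the recognizers live in the cv2.face contrib module instead. A version-tolerant lookup, written as an assumption about the reader's installation rather than something used in the run above:

In [ ]:
# pick whichever LBPH constructor the installed OpenCV exposes
if hasattr(cv2, 'createLBPHFaceRecognizer'):
    lbph = cv2.createLBPHFaceRecognizer()                 # OpenCV 2.4
elif hasattr(cv2, 'face') and hasattr(cv2.face, 'createLBPHFaceRecognizer'):
    lbph = cv2.face.createLBPHFaceRecognizer()            # OpenCV 3.0-3.2 (contrib)
else:
    lbph = cv2.face.LBPHFaceRecognizer_create()           # OpenCV 3.3+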

In [ ]:
# Eigenfaces
recognizer = cv2.createEigenFaceRecognizer()
recognizer.train(X_tr, Y_number)

Y_LBP = [recognizer.predict(X_ts[i])[0] for i in range(X_ts.shape[0])]
print 'Accuracy of the model is ',accuracy_score(map[Y_LBP],Y_ts)
plot_confusion_matrix(map[Y_LBP],Y_ts, classes=map,normalize=True,title='Confusion matrix')

In [ ]:
# Fisherfaces
recognizer = cv2.createFisherFaceRecognizer()
recognizer.train(X_tr, Y_number)

Y_LBP = [recognizer.predict(X_ts[i])[0] for i in range(X_ts.shape[0])]
print 'Accuracy of the model is ',accuracy_score(map[Y_LBP],Y_ts)
plot_confusion_matrix(map[Y_LBP],Y_ts, classes=map,normalize=True,title='Confusion matrix')

In [ ]:
from tflearn.data_utils import  to_categorical
#from keras.callbacks import EarlyStopping
#early_stopping = EarlyStopping(monitor='val_loss', patience=2)

Y_Keras = to_categorical(Y_number, 97)
Y_test_keras = to_categorical(Y_test_number,97)

In [ ]:
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras import backend as K
from keras.optimizers import Adam,SGD,Adagrad,Adadelta

# Create first network with Keras
model = Sequential()
model.add(Dense(1000, input_dim=9216,activation='sigmoid'))
#model.add(Dense(500,activation='sigmoid'))
#model.add(Dense(1000,activation='relu'))
model.add(Dense(97,activation='softmax'))
adagrad = Adagrad(lr=0.0001, epsilon=1e-08)
adadelta = Adadelta(lr=.1, rho=0.95, epsilon=1e-08)
adam = Adam(lr=0.000001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

# Compile model
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])

In [ ]:
model.fit(X_tr.reshape(-1,9216), Y_Keras, nb_epoch=120, batch_size=10,verbose=1
         ,validation_data=(X_ts.reshape(-1,9216),Y_test_keras ))

Y_kr= model.predict_classes(X_ts.reshape(-1,9216))

print 'Accuracy of the model is ',accuracy_score(Y_ts,map[Y_kr])
#confusion_matrix(Y_ts,Y_kr_vales)
plot_confusion_matrix(map[Y_kr],Y_ts, classes=map,normalize=True,title='Confusion matrix')

In [ ]:
%%time
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression

#tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)
network = input_data(shape=[None, 96, 96, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)

network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 128, 3, activation='relu')
network = max_pool_2d(network, 2)

network = fully_connected(network, 1000, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 97, activation='softmax')
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

# Training
model = tflearn.DNN(network, tensorboard_verbose=3,tensorboard_dir='/home/mckc/tf_logs/')
model.fit({'input': X_tr.reshape(-1,96,96,1)}, {'target': Y_Keras}, n_epoch=10,
           validation_set=({'input': X_ts.reshape(-1,96,96,1)}, {'target': Y_test_keras}),show_metric=True, run_id='convnet_face_onn')

In [ ]:
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import  to_categorical


#tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)
print('start1')
network = input_data(shape=[None, 96, 96, 1], name='input')
#network = conv_2d(network, 32, 3, activation='relu')
#network = conv_2d(network, 64, 3, activation='relu')
#network = max_pool_2d(network, 2)

#network = conv_2d(network, 64, 3, activation='relu')
#network = conv_2d(network, 128, 3, activation='relu')
#network = max_pool_2d(network, 2)

#network = fully_connected(network, 1000, activation='relu')
#network = dropout(network, 0.5)
#network = fully_connected(network, 97, activation='softmax')

network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='tanh')
network = dropout(network, 0.8)
# the 97-way softmax output layer is needed so the network output matches Y_Keras
network = fully_connected(network, 97, activation='softmax')

network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

# Training
print('start2')

print(X_tr.reshape(-1,96,96,1).shape,Y_Keras.shape)
model = tflearn.DNN(network, tensorboard_verbose=3,tensorboard_dir='/home/mckc/tf_logs/')
print('start3')
model.fit({'input': X_tr.reshape(-1,96,96,1)}, {'target': Y_Keras}, n_epoch=2,
           validation_set=({'input': X_ts.reshape(-1,96,96,1)}, {'target': Y_test_keras}),show_metric=True, run_id='convnet_face')

In [ ]:
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation

# Real-time data preprocessing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# Real-time data augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)

# Convolutional network building
network = input_data(shape=[None, 96, 96, 1],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)

network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 128, 3, activation='relu')
network = max_pool_2d(network, 2)

network = fully_connected(network, 1000, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 97, activation='softmax')
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

# Train using classifier
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X_train.reshape(-1,96,96,1), Y_Keras, n_epoch=50, shuffle=True, validation_set=(X_ts.reshape(-1,96,96,1), Y_test_keras),
show_metric=True, batch_size=96, run_id='convnet_face_aug')

In [23]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten,AveragePooling2D
from keras.layers import Convolution2D, MaxPooling2D,BatchNormalization
from keras.utils import np_utils
from keras.optimizers import Adam,SGD,Adadelta,Adagrad
from keras import backend as K

Y_Keras = np_utils.to_categorical(Y_number, 97)
Y_Keras_test = np_utils.to_categorical(Y_test_number,97)

model = Sequential()
model.add(Convolution2D(32, 3, 3,border_mode='same',input_shape=(1, 96, 96)))
convout1 = Activation('relu')
model.add(convout1)
model.add(Convolution2D(64, 3, 3,border_mode='same'))
convout2 = Activation('relu')
model.add(convout2)
#model.add(BatchNormalization(epsilon=1e-05,axis=1,momentum=0.99))
model.add(MaxPooling2D((2,2), strides=(2,2)))

model.add(Convolution2D(64, 3, 3,border_mode='same'))
convout3 = Activation('relu')
model.add(convout3)
model.add(Convolution2D(128, 3, 3,border_mode='same'))
convout4 = Activation('relu')
model.add(convout4)
#model.add(BatchNormalization(epsilon=1e-05,axis=1,momentum=0.99))
model.add(MaxPooling2D((2,2), strides=(2,2)))

model.add(Convolution2D(96, 3, 3,border_mode='same'))
convout5 = Activation('relu')
model.add(convout5)
model.add(Convolution2D(192, 3, 3,border_mode='same'))
convout6 = Activation('relu')
model.add(convout6)
#model.add(BatchNormalization(epsilon=1e-05,axis=1,momentum=0.99))
model.add(MaxPooling2D((2,2), strides=(2,2)))

#model.add(Convolution2D(128, 3, 3,border_mode='same'))
#convout7 = Activation('relu')
#model.add(convout7)
#model.add(Convolution2D(256, 3, 3,border_mode='same'))
#convout8 = Activation('relu')
#model.add(convout8)
#model.add(MaxPooling2D((2,2), strides=(2,2)))

#model.add(Convolution2D(160, 3, 3,border_mode='same'))
#convout9 = Activation('relu')
#model.add(convout9)
#model.add(Convolution2D(320, 3, 3,border_mode='same'))
#convout10 = Activation('relu')
#model.add(convout10)
#model.add(AveragePooling2D(pool_size=(2, 2), strides=(1,1)))


model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(1000,activation='relu'))
model.add(Dense(97,activation='softmax'))

adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
#model.load_weights('model_augment.h5')
#model.load_weights("model_batch_1.h5")
#model_1 best with loss 0.6

from keras.callbacks import TensorBoard
tb = TensorBoard(log_dir='/home/mckc/logs', histogram_freq=1,)

In [ ]:
print('started')
model.fit(X_tr.reshape(-1,1,96,96), Y_Keras, nb_epoch=100, batch_size=30,verbose=1
          , callbacks=[tb]
         ,validation_data=(X_ts.reshape(-1,1,96,96),Y_Keras_test ))

Y_kr= model.predict_classes(X_ts.reshape(-1,1,96,96))

print 'Accuracy of the model is ',accuracy_score(Y_ts,map[Y_kr])
#confusion_matrix(Y_ts,Y_kr_vales)
plot_confusion_matrix(map[Y_kr],Y_ts, classes=map,normalize=False,title='Confusion matrix')

In [ ]:
%%time
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
    featurewise_center=False,  # set input mean to 0 over the dataset
    samplewise_center=False,  # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
    rotation_range=40,  # randomly rotate images in the range (degrees, 0 to 180)
    width_shift_range=0.3,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.3,  # randomly shift images vertically (fraction of total height)
    horizontal_flip=True,  # randomly flip images
    vertical_flip=False)  # randomly flip images

datagen.fit(X_tr.reshape(-1,1,96,96))
model.fit_generator(datagen.flow(X_tr.reshape(-1,1,96,96), Y_Keras,
                    batch_size=50), nb_epoch=1500,verbose=1,
                               callbacks=[tb],
        samples_per_epoch = 20000,validation_data=(X_ts.reshape(-1,1,96,96),Y_Keras_test ))


WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/keras/callbacks.py:517 in _set_model.: merge_all_summaries (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.merge_all.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/keras/callbacks.py:521 in _set_model.: __init__ (from tensorflow.python.training.summary_io) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.FileWriter. The interface and behavior is the same; this is just a rename.
Epoch 1/1500
 2600/20000 [==>...........................] - ETA: 327s - loss: 4.5517 - acc: 0.0277
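
To eyeball what the generator actually feeds the network, one batch can be drawn and plotted; the 3x3 grid below is an arbitrary choice for the sketch:

In [ ]:
# pull a single augmented batch from the generator and display it
batch_X, batch_Y = next(datagen.flow(X_tr.reshape(-1, 1, 96, 96), Y_Keras, batch_size=9))
plt.figure(figsize=(6, 6))
for j in range(9):
    plt.subplot(3, 3, j + 1)
    plt.imshow(batch_X[j, 0], cmap=cm.Greys_r)
    plt.axis('off')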

In [22]:
#model.save_weights("model_batch_1.h5")
model.save_weights("model_augment.h5")

In [ ]:
import theano
convout1_f = theano.function([model.input], convout1.output)
convout2_f = theano.function([model.input], convout2.output)
convout3_f = theano.function([model.input], convout3.output)
convout4_f = theano.function([model.input], convout4.output)
convout5_f = theano.function([model.input], convout5.output)
convout6_f = theano.function([model.input], convout6.output)
#convout7_f = theano.function([model.input], convout7.output)
#convout8_f = theano.function([model.input], convout8.output)
#convout9_f = theano.function([model.input], convout9.output)
#convout10_f = theano.function([model.input], convout10.output)

# utility functions
from mpl_toolkits.axes_grid1 import make_axes_locatable

def nice_imshow(ax, data, vmin=None, vmax=None, cmap=None):
    """Wrapper around pl.imshow"""
    if cmap is None:
        cmap = cm.jet
    if vmin is None:
        vmin = data.min()
    if vmax is None:
        vmax = data.max()
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    im = ax.imshow(data, vmin=vmin, vmax=vmax, interpolation='nearest', cmap=cmap)
    plt.colorbar(im, cax=cax)

In [ ]:
import numpy.ma as ma
def make_mosaic(imgs, nrows, ncols, border=1):
    """
    Given a set of images with all the same shape, makes a
    mosaic with nrows and ncols
    """
    nimgs = imgs.shape[0]
    imshape = imgs.shape[1:]
    
    mosaic = ma.masked_all((nrows * imshape[0] + (nrows - 1) * border,
                            ncols * imshape[1] + (ncols - 1) * border),
                            dtype=np.float32)
    
    paddedh = imshape[0] + border
    paddedw = imshape[1] + border
    for i in xrange(nimgs):
        row = int(np.floor(i / ncols))
        col = i % ncols
        
        mosaic[row * paddedh:row * paddedh + imshape[0],
               col * paddedw:col * paddedw + imshape[1]] = imgs[i]
    return mosaic

#plt.imshow(make_mosaic(np.random.random((9, 10, 10)), 3, 3, border=1))

In [14]:
import matplotlib.pyplot as plt
%matplotlib inline
i = 12

# Visualize the first layer of convolutions on an input image
sample = X_ts[i:i+1]

plt.figure()
plt.title('input')
nice_imshow(plt.gca(), np.squeeze(sample), vmin=0, vmax=1, cmap=cm.Greys_r)


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-14-d51a373a7431> in <module>()
      8 plt.figure()
      9 plt.title('input')
---> 10 nice_imshow(plt.gca(), np.squeeze(sample), vmin=0, vmax=1, cmap=cm.Greys_r)

NameError: name 'nice_imshow' is not defined

In [27]:
# Visualize convolution result (after activation)
C1 = convout1_f(sample.astype(np.float32).reshape(-1,1,96,96))
C1 = np.squeeze(C1)
print("C1 shape : ", C1.shape)

plt.figure(figsize=(15, 15))
plt.suptitle('convout1')
nice_imshow(plt.gca(), make_mosaic(C1, 6, 6), cmap=cm.Greys_r)


('C1 shape : ', (32, 96, 96))

In [15]:
from keras.models import model_from_json,model_from_yaml
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
print("Saved model to disk")


Saved model to disk

In [15]:
%%time
from keras.models import model_from_json,model_from_yaml
json_file = open('/home/mckc/Face_code/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("/home/mckc/Face_code/model.h5")
print("Loaded model from disk")


Loaded model from disk
CPU times: user 1.71 s, sys: 268 ms, total: 1.98 s
Wall time: 4.14 s
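
A model rebuilt from JSON carries the architecture and, after load_weights, the weights, but not the loss or optimizer; a sketch of re-compiling it and checking test accuracy, assuming the same 97-class setup used above:

In [ ]:
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
Y_loaded = loaded_model.predict_classes(X_ts.reshape(-1, 1, 96, 96))
print('Accuracy of the reloaded model is ' + str(accuracy_score(Y_ts, map[Y_loaded])))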

In [ ]:
from keras.models import model_from_json,model_from_yaml
json_file = open('/home/pi/Desktop/files/model_backup.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model

In [17]:
# serialize model to YAML
model_yaml = loaded_model.to_yaml()
with open("model.yaml", "w") as yaml_file:
    yaml_file.write(model_yaml)

In [18]:
# load YAML and create model
yaml_file = open('/home/pi/Desktop/files/model.yaml', 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model = model_from_yaml(loaded_model_yaml)

# load weights into new model
loaded_model.load_weights("/home/mckc/Face_code/model.h5")
print("Loaded model from disk")


Loaded model from disk
