In [1]:
import sys
import dlib
import openface
from skimage import io
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from termcolor import colored
In [5]:
import theano
import keras
In [ ]:
import pandas as pd
import numpy as np
from PIL import Image
from skimage.transform import resize
import dlib
train = pd.read_csv('/home/mckc/All Data/train.csv')
test = pd.read_csv('/home/mckc/All Data/test.csv')
print 'the training data shape is ',train.shape
print 'the test data shape is ', test.shape
train_records = train.shape[0]
test_records = test.shape[0]
# placeholder row so np.vstack has something to stack onto; it is dropped again below
X_tr = np.zeros((1, 3, 224, 224), dtype=np.float32)
Y_tr = []
detector = dlib.get_frontal_face_detector()
iteration = 0
for i in train.values[:, 0]:
    image = np.array(Image.open(i)).astype(np.uint8)
    faces = detector(image, 1)
    # keep only the images in which exactly one face is detected
    if len(faces) == 1:
        box = faces[0]
        crop = image[box.top():box.bottom(), box.left():box.right(), :]
        # resize to 224x224 and move channels first to match the (3, 224, 224) input layout
        face = resize(crop, (224, 224, 3)).transpose(2, 0, 1).reshape(1, 3, 224, 224)
        X_tr = np.vstack((X_tr, face))
        Y_tr = np.append(Y_tr, train.values[iteration, 1])
    iteration += 1
    if iteration % 50 == 0:
        print colored('%.1f percent complete' % (float(iteration) / train_records * 100), 'green')
X_tr = X_tr[1:]  # drop the placeholder row

iteration = 0
X_ts = np.zeros((1, 3, 224, 224), dtype=np.float32)
Y_ts = []
for i in test.values[:, 0]:
    image = np.array(Image.open(i)).astype(np.uint8)
    faces = detector(image, 1)
    if len(faces) == 1:
        box = faces[0]
        crop = image[box.top():box.bottom(), box.left():box.right(), :]
        face = resize(crop, (224, 224, 3)).transpose(2, 0, 1).reshape(1, 3, 224, 224)
        X_ts = np.vstack((X_ts, face))
        Y_ts = np.append(Y_ts, test.values[iteration, 1])
    iteration += 1
    if iteration % 50 == 0:
        print colored('%.1f percent complete' % (float(iteration) / test_records * 100), 'green')
X_ts = X_ts[1:]  # drop the placeholder row
print 'the training file shape',X_tr.shape,Y_tr.shape
print 'the testing file shape',X_ts.shape,Y_ts.shape
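In [ ]:
# Optional sanity check (not in the original notebook): display one of the cropped,
# channels-first faces to confirm the dlib crop and the (3, 224, 224) layout look right.
plt.imshow(X_tr[0].transpose(1, 2, 0))
plt.title(Y_tr[0])
plt.axis('off')
plt.show()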
In [ ]:
#from keras.datasets import cifar10
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
#(X_train, y_train), (X_test, y_test) = cifar10.load_data()
labels, Y_number = np.unique(Y_tr, return_inverse=True)
# map the test labels onto the same index space as the training labels
Y_test_number = np.searchsorted(labels, np.array(Y_ts))
Y_train = np_utils.to_categorical(Y_number, 7)
Y_test = np_utils.to_categorical(Y_test_number, 7)
datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(X_tr.astype(np.float32))
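In [ ]:
# A minimal sketch (not in the original notebook): the featurewise mean/std computed by
# datagen.fit can also be applied to the held-out faces, assuming the installed Keras
# exposes ImageDataGenerator.standardize, so test images see the same normalization as
# the augmented training batches.
X_ts_norm = X_ts.astype(np.float32).copy()
for k in range(len(X_ts_norm)):
    X_ts_norm[k] = datagen.standardize(X_ts_norm[k])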
In [ ]:
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1,1), input_shape=(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
model = VGG_16('/home/mckc/Downloads/vgg16_weights.h5')
# drop the 1000-way ImageNet classifier and replace it with a 7-way softmax for the face labels
model.layers.pop()
model.add(Dense(7, activation='softmax'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
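In [ ]:
# A possible refinement (an assumption, not from the original notebook): with a small face
# dataset it is common to freeze the pretrained layers and train only the new 7-way
# classifier first, then recompile so the change takes effect.
for layer in model.layers[:-1]:
    layer.trainable = False
model.compile(optimizer=sgd, loss='categorical_crossentropy')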
In [ ]:
model.fit(X_tr.reshape(-1,3,224,224),Y_train,nb_epoch=1000)
In [ ]:
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(X_tr, Y_train, batch_size=32),
                    samples_per_epoch=len(X_tr), nb_epoch=100)
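In [ ]:
# A minimal sketch (not in the original notebook): measure loss on the held-out faces built
# earlier; with the compile call above this returns only the categorical cross-entropy.
score = model.evaluate(X_ts, Y_test, batch_size=32)
print 'test loss', score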
In [ ]:
# here's a more "manual" example
nb_epoch = 100
for e in range(nb_epoch):
    print 'Epoch', e
    batches = 0
    for X_batch, Y_batch in datagen.flow(X_tr, Y_train, batch_size=32):
        loss = model.train_on_batch(X_batch, Y_batch)
        batches += 1
        if batches >= len(X_tr) / 32:
            # we need to break the loop by hand because
            # the generator loops indefinitely
            break
In [4]:
import cv2
im = cv2.resize(cv2.imread('/home/mckc/Downloads/1.jpg'), (224, 224)).astype(np.float32)
# subtract the BGR channel means used when the original VGG ImageNet weights were trained
im[:,:,0] -= 103.939
im[:,:,1] -= 116.779
im[:,:,2] -= 123.68
im = im.transpose((2,0,1))
im = np.expand_dims(im, axis=0)
out = model.predict(im)
print np.argmax(out)
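In [ ]:
# A small follow-up sketch (not in the original notebook): map the predicted class index
# back to the person's name using the `labels` array produced by np.unique above.
print labels[np.argmax(out)]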
In [25]:
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import cv2, numpy as np
def VGG_19(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1,1), input_shape=(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
model = VGG_19('/home/mckc/Downloads/vgg19_weights.h5')
# as with VGG-16, swap the 1000-way ImageNet classifier for a 7-way softmax
model.layers.pop()
model.add(Dense(7, activation='softmax'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
In [1]:
from keras.layers import Input, merge
from keras.layers.core import Flatten, Dense, Dropout, Activation
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.models import Model
# crosschannelnormalization, splittensor and Softmax4D are custom layers from the
# convnets-keras project; the import path below is an assumption about how that
# package is installed here
from convnetskeras.customlayers import crosschannelnormalization, splittensor, Softmax4D

def AlexNet(weights_path=None, heatmap=False):
    if heatmap:
        inputs = Input(shape=(3, None, None))
    else:
        inputs = Input(shape=(3, 227, 227))
    conv_1 = Convolution2D(96, 11, 11, subsample=(4,4), activation='relu',
                           name='conv_1')(inputs)
    conv_2 = MaxPooling2D((3, 3), strides=(2,2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2,2))(conv_2)
    conv_2 = merge([
        Convolution2D(128, 5, 5, activation="relu", name='conv_2_' + str(i+1))(
            splittensor(ratio_split=2, id_split=i)(conv_2)
        ) for i in range(2)], mode='concat', concat_axis=1, name="conv_2")
    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1,1))(conv_3)
    conv_3 = Convolution2D(384, 3, 3, activation='relu', name='conv_3')(conv_3)
    conv_4 = ZeroPadding2D((1,1))(conv_3)
    conv_4 = merge([
        Convolution2D(192, 3, 3, activation="relu", name='conv_4_' + str(i+1))(
            splittensor(ratio_split=2, id_split=i)(conv_4)
        ) for i in range(2)], mode='concat', concat_axis=1, name="conv_4")
    conv_5 = ZeroPadding2D((1,1))(conv_4)
    conv_5 = merge([
        Convolution2D(128, 3, 3, activation="relu", name='conv_5_' + str(i+1))(
            splittensor(ratio_split=2, id_split=i)(conv_5)
        ) for i in range(2)], mode='concat', concat_axis=1, name="conv_5")
    dense_1 = MaxPooling2D((3, 3), strides=(2,2), name="convpool_5")(conv_5)
    if heatmap:
        dense_1 = Convolution2D(4096, 6, 6, activation="relu", name="dense_1")(dense_1)
        dense_2 = Convolution2D(4096, 1, 1, activation="relu", name="dense_2")(dense_1)
        dense_3 = Convolution2D(1000, 1, 1, name="dense_3")(dense_2)
        prediction = Softmax4D(axis=1, name="softmax")(dense_3)
    else:
        dense_1 = Flatten(name="flatten")(dense_1)
        dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(1000, name='dense_3')(dense_3)
        prediction = Activation("softmax", name="softmax")(dense_3)
    model = Model(input=inputs, output=prediction)
    if weights_path:
        model.load_weights(weights_path)
    return model
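In [ ]:
# A hedged usage sketch (not in the original notebook): AlexNet expects 227x227 inputs.
# The weights file path below is hypothetical, and the mean subtraction simply mirrors the
# VGG prediction cell above rather than any preprocessing confirmed for these weights.
alex = AlexNet('/home/mckc/Downloads/alexnet_weights.h5')
im227 = cv2.resize(cv2.imread('/home/mckc/Downloads/1.jpg'), (227, 227)).astype(np.float32)
im227[:,:,0] -= 103.939
im227[:,:,1] -= 116.779
im227[:,:,2] -= 123.68
im227 = np.expand_dims(im227.transpose((2,0,1)), axis=0)
print np.argmax(alex.predict(im227))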
In [ ]: