In [14]:
import matplotlib.pyplot as plt
import caffe
import lmdb
import glob
import cv2
import uuid
from caffe.proto import caffe_pb2
import numpy as np

In [16]:
#collect the raw image paths from the two capture folders
original_data_set = [img for img in glob.glob("img/original_data/a/*.jpg")]
target_data_set = [img for img in glob.glob("img/original_data/b/*.jpg")]
original_faces = []
#print(original_data_set)
IMAGE_WIDTH = 227
IMAGE_HEIGHT = 227

In [8]:
def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):
    #resize to the fixed network input size
    img = cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_CUBIC)
    return img

In [9]:
for in_idx, img_path in enumerate(original_data_set):
    img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
    img = transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
    original_faces.append(img)

In [10]:
print(original_faces[0].shape) #227,227,3
print(len(original_faces))


---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-10-2e9476241c3e> in <module>()
----> 1 print(original_faces[0].shape) #227,227,3
      2 print(len(original_faces))

IndexError: list index out of range
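
(The glob pattern matched no files when this cell was re-run, so original_faces is empty; the loading cells above need the images present under img/original_data/a/ for this check to pass.)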

In [27]:
#start manual labeling
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import display, clear_output

In [28]:
class Trainer:
    def __init__(self):
        self.results = {}
        self.imgs = original_faces
        self.index = 0

    def increment_face(self):
        if self.index + 1 >= len(self.imgs):
            return self.index
        else:
            #skip past any faces that have already been labeled
            while str(self.index) in self.results:
                print(self.index)
                self.index += 1
            return self.index

    def record_result(self, showingTeeth=True):
        self.results[str(self.index)] = showingTeeth

In [29]:
trainer = Trainer()

In [31]:
button_teeth = widgets.Button(
    description='(YES)showing teeth',
    disabled=False,
    button_style='', 
    tooltip='Click me',
    icon='check'
)

button_no_teeth = widgets.Button(
    description='NOT showing teeth',
    disabled=False,
    button_style='',
    tooltip='Click me',
    icon='check'
)
status = widgets.HTML(
    value="Hello <b>"+str(trainer.index)+"</b>",
    placeholder='Some HTML',
    description='Some HTML',
    disabled=False
)
def display_face(face):
    clear_output()
    #OpenCV loads images as BGR; convert to RGB for matplotlib
    rgb_image = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    plt.imshow(rgb_image)
    plt.axis('off')
    plt.show()

def update_teeth(b):
    trainer.record_result(showingTeeth=True)
    trainer.increment_face()
    display_face(trainer.imgs[trainer.index])
    

def update_no_teeth(b):
    trainer.record_result(showingTeeth=False)
    trainer.increment_face()
    display_face(trainer.imgs[trainer.index])

button_no_teeth.on_click(update_no_teeth)
button_teeth.on_click(update_teeth)


#display(status)
display(button_teeth)
display(button_no_teeth)
display_face(trainer.imgs[trainer.index])



In [32]:
#when manual labeling is done,
#save the transformed faces along with their labels (the label is encoded in the filename via a "showingteeth" suffix)
#one time process, disabled by default
import json
transformed_data_set_path = "classified_data/"

def generate_classified_data():
    #persist the labeling results (the file holds JSON, despite the .xml extension)
    with open('results.xml', 'w') as f:
        json.dump(trainer.results, f)

    results = json.load(open('results.xml'))
    #split the labeled indices by class
    showingTeeth_indices = [int(i) for i in results if results[i]]
    not_showingTeeth_indices = [int(i) for i in results if not results[i]]

    #save all the labeled images to the classified folder, plus a mirrored copy of each for extra data

    for i in showingTeeth_indices:
        #uuid4().urn is "urn:uuid:<hex>"; str(uuid.uuid4()) yields the bare hex directly
        path = transformed_data_set_path + str(uuid.uuid4()) + "_showingteeth.jpg"
        cv2.imwrite(path, original_faces[i])
        #mirrored version
        path = transformed_data_set_path + str(uuid.uuid4()) + "_showingteeth.jpg"
        cv2.imwrite(path, cv2.flip(original_faces[i], 1))

    for i in not_showingTeeth_indices:
        path = transformed_data_set_path + str(uuid.uuid4()) + ".jpg"
        cv2.imwrite(path, original_faces[i])
        #mirrored version
        path = transformed_data_set_path + str(uuid.uuid4()) + ".jpg"
        cv2.imwrite(path, cv2.flip(original_faces[i], 1))
        
#one time process, disabled by default
#generate_classified_data()

In [ ]:
#move a subset of the classified data to the classified validation data (manually)
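
If you would rather script that step, here is a minimal sketch (assuming the classified_data/ and classified_validation_data/ folders used below, and a hypothetical 10% hold-out):

In [ ]:
#optional alternative to the manual move: hold out a random subset for validation
import os
import random
import shutil

def split_validation(src_folder, dst_folder, fraction=0.1, seed=42):
    files = glob.glob(src_folder + "/*.jpg")
    random.seed(seed)  #fixed seed so the split is reproducible
    held_out = random.sample(files, int(len(files) * fraction))
    for f in held_out:
        shutil.move(f, os.path.join(dst_folder, os.path.basename(f)))

#one time process, disabled by default
#split_validation("classified_data", "classified_validation_data")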

In [34]:
#use Haar-cascade feature detectors to slice the mouth region out of the classified folders; these crops will be our true training data
import cv2
import numpy as np
import glob
import uuid

training_data_path = "training_data"
validation_data_path = "validation_data"

classified_data_path = "classified_data"
classified_validation_data_path = "classified_validation_data"

def mouth_detect_bulk(input_folder,output_folder):
    
    face_cascade = cv2.CascadeClassifier('haarcascade/haarcascade_frontalface_default.xml')
    mouth_cascade = cv2.CascadeClassifier('haarcascade/mouth.xml')
    transformed_data_set = [img for img in glob.glob(input_folder+"/*.jpg")]

    img_width = 50
    img_height = 50

    for in_idx, img_path in enumerate(transformed_data_set):
        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
        #gray_img = img
        faces = face_cascade.detectMultiScale(gray_img, 1.05, 5)
        #print faces

        for (p,q,r,s) in faces:
            cv2.rectangle(img,(p,q),(p+r,q+s),(255,0,0),2)
            face_gray = gray_img[q:q+s, p:p+r]
            mouth = mouth_cascade.detectMultiScale(face_gray)
            #heuristic filter: only use faces where exactly three mouth candidates were found
            if len(mouth) == 3:
                #the real mouth is the lowest rectangle in the face, i.e. the biggest y
                biggestYIndex = int(np.argmax([m[1] for m in mouth]))
                mp, mq, mr, ms = mouth[biggestYIndex]
                #copy the crop before drawing, so the marker rectangle does not bleed into it
                crop_img = face_gray[mq:mq+ms, mp:mp+mr].copy()
                cv2.rectangle(face_gray,(mp,mq),(mp+mr,mq+ms), (255,255,255),2)
                if 'showingteeth' in img_path:
                    path = output_folder+"/"+str(uuid.uuid4())+"_showingteeth.jpg"
                else:
                    path = output_folder+"/"+str(uuid.uuid4())+".jpg"
                crop_img_resized = cv2.resize(crop_img, (img_width, img_height), interpolation=cv2.INTER_CUBIC)
                cv2.imwrite(path, crop_img_resized)


    
#generate training data    
mouth_detect_bulk(classified_data_path,training_data_path)
mouth_detect_bulk(classified_validation_data_path,validation_data_path)
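
As a quick sanity check (a sketch assuming the folder layout above), count how many mouth crops ended up with each label:

In [ ]:
#how many crops of each label did the detector produce?
teeth = len(glob.glob(training_data_path + "/*_showingteeth.jpg"))
total = len(glob.glob(training_data_path + "/*.jpg"))
print("showing teeth: %d / %d" % (teeth, total))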

In [39]:
#create the LMDB database for the training set
IMAGE_WIDTH = 50
IMAGE_HEIGHT = 50

train_lmdb = "lmdb/train_lmdb"
train_data = [img for img in glob.glob("training_data/*.jpg")]
print(len(train_data))

def make_datum(img, label):
    #single-channel (grayscale) datum; data is the raw height x width byte buffer
    return caffe_pb2.Datum(
        channels=1,
        width=IMAGE_WIDTH,
        height=IMAGE_HEIGHT,
        label=label,
        data=img.tobytes())

def make_lmdb(data_input, output_path):
    in_db = lmdb.open(output_path, map_size=10000000)
    with in_db.begin(write=True) as in_txn:
        for in_idx, img_path in enumerate(data_input):
            #the mouth crops are grayscale, so read them as one channel to match channels=1
            img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
            if 'showingteeth' in img_path:
                label = 1
            else:
                label = 0
            datum = make_datum(img, label)
            in_txn.put('{:0>5d}'.format(in_idx).encode(), datum.SerializeToString())
    in_db.close()

make_lmdb(train_data,train_lmdb)


601

In [40]:
#create the LMDB database for the validation set
val_lmdb = "lmdb/val_lmdb"
val_data = [img for img in glob.glob("validation_data/*.jpg")]
print(len(val_data))

make_lmdb(val_data,val_lmdb)


47
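
To verify the LMDB contents (a minimal spot-check sketch using the lmdb and caffe_pb2 imports above), read the first record back and confirm its label and shape:

In [ ]:
#read one datum back from the training LMDB as a spot check
env = lmdb.open(train_lmdb, readonly=True)
with env.begin() as txn:
    raw = txn.get(b'00000')
datum = caffe_pb2.Datum()
datum.ParseFromString(raw)
img = np.frombuffer(datum.data, dtype=np.uint8).reshape(datum.channels, datum.height, datum.width)
print(datum.label, img.shape)  #expect a 0/1 label and (1, 50, 50)
env.close()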

In [ ]: