In [1]:
# Warm-up: imports and small image helpers
import sys, time
import ipywidgets as widget
from ipywidgets import interact
from IPython.display import display
import numpy as np
import cv2
from PIL import Image
from io import BytesIO
def to_pil(ima):
    # Convert a numpy image (float in [0,1] or uint8) to a PIL Image
    if ima.dtype == np.float64:
        ima = (ima*255).clip(0,255).astype('uint8')
    return Image.fromarray(ima)

def img_to_png(ima, cvt=None):
    # Encode a numpy image as PNG bytes, optionally converting the colour space first
    if cvt:
        ima = cv2.cvtColor(ima, cvt)
    im = to_pil(ima)
    bio = BytesIO()
    im.save(bio, format='png')
    return bio.getvalue()
In [2]:
# Load the pre-trained Haar cascades for face and eye detection
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
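If either XML path is wrong, CascadeClassifier fails silently and simply detects nothing. A minimal sanity check, assuming the haarcascades/ layout used above:

assert not face_cascade.empty(), "could not load haarcascade_frontalface_default.xml"
assert not eye_cascade.empty(), "could not load haarcascade_eye.xml"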
In [3]:
# A close-up competition photo from the Dong Hwa magic club
img = Image.open("img/magic_faces.jpg")
# Convert to a numpy array
img = np.array(img)
# Upscale 2x so the faces are easier to find
img = cv2.resize(img, (img.shape[1]*2, img.shape[0]*2))
# Grayscale version for the detectors
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Show the image
to_pil(img)
Out[3]:
In [4]:
# Look for faces in the image
faces = face_cascade.detectMultiScale(gray, 1.1, 5)
# Then search for eyes inside each detected face
for (x,y,w,h) in faces:
    # Draw a green rectangle around the face
    cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
    # Focus on the small rectangular face region
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]
    # Look for eyes inside the grayscale face region
    eyes = eye_cascade.detectMultiScale(roi_gray)
    # Draw boxes around the eyes
    for (ex,ey,ew,eh) in eyes:
        cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(255,0,255),2)
to_pil(img)
Out[4]:
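The 1.1 and 5 passed to detectMultiScale above are scaleFactor and minNeighbors. If faces are missed or spurious boxes appear, these are the knobs to turn; a sketch with explicit keywords (the values here are illustrative, not tuned):

faces = face_cascade.detectMultiScale(
    gray,
    scaleFactor=1.05,   # smaller scale step: slower, but scans more sizes
    minNeighbors=6,     # more required neighbours: fewer false positives
    minSize=(30, 30),   # ignore detections smaller than 30x30 pixels
)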
In [5]:
# Another way to detect faces: dlib's HOG-based frontal face detector
import dlib
fd = dlib.get_frontal_face_detector()
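Instead of resizing the input by hand, dlib's detector can upsample the image itself; a minimal sketch, where the second argument is the number of upsampling passes:

test = np.array(Image.open("img/magic_faces.jpg"))
rects = fd(test, 1)            # upsample once before detecting
print(len(rects), "face(s) found")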
In [7]:
def find_faces(fn):
    # Detect faces with dlib, then eyes (Haar cascade) inside each face box
    img = Image.open(fn)
    img = np.array(img)
    img = cv2.resize(img, (img.shape[1]*2, img.shape[0]*2))
    #img = cv2.resize(img, (200,400))
    rl = fd(img)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    #faces = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(5,5), maxSize=(1000,1000))
    #for (x,y,w,h) in faces:
    for r in rl:
        x,y,w,h = r.left(), r.top(), r.width(), r.height()
        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex,ey,ew,eh) in eyes:
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(255,0,255),2)
    return to_pil(img)
find_faces("img/magic_faces.jpg")
Out[7]:
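The img_to_png helper from the first cell is never exercised above; a small sketch that runs the annotated result through it and writes the bytes to disk (the output filename is an assumption):

annotated = np.array(find_faces("img/magic_faces.jpg"))
with open("magic_faces_annotated.png", "wb") as f:   # assumed output filename
    f.write(img_to_png(annotated))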
In [8]:
#!wget dropbox-url
#!unzip x zipfile
#!ls
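The shell commands above are placeholders for fetching the photo archive. A pure-Python alternative using the standard zipfile module (the archive name akb48.zip is hypothetical; only the akb48/ folder name is used later):

import zipfile
with zipfile.ZipFile("akb48.zip") as zf:   # hypothetical archive name
    zf.extractall(".")                     # assumes the archive contains the akb48/ folder of jpg files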
In [9]:
import glob
# Collect 96x96 face crops (colour + histogram-equalized grayscale) from every jpg
color_faces = []
gray_faces = []
for fn in glob.glob("akb48/*.jpg"):
    img = Image.open(fn)
    img = np.array(img)
    img0 = img = cv2.resize(img, (img.shape[1]*2, img.shape[0]*2))
    #rows,cols = img0.shape[:2]
    #for deg in range(-45, 46, 45):
    #    M = cv2.getRotationMatrix2D((cols/2,rows/2),deg,1)
    #    img = cv2.warpAffine(img0,M,(cols,rows))
    if 1:  # stand-in for the disabled rotation-augmentation loop above
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        #faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        #for (x,y,w,h) in faces:
        rl = fd(img)
        for r in rl:
            x,y,w,h = r.left(), r.top(), r.width(), r.height()
            if x<0 or y<0:
                continue
            roi_color = cv2.resize(img[y:y+h, x:x+w], (96,96))
            roi_gray = cv2.cvtColor(roi_color, cv2.COLOR_RGB2GRAY)
            roi_gray = cv2.equalizeHist(roi_gray)
            color_faces.append(roi_color.copy())
            gray_faces.append(roi_gray)
            # Preview every 10 collected faces as a strip
            if len(color_faces)%10 == 0:
                out_img = np.concatenate(color_faces[-10:], axis=1)
                display(to_pil(out_img))
            #display(to_pil(roi_gray))
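A quick check that the two lists stayed aligned and that every crop has the expected 96x96 shape:

print(len(color_faces), "colour crops,", len(gray_faces), "grayscale crops")
assert len(color_faces) == len(gray_faces)
assert all(f.shape == (96, 96) for f in gray_faces)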
In [10]:
# Shuffle with a shared permutation so colour and grayscale crops stay aligned
idx = np.arange(len(gray_faces))
np.random.shuffle(idx)
gray_faces = np.array(gray_faces)
color_faces = np.array(color_faces)
gray_faces = gray_faces[idx]
color_faces = color_faces[idx]
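The shuffle is not seeded, so the 100 held-out faces below change on every run; a minimal sketch if a repeatable split is wanted (the seed value 0 is arbitrary):

rng = np.random.RandomState(0)      # arbitrary seed for a reproducible shuffle
idx = rng.permutation(len(gray_faces))
gray_faces, color_faces = gray_faces[idx], color_faces[idx]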
In [11]:
# LBPH face recognizer from the opencv-contrib "face" module
model = cv2.face.createLBPHFaceRecognizer()
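createLBPHFaceRecognizer was renamed in later OpenCV releases; a hedged fallback, assuming one of the two names exists in the installed contrib build:

try:
    model = cv2.face.LBPHFaceRecognizer_create()   # OpenCV >= 3.3
except AttributeError:
    model = cv2.face.createLBPHFaceRecognizer()    # older OpenCV 3.x contrib builds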
In [12]:
# Each training crop gets its own index as its label; the last 100 crops are held out
labels = np.arange(len(gray_faces)-100)
In [13]:
# Train on all crops except the 100 held-out test faces
model.train(gray_faces[:-100], labels)
In [14]:
# For each of the 100 held-out faces, show it next to the closest training face
for i in range(100):
    r = model.predict(gray_faces[-i-1])
    print(r)
    img1 = color_faces[-i-1]
    img2 = color_faces[r[0]]
    display(to_pil(np.concatenate([img1, img2], axis=1)))
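Since each label is just the index of a training crop, r[0] looks up the most similar training face. predict also returns an LBPH distance, so poor matches can be rejected; a sketch assuming predict returns a (label, distance) pair as in the loop above (the cutoff of 80 is an arbitrary assumption):

label, distance = model.predict(gray_faces[-1])
if distance < 80:    # lower LBPH distance means a closer match; 80 is a guess
    display(to_pil(np.concatenate([color_faces[-1], color_faces[label]], axis=1)))
else:
    print("no close match (distance = %.1f)" % distance)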