In [1]:
import cv2
import numpy as np
import scipy.misc
import os
import dlib
import time
# All saved face crops land in this directory.
# NOTE(review): hardcoded absolute path — fails on any other machine;
# consider a configurable DATA_DIR instead.
os.chdir("/home/mckc/Imagedb/")
import uuid
#%matplotlib inline
# Open the default webcam (device index 0) and build dlib's frontal
# face detector; both are reused by the capture loops below.
video_capture = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
In [2]:
# Cell 2: capture frames from the webcam, detect faces with dlib,
# draw bounding boxes, and save each face crop as a uniquely-named JPEG.
while True:
    # Capture frame-by-frame; bail out if the camera stops delivering.
    # (Original code ignored `ret`, which hands dlib a None frame and crashes.)
    ret, frame = video_capture.read()
    if not ret:
        break

    # Second argument = number of times to upsample, so smaller faces are found.
    faces = detector(frame, 1)

    # Draw a rectangle around the faces and save each crop.
    for face in faces:
        # dlib can report coordinates outside the image for faces near the
        # border; clamp before slicing — negative indices silently wrap.
        top = max(face.top(), 0)
        left = max(face.left(), 0)
        crop = frame[top:face.bottom(), left:face.right()]

        cv2.rectangle(frame, (face.left(), face.top()),
                      (face.right(), face.bottom()), (0, 255, 0), 2)
        #cv2.putText(frame,'omar',(b.left(),b.bottom()), cv2.FONT_HERSHEY_DUPLEX,1,(0,0,255), 2,8)

        # cv2.imwrite expects BGR — exactly what the camera delivers — so no
        # colour conversion is needed.  This replaces scipy.misc.toimage,
        # which was deprecated and removed in SciPy 1.2.
        if crop.size:
            cv2.imwrite(str(uuid.uuid4()) + '.jpg', crop)

    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
In [4]:
# Cell 3: same live preview, but sourced from a Raspberry Pi camera via
# picamera's continuous-capture API instead of cv2.VideoCapture.
from picamera.array import PiRGBArray
from picamera import PiCamera

camera = PiCamera()
#camera.resolution = (1008, 752)
camera.video_stabilization = True
camera.framerate = 10
#camera.vflip = True
#camera.hflip = True
#camera.zoom = (0,0,0.75,0.75)
rawCapture = PiRGBArray(camera, size=(640, 480))

cv2.namedWindow('Video', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Video', 640, 480)

# format="bgr" yields frames in OpenCV's native channel order.
for raw_frame in camera.capture_continuous(rawCapture, format="bgr",
                                           use_video_port=True):
    # grab the raw NumPy array representing the image
    frame = raw_frame.array

    # Window name must match the one created above ('Video', capital V);
    # the original used 'video', which opened a second, unresized window.
    cv2.imshow('Video', frame)

    # waitKey both pumps the GUI event loop (imshow never renders without
    # it) and gives the user a way to exit the loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    # PiRGBArray reuses its buffer: it must be truncated between frames,
    # otherwise capture_continuous raises on the second iteration.
    rawCapture.truncate(0)

# Release the camera and close the preview window.
camera.close()
cv2.destroyAllWindows()
In [ ]: