|
import Tkinter as tk
import cv2, sys, time, os, math
from PIL import Image, ImageTk
import numpy as numpy
from os import listdir
from os.path import isfile, join
# Load the BCM V4l2 driver for /dev/video0
# (makes the Pi camera module appear as a standard V4L2 webcam)
os.system('sudo modprobe bcm2835-v4l2')
# Set the framerate ( not sure this does anything! )
os.system('v4l2-ctl -p 4')
# Capture resolution; kept small so detection runs at interactive rates on a Pi.
width, height = 320, 240
cap = cv2.VideoCapture(0)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
# LBP frontal-face cascade (faster than Haar on low-power hardware).
cascPath = '/home/pi/lbpcascade_frontalface.xml'
faceCascade = cv2.CascadeClassifier(cascPath)
# Fullscreen Tk window with a single Label used as the video canvas;
# Escape quits the main loop.
root = tk.Tk()
root.attributes("-fullscreen", True)
root.bind('<Escape>', lambda e: root.quit())
lmain = tk.Label(root)
lmain.pack()
# Face centres found in the previous frame; used by faceDetect() to decide
# whether a face is already being tracked.
last_image_faces = []
def show_frame():
    """Grab one camera frame, run face detection, paint it into the Tk
    label, and reschedule itself via Tk's event loop.

    Side effects: updates the global label `lmain`; reads the global
    capture `cap`.
    """
    ret, frame = cap.read()
    if not ret or frame is None:
        # Camera grab failed (driver hiccup / device busy): retry shortly
        # instead of crashing on cv2.flip(None, ...).
        lmain.after(10, show_frame)
        return
    frame = cv2.flip(frame, 1)  # mirror horizontally for a natural "selfie" view
    frame = faceDetect(frame)
    # Tk/PIL expect RGB(A); OpenCV delivers BGR.
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
    img = Image.fromarray(cv2image)
    imgtk = ImageTk.PhotoImage(image=img)
    # Keep a reference on the widget so the PhotoImage is not garbage-collected.
    lmain.imgtk = imgtk
    lmain.configure(image=imgtk)
    lmain.after(1, show_frame)
def faceDetect(frame):
    """Detect faces in `frame`, draw a rectangle around each (red = new
    face, green = already tracked), and trigger recognition for new faces.

    Updates the global `last_image_faces` with this frame's face centres
    and returns the annotated frame.
    """
    # Do face detection
    #faces = faceCascade.detectMultiScale(frame, 1.1, 3, 0, (10, 10))
    #Slower method
    # Grayscale + histogram equalisation improves cascade robustness to lighting.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist( gray )
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=4,
        minSize=(20, 20),
        # FIND_BIGGEST_OBJECT: the cascade is asked for the largest face only.
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE | cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT | cv2.cv.CV_HAAR_DO_ROUGH_SEARCH
    )
    print "Found {0} faces!".format(len(faces))
    global last_image_faces
    image_faces = []
    for (x, y, w, h) in faces:
        # Draw a green rectangle around the face
        face = frame[y:(y+h), x:(x+w)]  # crop of the face region (BGR)
        center_x = x + (w/2)
        center_y = y + (h/2)
        center = [center_x, center_y]
        image_faces.append(center)
        # A face is "tracked" if its centre is within 30 px of any face
        # centre seen in the previous frame.
        tracking = False
        for pos in last_image_faces:
            #dist = sqrt( (center_x - pos[0])**2 + (center_y - pos[1])**2 )
            dist = math.hypot(center_x - pos[0], center_y - pos[1])
            print("Distance from last point " + str(dist))
            if dist < 30:
                tracking = True
        if tracking == False:
            # New face: red box, and attempt recognition / enrolment.
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
            recognizeFace(face)
        else:
            # Known (tracked) face: green box, no recognition needed.
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        # NOTE(review): original indentation was lost; this `break` appears to
        # exit after the first detected face (consistent with the
        # FIND_BIGGEST_OBJECT flag above) — confirm intended placement.
        break
    last_image_faces = image_faces
    return frame
def recognizeFace(face):
    """Search the in-memory face database for a match to `face` using ORB
    features; if no match is found, save the face as a new database entry
    and reload the database.

    `face` is a BGR crop from the camera frame. Reads the global
    `face_db`; may write a JPEG under /home/pi/photos/faces and call
    loadFaceDB().
    """
    print("Searching Face database...")
    # NOTE(review): the query is converted to RGBA here, while database
    # images are loaded as BGR by cv2.imread in loadFaceDB() — the colour
    # spaces differ between query and database; confirm this is intended.
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGBA)
    # Hoisted out of the loop: the detector, the matcher and the query
    # image's keypoints/descriptors are identical for every database entry.
    orb = cv2.ORB()
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    kp1, des1 = orb.detectAndCompute(face, None)
    count = 0
    match_found = False
    # detectAndCompute returns None descriptors when no features are found;
    # bf.match would crash on None, so guard both sides.
    if des1 is not None:
        for f in face_db:
            count = count + 1
            kp2, des2 = orb.detectAndCompute(f, None)
            if des2 is None:
                continue  # stored image yielded no features; skip it
            matches = bf.match(des1, des2)
            if len(matches) > 0:
                # found match
                print("Match Found! (" + str(count) +")")
                match_found = True
                break
    if not match_found:
        # Save picture and refresh the database so it is matched next time.
        print("No match found! Searched " + str(count) + " records. Saving image.")
        cv2.imwrite("/home/pi/photos/faces/face-" + str(len(face_db)) + ".jpg", face)
        loadFaceDB()
def loadFaceDB(face_db_path='/home/pi/photos/faces'):
    """(Re)load every stored face image into the global `face_db`.

    face_db_path -- directory containing previously-saved face crops;
    defaults to the original hard-coded location, so existing callers
    (which pass no argument) are unaffected.

    Sets the global `face_db` to a numpy object array of BGR images
    (entries are None for files cv2.imread cannot decode).
    """
    # Load faces
    onlyfiles = [ f for f in listdir(face_db_path) if isfile(join(face_db_path, f)) ]
    global face_db
    face_db = numpy.empty(len(onlyfiles), dtype=object)
    for n, fname in enumerate(onlyfiles):
        face_db[n] = cv2.imread(join(face_db_path, fname))
# Bootstrap: load the saved-face database, kick off the capture/display
# loop, then hand control to Tk's event loop.
loadFaceDB()
show_frame()
root.mainloop()
|