|
#FACE RECOGNIZER
import Tkinter as tk
import cv2, sys, time, os, math
from PIL import Image, ImageTk
import numpy as numpy
import pprint
import random
import math
from os import listdir
from os.path import isfile, join
# Load the BCM V4l2 driver for /dev/video0
os.system('sudo modprobe bcm2835-v4l2')
# Set the framerate ( not sure this does anything! )
os.system('v4l2-ctl -p 4')
# Requested capture resolution (kept small for Pi-class hardware).
width, height = 320, 240
cap = cv2.VideoCapture(0)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
# LBP frontal-face cascade used by faceDetect() below.
cascPath = '/home/pi/lbpcascade_frontalface.xml'
faceCascade = cv2.CascadeClassifier(cascPath)
# Fullscreen Tk window holding a single Label that displays video frames;
# Escape quits the main loop.
root = tk.Tk()
root.attributes("-fullscreen", True)
root.bind('<Escape>', lambda e: root.quit())
lmain = tk.Label(root)
lmain.pack()
# Face tracks carried over from the previous frame; managed by faceDetect().
last_image_faces = []
# Numeric label -> person-name mapping; filled in by create_cvs().
users = []
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
def show_frame():
    """Grab one camera frame, run face detection, and show it in the Tk label.

    Reschedules itself via Tk's after() to form the video loop.
    """
    ret, frame = cap.read()
    # Fix: the original ignored the success flag; a failed capture returns
    # frame=None and cv2.flip would raise, killing the video loop. Skip the
    # bad frame but keep the loop scheduled.
    if ret and frame is not None:
        frame = cv2.flip(frame, 1)  # mirror horizontally for a natural view
        frame = faceDetect(frame)
        # PIL/Tk expect RGB(A); OpenCV frames are BGR.
        cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        img = Image.fromarray(cv2image)
        imgtk = ImageTk.PhotoImage(image=img)
        # Keep a reference on the widget so the PhotoImage isn't GC'd.
        lmain.imgtk = imgtk
        lmain.configure(image=imgtk)
    lmain.after(1, show_frame)
def faceDetect(frame):
    # Detect faces in *frame*, match them against the tracks remembered in the
    # module-level last_image_faces list, and draw boxes/labels on the frame.
    # Track entry layout (per the author's note below):
    #   [center_x, center_y, time_since_last_check, user, score]
    # where user/score only appear after a face has been through recognizeFace().
    # Do face detection
    #faces = faceCascade.detectMultiScale(frame, 1.1, 3, 0, (10, 10))
    #Slower method
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Equalize contrast before running the cascade.
    gray = cv2.equalizeHist( gray )
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=4,
        minSize=(20, 20),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE | cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT | cv2.cv.CV_HAAR_DO_ROUGH_SEARCH
    )
    print "Found {0} faces!".format(len(faces))
    global last_image_faces
    image_faces = []
    for (x, y, w, h) in faces:
        # NOTE(review): counter resets to 1 on every loop iteration, so the
        # "Tracking face N" message below always prints 1.
        counter = 1
        center_x = x + (w/2)
        center_y = y + (h/2)
        start_y = center_y - 40
        start_x = center_x - 40
        if len(last_image_faces) > 0:
            # NOTE(review): greedy pairing — always consumes the FIRST
            # remaining previous track, not the nearest one; with multiple
            # faces the pairing depends on detection order.
            pos = last_image_faces[0]
            last_image_faces.remove(pos)
            dist = math.hypot(center_x - pos[0], center_y - pos[1])
            if dist < 30:
                # Close enough to the old track: carry it forward.
                # Info = [center_x, center_y, time_since_last_check, user, score]
                center = [center_x, center_y, pos[2] + 1]
                print("Tracking face " + str(counter))
                counter = counter + 1
                if center[2] > 6:
                    # Re-identify roughly every 6 matched frames, but only if
                    # the face is far enough from the frame edge.
                    if start_x > 0 and start_y > 0:
                        face_crop = frame[y:(y+h), x:(x+w)]
                        info = recognizeFace(face_crop)
                        center[2] = 1
                        center.append(info[0])
                        center.append(info[1])
                image_faces.append(center)
                cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255), 2)
                if len(pos) > 3:
                    # Previous track already carries an identity/score: copy it.
                    center.append(pos[3])
                    center.append(pos[4])
                    # NOTE(review): scores below 2000 are drawn red with no
                    # name, >= 2000 green with a name — verify this threshold
                    # direction against the recognizer's score semantics.
                    if pos[4] < 2000:
                        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
                        cv2.putText(frame, "%.1f" % (center[4]/1000), ((x + w - 38), (y + 17)), font, 1, (0,0,255), 1, 1)
                    else:
                        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
                        cv2.putText(frame, users[center[3]], (x, (y + h + 15)), font, 1, (0,255,0), 1, 1)
                        cv2.putText(frame, "%.1f" % (center[4]/1000), ((x + w - 38), (y + 17)), font, 1, (0,255,0), 1, 1)
            else:
                # Too far from the previous track: start a fresh one.
                center = [center_x, center_y, 1]
                image_faces.append(center)
                cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255), 2)
        else:
            # No previous tracks left to match against: start a fresh one.
            center = [center_x, center_y, 1]
            image_faces.append(center)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255), 2)
    # NOTE(review): this prints the LEFTOVER previous-frame track count, not
    # the number of faces detected in this frame.
    print("Number of faces detected " + str(len(last_image_faces)))
    last_image_faces = image_faces
    return frame
def recognizeFace(face):
    """Normalize a cropped face and predict its identity with the eigenface model.

    Returns whatever model.predict() returns (indexed here as
    [0] = label, [1] = score).
    """
    print("Searching Face database...")
    # (Removed unused local `match_found` from the original.)
    # Normalize to the same size / colourspace / contrast as the stored faces.
    face = cv2.resize(face, (120, 120))
    face = cv2.cvtColor(face, cv2.cv.CV_BGR2GRAY)
    face = cv2.equalizeHist( face )
    # Archive this probe image next to the existing database snapshots,
    # numbering it one past the current database size.
    cv2.imwrite("/home/pi/photos/faces/face-" + str(len(face_db) + 1) + ".jpg", face)
    # Refresh face_db so the next archived filename index stays unique.
    loadFaceDB()
    predicted_label = predict_image_from_model(model, face)
    print('Predicted: %(predicted)s ' % {"predicted": users[predicted_label[0]]})
    print(predicted_label[1])
    return predicted_label
def loadFaceDB():
    """Reload every image under the face snapshot directory into the global
    face_db object array."""
    global face_db
    face_db_path = '/home/pi/photos/faces'
    # Plain files only — skip any subdirectories.
    db_files = [name for name in listdir(face_db_path) if isfile(join(face_db_path, name))]
    face_db = numpy.empty(len(db_files), dtype=object)
    for idx, db_file in enumerate(db_files):
        face_db[idx] = cv2.imread( join(face_db_path, db_file) )
# Face Recognition
def create_and_train_model_from_dict(label_matrix):
    """Build an eigenface recognizer and train it on a {label: image} mapping."""
    recognizer = cv2.createEigenFaceRecognizer()
    images = label_matrix.values()
    labels = numpy.array(label_matrix.keys())
    recognizer.train(images, labels)
    return recognizer
def predict_image_from_model(model, image):
    """Ask a trained recognizer for its prediction (label, confidence) on *image*."""
    prediction = model.predict(image)
    return prediction
def read_csv(filename='/home/pi/faces.csv'):
    """Open the training-index csv for reading and return the file handle.

    The caller owns the returned handle and is responsible for closing it.
    """
    return open(filename, 'r')
def prepare_training_testing_data(file):
    """Read all lines from *file*, split them, and return only the training
    portion (the testing portion is discarded)."""
    all_lines = file.readlines()
    training_data, _testing_data = split_test_training_data(all_lines)
    return training_data
def create_label_matrix_dict(input_file):
    """Create a dict of integer label -> image matrix from "filename;label" lines.

    Only the FIRST image listed for each label is kept. (The original code
    tried to accumulate extra images per label with numpy.append, but it
    looked the existing value up under the wrong key type — the string
    ``label`` instead of ``int(label)`` — and discarded numpy.append's
    return value, so extra images were never stored. First-image-wins is
    therefore the effective behavior and is preserved here explicitly.)
    """
    label_dict = {}
    for line in input_file:
        print(line)
        ## split on the ';' in the csv separating filename;label
        filename, label = line.strip().split(';')
        key = int(label)
        # Parity with the original: the image is read on every line, even
        # when its label is already present in the dict.
        matrix = read_matrix_from_file(filename)
        if key not in label_dict:
            label_dict[key] = matrix
    return label_dict
def split_test_training_data(data, ratio=0.2):
    """Randomly split *data* into (training, testing) lists.

    *ratio* is the fraction of items assigned to the testing split
    (floored to a whole count). Fix: shuffles a copy of *data* so the
    caller's list is no longer mutated in place.
    """
    shuffled = list(data)
    random.shuffle(shuffled)
    test_size = int(math.floor(ratio * len(shuffled)))
    return shuffled[test_size:], shuffled[:test_size]
def read_matrix_from_file(filename):
    """Load the image at *filename* as a grayscale matrix.

    Uses the legacy OpenCV 2.x grayscale flag, matching the rest of the file.
    """
    matrix = cv2.imread(filename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    return matrix
def create_cvs():
    """Walk the recognized-faces tree and write a "path;label" csv index.

    Each subdirectory of BASE_PATH is one person: its name is appended to
    the global ``users`` list, and its position order becomes the numeric
    label written for every image file inside it.

    NOTE(review): this writes "faces.csv" relative to the current working
    directory, while read_csv() defaults to /home/pi/faces.csv — the two
    only agree when the script runs from /home/pi. Confirm before moving
    the script.
    """
    BASE_PATH = "/home/pi/photos/recognized_faces"
    SEPARATOR = ";"
    label = 0
    # Fix: mode "w" truncates on open, replacing the original separate
    # open(...,'w').close() truncation followed by an append-mode open.
    with open("faces.csv", "w") as myfile:
        for dirname, dirnames, filenames in os.walk(BASE_PATH):
            for subdirname in dirnames:
                users.append(subdirname)
                subject_path = os.path.join(dirname, subdirname)
                for filename in os.listdir(subject_path):
                    abs_path = "%s/%s" % (subject_path, filename)
                    myfile.write("%s%s%d\n" % (abs_path, SEPARATOR, label))
                label = label + 1
# Face Recognition vars
# Bootstrap sequence: build the csv index of known faces, train the
# eigenface model from it, load the snapshot database, then start the
# Tk video loop.
create_cvs()
training_data = prepare_training_testing_data(read_csv())
data_dict = create_label_matrix_dict(training_data)
model = create_and_train_model_from_dict(data_dict)
loadFaceDB()
show_frame()
root.mainloop()
|