#!/usr/bin/env python
"""Face-tracking camera preview for a Raspberry Pi pan/tilt rig.

Captures frames from /dev/video0 (BCM V4L2 driver), detects the largest
face with an LBP cascade, draws a rectangle around it, nudges the
(currently disabled) pan/tilt servos toward the face centre, and shows
the annotated frame in a Tkinter window.

Python 2 / OpenCV 2.x script (uses Tkinter and the legacy cv2.cv
constants).
"""
import cv2
import sys
import time
import os
#from pantilt import *
import Tkinter as tk
import PIL
from PIL import ImageTk
from PIL import Image

# Load the BCM V4L2 driver so the Pi camera appears as /dev/video0.
os.system('sudo modprobe bcm2835-v4l2')

# Request a low frame rate ( not sure this does anything! )
os.system('v4l2-ctl -p 4')

# Frame size. Smaller is faster, but less accurate.
# Wide and short is better, since moving your head
# vertically is kinda hard!
FRAME_W = 320
FRAME_H = 240

# Default pan/tilt for the camera in degrees (servo range is 0..180).
cam_pan = 70
cam_tilt = 70

# Set up the CascadeClassifier for face tracking.
#cascPath = 'haarcascade_frontalface_default.xml'  # sys.argv[1]
cascPath = '/home/pi/lbpcascade_frontalface.xml'
faceCascade = cv2.CascadeClassifier(cascPath)

# Set up the capture with our frame size (legacy OpenCV 2.x property names).
video_capture = cv2.VideoCapture(0)
video_capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, FRAME_W)
video_capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, FRAME_H)
time.sleep(2)  # give the camera time to warm up

window = tk.Tk()  # main window
window.wm_title("Digital Microscope")
window.config(background="#FFFFFF")
#window.attributes("-fullscreen", True)

# BUG FIX: the original called window.mainloop() here, which blocks
# forever, so the capture loop below never ran.  Instead we create the
# image Label ONCE, and pump Tk events manually with window.update()
# inside the capture loop.
panel = tk.Label(window)
panel.pack()

# Turn the camera to the default position.
#pan(cam_pan)
#tilt(cam_tilt)

while True:
    # Capture frame-by-frame.
    ret, frame = video_capture.read()
    if not ret:
        print("Error getting image")
        continue

    # Convert to greyscale (and equalise) for detection.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    # Do face detection.
    #faces = faceCascade.detectMultiScale(frame, 1.1, 3, 0, (10, 10))  # Slower method
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=4,
        minSize=(20, 20),
        flags=(cv2.cv.CV_HAAR_SCALE_IMAGE
               | cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT
               | cv2.cv.CV_HAAR_DO_ROUGH_SEARCH)
    )

    print("Found {0} faces!".format(len(faces)))

    # Track the first (biggest, per the flags above) face only.
    for (x, y, w, h) in faces:
        # Draw a green rectangle around the face.
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # BUG FIX: the original measured the offset from the face's
        # top-left corner (x, y); use the face CENTRE so the camera
        # aims at the middle of the face, as the comments intended.
        # Offset of face centre relative to the image centre...
        turn_x = float((x + w / 2) - (FRAME_W / 2))
        turn_y = float((y + h / 2) - (FRAME_H / 2))

        # ...as a fraction of the half-frame...
        turn_x /= float(FRAME_W / 2)
        turn_y /= float(FRAME_H / 2)

        # ...scaled to degrees of servo movement per frame.
        turn_x *= 2.5  # VFOV
        turn_y *= 2.5  # HFOV
        cam_pan += -turn_x
        cam_tilt += turn_y

        # Clamp pan/tilt to the servos' 0..180 degree range.
        # cam_pan = max(0, min(180, cam_pan))
        # cam_tilt = max(0, min(180, cam_tilt))

        # Update the servos (hardware currently disabled).
        # pan(cam_pan)
        # tilt(cam_tilt)
        break

    # Display the annotated frame in the Tk window.  OpenCV frames are
    # BGR; PIL wants RGB, so swap the channels first.
    #cv2.imshow('Video', frame)
    b, g, r = cv2.split(frame)
    rgb = cv2.merge((r, g, b))
    im = Image.fromarray(rgb)
    imgtk = ImageTk.PhotoImage(image=im)
    # BUG FIX: the original created a fresh, never-packed Label every
    # frame, so nothing was displayed and widgets leaked.  Update the
    # single Label instead, keeping a Python reference to the photo so
    # Tk does not garbage-collect it mid-display.
    panel.configure(image=imgtk)
    panel.image = imgtk
    window.update()  # pump Tk events without blocking in mainloop()

    # NOTE(review): without a cv2.imshow window, waitKey only drains
    # HighGUI events; 'q' will only be seen if an OpenCV window has
    # focus.  Kept for parity with the original.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture.
video_capture.release()
cv2.destroyAllWindows()