I am attempting to combine two scripts to get facial recognition working with a live video feed from my laptop webcam. I have one working OpenCV script that views live footage from my webcam, and another facial-detection script that runs a Haar classifier on a still JPEG image. I am working with Python 3.6 and OpenCV (cv2). The script below works for viewing a live feed through my laptop web camera.
import numpy as np
import cv2, time

# Open the default webcam (device index 0).
video = cv2.VideoCapture(0)

frame_count = 0
while True:
    frame_count += 1
    # check is False when no frame could be read (camera busy/unplugged);
    # the original ignored it and would crash in cvtColor on a None frame.
    check, frame = video.read()
    if not check:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("Capturing", gray)
    # A single waitKey call both pumps the GUI event loop and reads the key.
    # The original called waitKey twice per iteration, so the first call
    # could consume the 'q' keypress and the loop would never exit.
    key = cv2.waitKey(1)
    if key == ord('q'):
        break

print(frame_count)
video.release()
# Close the display window (the original leaked it on exit).
cv2.destroyAllWindows()
And I have this script working, which draws a box around a face using the Haar classifier (wrapped in a function) on a still .jpeg image. How would I go about combining these two scripts so the Haar classifier runs face detection on the live video feed? The Haar classifier XML and the JPEG are files in a local directory on my PC.
import cv2
import matplotlib.pyplot as plt
import time
def detect_faces(f_cascade, colored_img, scaleFactor = 1.1, minNeighbors = 5):
    """Return a copy of colored_img with a green rectangle drawn around each detected face.

    f_cascade: a cv2.CascadeClassifier loaded from a Haar cascade XML file.
    colored_img: BGR image as returned by cv2.imread / VideoCapture.read.
    scaleFactor: how much the image is shrunk at each detection scale (>1.0).
    minNeighbors: how many overlapping candidates a detection must have to
        be kept (generalized from the previously hard-coded 5; default
        preserves the old behavior).
    """
    # Work on a copy so the caller's image is never drawn on.
    img_copy = colored_img.copy()
    # Haar cascades operate on grayscale input.
    gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
    faces = f_cascade.detectMultiScale(gray, scaleFactor=scaleFactor, minNeighbors=minNeighbors)
    for (x, y, w, h) in faces:
        cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return img_copy
# Load the test image and the Haar face cascade from local paths.
test2 = cv2.imread('C:/Python/opencv/sAndb.jpg')
haar_face_cascade = cv2.CascadeClassifier('C:/Python/opencv/opencv-master/opencv-master/data/haarcascades/haarcascade_frontalface_alt.xml')
faces_detected_img = detect_faces(haar_face_cascade, test2)
cv2.imshow('Faces', faces_detected_img)
# Without a waitKey the window is torn down as soon as the script exits;
# waitKey(0) blocks until any key is pressed, then we clean up the window.
cv2.waitKey(0)
cv2.destroyAllWindows()
Try this:
import numpy as np
import cv2, time

# Load the Haar cascade ONCE, before the capture loop — loading it per
# frame would be needlessly slow.
haar_face_cascade = cv2.CascadeClassifier('C:/Python/opencv/opencv-master/opencv-master/data/haarcascades/haarcascade_frontalface_alt.xml')
video = cv2.VideoCapture(0)

frame_count = 0
while True:
    frame_count += 1
    check, frame = video.read()
    # Guard against a failed read (camera busy/unplugged) instead of
    # crashing inside cvtColor on a None frame.
    if not check:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # BUG FIX: the original referenced the undefined names f_cascade and
    # scaleFactor here (NameError at runtime) — use the cascade loaded
    # above and a literal scale factor instead.
    faces = haar_face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    # Draw a red box around every detected face on the color frame.
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imshow("Face Detector", frame)
    # Single waitKey call: it both pumps the GUI event loop and reads the
    # key. Calling it twice per frame (as the original did) could swallow
    # the 'q' keypress in the first call.
    key = cv2.waitKey(1)
    if key == ord('q'):
        break

print(frame_count)
video.release()
# Release the display window on exit (the original leaked it).
cv2.destroyAllWindows()