I am trying to stream opencv frames to the browser. Upon research, i came across Miguel's tutorial: https://blog.miguelgrinberg.com/post/video-streaming-with-flask/page/10
Let me break down what I'm trying to achieve: on the home page, I'm trying to stream opencv frames with opencv in real time and on another page, I need to use the webcam to take a picture.
Problem: using Miguel's way of streaming to the browser, starts an infinite thread, in this case, does not release the camera when I want to take a picture on the other page. Switching back to the home page, I get this error:
VIDEOIO ERROR: V4L2: Pixel format of incoming image is unsupported by OpenCV
Unable to stop the stream: Device or resource busy
video stream started
OpenCV(3.4.1) Error: Assertion failed (scn == 3 || scn == 4) in cvtColor, file /home/eli/cv/opencv-3.4.1/modules/imgproc/src/color.cpp, line 11115
Debugging middleware caught exception in streamed response at a point where response headers were already sent.
Here's my code:
detect_face_video.py
This is where I perform the face recognition
# import the necessary packages
from imutils.video import VideoStream
import face_recognition
import argparse
import imutils
import pickle
import time
import cv2
from flask import Flask, render_template, Response
import sys
import numpy
from app.cv_func import draw_box
import redis
import datetime
from app.base_camera import BaseCamera
import os
global red
red = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)
class detect_face:
def gen(self):
i=1
while i<10:
yield (b'--frame\r\n'
b'Content-Type: text/plain\r\n\r\n'+str(i)+b'\r\n')
i+=1
def get_frame(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
# load the known faces and embeddings
print("[INFO] loading encodings...")
"rb").read())
data = pickle.loads(open("%s/encode.pickle"%dir_path, "rb").read())
# initialize the video stream and pointer to output video file, then
# allow the camera sensor to warm up
print("[INFO] starting video stream...")
try:
vs = VideoStream(src=1).start()
except Exception as ex:
vs.release()
print("video stream started")
# loop over frames from the video file stream
i=1
counter = 1
while True:
# grab the frame from the threaded video stream
try:
frame = vs.read()
except Exception as ex:
print("an error occured here")
print(ex)
# finally:
continue
# convert the input frame from BGR to RGB then resize it to have
# a width of 750px (to speedup processing)
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
rgb = imutils.resize(frame, width=450, height=400)
r = frame.shape[1] / float(rgb.shape[1])
# detect the (x, y)-coordinates of the bounding boxes
# corresponding to each face in the input frame, then compute
# the facial embeddings for each face
boxes = face_recognition.face_locations(rgb,
model="hog")
# boxes = face_recognition.face_locations(rgb,
# model=args["detection_method"])
encodings = face_recognition.face_encodings(rgb, boxes)
names = []
# loop over the facial embeddings
for encoding in encodings:
print(encoding)
# attempt to match each face in the input image to our known
# encodings
matches = face_recognition.compare_faces(data["encodings"],
encoding)
# matches = face_recognition.compare_faces(data["encodings"],
# encoding)
name = "Unknown"
# check to see if we have found a match
if True in matches:
# find the indexes of all matched faces then initialize a
# dictionary to count the total number of times each face
# was matched
matchedIdxs = [i for (i, b) in enumerate(matches) if b]
counts = {}
# loop over the matched indexes and maintain a count for
# each recognized face face
for i in matchedIdxs:
name = data["names"][i]
counts[name] = counts.get(name, 0) + 1
# determine the recognized face with the largest number
# of votes (note: in the event of an unlikely tie Python
# will select first entry in the dictionary)
name = max(counts, key=counts.get)
# update the list of names
names.append(name)
red.set('currentName', name)
# self.create_report(name, counter)
# f = open("tester.txt", 'w+')
key='StudentName%d'%counter
if(name != 'Unknown'):
red.set(key,name)
red.set('counter', counter)
counter+=1
# loop over the recognized faces
for ((top, right, bottom, left), name) in zip(boxes, names):
# rescale the face coordinates
top = int(top * r)
right = int(right * r)
bottom = int(bottom * r)
left = int(left * r)
# print("top: %d right: %d bottom: %d left: %d"%(top,right,bottom,left))
# print("top_: %d right_: %d bottom_: %d left_: %d"%(top_,right_,bottom_,left_))
# draw the predicted face name on the image
cv2.rectangle(frame, (left, top), (right, bottom),
(0, 255, 0), 2)
# draw_box(frame, int(left/2), int(top/2), int(right/2), int(bottom/2))
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
0.75, (0, 255, 0), 2)
imgencode=cv2.imencode('.jpg',frame)[1]
stringData = imgencode.tostring()
yield(b'--frame\r\n'
b'Content-Type: text/plain\r\n\r\n'+stringData+b'\r\n')
i+=1
del(vs)
cv2.destroyAllWindows()
vs.stop()
And the routes file(i only pasted the important sections): routes.py
from flask import Flask, render_template, request,Response,jsonify,make_response
from app.detect_face_video import detect_face
detect = detect_face()
@app.route('/')
def index():
return render_template('index.html')
def get_frame_():
detect.gen()
detect.get_frame()
@app.route('/calc')
def calc():
#This function displays the video streams in the webpage
# detect.vs.stop()
return Response(detect.get_frame(),mimetype='multipart/x-mixed-replace; boundary=frame')
How can i stop-or say pause- the streaming anytime i leave that page(the home page)?
If you're looking for faster and more robust plus simpler way to stream frames to Browser then, you can use my VidGear Python Library's WebGear, which is a powerful ASGI Video-streamer API, built upon Starlette - a lightweight ASGI async framework/toolkit.
# install VidGear
python3 -m pip install vidgear[asyncio]
Then you can use this complete python example which runs video server at address http://<host-machine ip>:8000/
on any browser on the network, in just a few lines of code:
# import required libraries
import uvicorn
from vidgear.gears.asyncio import WebGear
# various performance tweaks
options = {
"frame_size_reduction": 40,
"jpeg_compression_quality": 80,
"jpeg_compression_fastdct": True,
"jpeg_compression_fastupsample": False,
}
# initialize WebGear app
web = WebGear(source="foo.mp4", logging=True, **options)
# run this app on Uvicorn server at address http://localhost:8000/
uvicorn.run(web(), host="0.0.0.0", port=8000)
# close app safely
web.shutdown()
If still get some error, raise an issue here in its GitHub repo.