I've implemented object detection and contour drawing. Now I want to count the objects as they cross a line: if the y of an object's centroid is greater than the y of the line, the object is counted — but this counts the same object multiple times. How can I avoid this?
Link to video: video
The file at the link is 11.mp4 — you can open it directly using the code below. This file may not open in some video players.
import datetime
import math
import cv2
import numpy as np
from imutils.video import FPS
import imutils
# global variables
width = 0   # working frame width for counting (recomputed every frame below)
height = 0  # working frame height (recomputed every frame below)
EntranceCounter = 0  # objects counted crossing the entrance (blue) line
ExitCounter = 0      # objects counted crossing the exit (red) line
MinCountourArea = 2000  # minimum contour area kept as a detection; adjust this value according to your usage
BinarizationThreshold = 70  # threshold for binarizing the frame delta; adjust this value according to your usage
OffsetRefLines =100  # vertical offset of the reference lines from mid-frame; adjust this value according to your usage
already_counted = False  # NOTE(review): never read below — candidate for removal
is_position_ok=False     # NOTE(review): never read below — candidate for removal
#Check if an object in entering in monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    """Return 1 if a centroid at height *y* is crossing the entrance line.

    A crossing means the centroid sits within 2 px of the entrance line
    while still being above the exit line; otherwise return 0.
    """
    near_entrance = abs(y - CoorYEntranceLine) <= 2
    above_exit = y < CoorYExitLine
    return int(near_entrance and above_exit)
#Check if an object in exitting from monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    """Return 1 if a centroid at height *y* is crossing the exit line.

    A crossing means the centroid lies at most 2 px BELOW the exit line;
    otherwise return 0. Prints a debug trace on every call.
    """
    print("coorCenter: ",y,"distance: ",y - CoorYExitLine,"CoorYExitLine: ",CoorYExitLine,"CoorYEntranceLine: ",CoorYEntranceLine)
    if y > CoorYExitLine and abs(y - CoorYExitLine) <= 2:
        return 1
    return 0
# ---------------------------------------------------------------------------
# Main capture / counting loop.
#
# Reads frames from 11.mp4, background-subtracts each frame against the
# first grabbed grayscale frame, keeps contours larger than
# MinCountourArea, and counts centroids that cross the entrance / exit
# reference lines. Press 'q' in the display window to quit.
# ---------------------------------------------------------------------------
camera = cv2.VideoCapture("11.mp4")
fps = camera.get(cv2.CAP_PROP_FPS)
print("fps: ", fps)

# force 640x480 webcam resolution
# camera.set(3,640)
# camera.set(4,480)

ReferenceFrame = None

# The webcam may need some time / captured frames to adapt to ambient
# lighting. For this reason, some frames are grabbed and discarded.
for i in range(0, 20):
    (grabbed, Frame) = camera.read()
    # BUGFIX: check the grab result BEFORE touching Frame — on a short or
    # unreadable video, cv2.resize(None, ...) raises an exception.
    if not grabbed:
        break
    Frame = cv2.resize(Frame, (860, 540))

while True:
    (grabbed, Frame) = camera.read()
    # BUGFIX: if a frame cannot be grabbed (end of file), stop HERE.
    # The original resized Frame first, which crashes when Frame is None.
    if not grabbed:
        break
    Frame = cv2.resize(Frame, (860, 540))
    Frame = Frame[0:540, 300:860]  # crop to the region of interest
    height = np.size(Frame, 0)
    width = int(np.size(Frame, 1) / 2)  # counting zone is the left half

    # gray-scale conversion and Gaussian blur filter applying
    GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)

    # the first usable frame becomes the static background model
    if ReferenceFrame is None:
        ReferenceFrame = GrayFrame
        continue

    # Background subtraction and image binarization
    FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
    FrameThresh = cv2.threshold(FrameDelta, BinarizationThreshold, 255, cv2.THRESH_BINARY)[1]

    # Dilate image and find all the contours
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    cnts, _ = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    QttyOfContours = 0

    # plot reference lines (entrance and exit lines)
    CoorYEntranceLine = int((height / 2) - OffsetRefLines)
    CoorYExitLine = int((height / 2) + OffsetRefLines)
    cv2.line(Frame, (0, CoorYEntranceLine), (width, CoorYEntranceLine), (255, 0, 0), 2)
    cv2.line(Frame, (0, CoorYExitLine), (width, CoorYExitLine), (0, 0, 255), 2)

    # check all found contours
    for c in cnts:
        # if a contour has small area, it'll be ignored
        if cv2.contourArea(c) > MinCountourArea:
            QttyOfContours = QttyOfContours + 1

            # draw a rectangle "around" the object
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # find object's centroid
            CoordXCentroid = int((x + x + w) / 2)
            CoordYCentroid = int((y + y + h) / 2)
            ObjectCentroid = (CoordXCentroid, CoordYCentroid)
            cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 2)

            if (CheckEntranceLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine)):
                EntranceCounter += 1

            # if (CheckExitLineCrossing(CoordYCentroid,CoorYEntranceLine,CoorYExitLine)):
            #    ExitCounter += 1

            # exit test: centroid above the exit line, in the left half,
            # and closer to the exit line than to the top of its own box
            AbsDistance = abs(CoordYCentroid - CoorYExitLine)
            if ((CoordYCentroid < CoorYExitLine) and (CoordXCentroid < width) and (AbsDistance) <= abs(CoordYCentroid - y)):
                ExitCounter += 1
                print("coorYCenter: ", CoordYCentroid, "distance: ", AbsDistance, "CoorYExitLine: ", CoorYExitLine, "width: ", width, "y: ", int(y))
                continue

    # print ("Total countours found: "+str(QttyOfContours))

    # Write entrance and exit counter values on frame and shows it
    cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("Original Frame", Frame)

    # cv2.waitKey(1);
    fps = int(fps)
    if cv2.waitKey(fps) & 0xFF == ord('q'):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
There are multiple issues. First, there are a lot of false-positive detections (workers, multiple fragments near the bags, etc.), and second, your algorithm may count the same detection multiple times.
One suggestion for a quick improvement is to implement a "backoff period" after a positive detection is made, i.e. a period of time where no other detection is counted after a positive one is made. I also suggest that you define a zone of detection rather than using a line.
The area should cover the total width of the end of the slope and its height should be high enough such as a given detection always falls within its bounds. This depends on the bag speed and the framerate of the video. This height should also not be too high in order to avoid detecting multiple times the same detection in consecutive times, even though the backoff strategy will eliminate some of this redundancy.
In the following algorithm I chose a backoff of 5 frames (approximately the time a bag takes to entirely pass a given horizontal line) and a zone height of 40 (approximately half a bag in the worst case, which is sufficient given the framerate).
import cv2
import numpy as np
# global variables
width = 0   # working frame width for counting (recomputed every frame below)
height = 0  # working frame height (recomputed every frame below)
EntranceCounter = 0  # kept for compatibility; entrance counting removed below
ExitCounter = 0      # objects counted inside the exit zone
MinCountourArea = 2000  # minimum contour area kept as a detection; adjust this value according to your usage
BinarizationThreshold = 70  # threshold for binarizing the frame delta; adjust this value according to your usage
OffsetRefLines = 100  # vertical offset of the exit line from mid-frame; adjust this value according to your usage
already_counted = False  # NOTE(review): never read below — candidate for removal
is_position_ok = False   # NOTE(review): never read below — candidate for removal
backoff_frames = 6  # frames during which no new count is accepted after a positive one
backoff = 0         # countdown; > 0 means we are inside the backoff period
# Check if an object in exitting from monitored zone
def CheckExitLineCrossing(
    y: float, x: float, CoorYExitLine: float, width: float, margin: int = 40
):
    """Return True when centroid (x, y) lies inside the exit zone.

    The zone is the band at most `margin` pixels BELOW the exit line,
    restricted to x-coordinates left of `width`.
    """
    below_line = y > CoorYExitLine
    within_band = abs(y - CoorYExitLine) <= margin
    inside_width = x < width
    return below_line and within_band and inside_width
# ---------------------------------------------------------------------------
# Main capture / counting loop with a backoff period.
#
# Reads frames from 11.mp4, background-subtracts each frame against the
# first grabbed grayscale frame, keeps contours larger than
# MinCountourArea, and counts a detection inside the exit zone at most
# once per `backoff_frames` frames. Press 'q' in the display window to
# quit.
# ---------------------------------------------------------------------------
camera = cv2.VideoCapture("11.mp4")
fps = camera.get(cv2.CAP_PROP_FPS)
print("fps: ", fps)

ReferenceFrame = None

# A few initial frames are grabbed and discarded so that the camera
# adapts to ambient lighting.
for i in range(0, 20):
    (grabbed, Frame) = camera.read()
    # BUGFIX: check the grab result BEFORE touching Frame — on a short or
    # unreadable video, cv2.resize(None, ...) raises an exception.
    if not grabbed:
        break
    Frame = cv2.resize(Frame, (860, 540))

while True:
    (grabbed, Frame) = camera.read()
    # BUGFIX: if a frame cannot be grabbed (end of file), stop HERE.
    # The original resized Frame first, which crashes when Frame is None.
    if not grabbed:
        break
    Frame = cv2.resize(Frame, (860, 540))
    Frame = Frame[0:540, 300:860]  # crop to the region of interest
    height = np.size(Frame, 0)
    width = int(np.size(Frame, 1) / 2)  # counting zone is the left half

    # gray-scale conversion and Gaussian blur filter applying
    GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)

    # the first usable frame becomes the static background model
    if ReferenceFrame is None:
        ReferenceFrame = GrayFrame
        continue

    # Background subtraction and image binarization
    FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
    FrameThresh = cv2.threshold(
        FrameDelta, BinarizationThreshold, 255, cv2.THRESH_BINARY
    )[1]

    # Dilate image and find all the contours
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    cnts, _ = cv2.findContours(
        FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )

    QttyOfContours = 0

    # plot the reference (exit) line
    CoorYEntranceLine = int((height / 2) - OffsetRefLines)
    CoorYExitLine = int((height / 2) + OffsetRefLines)
    cv2.line(Frame, (0, CoorYExitLine), (width, CoorYExitLine), (0, 0, 255), 2)

    # check all found contours
    for c in cnts:
        # if a contour has small area, it'll be ignored
        if cv2.contourArea(c) > MinCountourArea:
            QttyOfContours = QttyOfContours + 1

            # draw a rectangle "around" the object
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # find object's centroid
            CoordXCentroid = int((x + x + w) / 2)
            CoordYCentroid = int((y + y + h) / 2)
            ObjectCentroid = (CoordXCentroid, CoordYCentroid)
            cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 2)

            # BUGFIX: the original call passed `width` and `CoorYExitLine`
            # in swapped positions — the signature is
            # (y, x, CoorYExitLine, width, margin=40).
            if CheckExitLineCrossing(
                CoordYCentroid, CoordXCentroid, CoorYExitLine, width
            ):
                # count only when outside the backoff period, then start
                # a new backoff so the same bag is not counted again
                if backoff == 0:
                    ExitCounter += 1
                    backoff = backoff_frames
                # only one bag can be at the line at a time -> stop scanning
                break

    # one frame elapsed: advance the backoff countdown
    if backoff > 0:
        backoff -= 1

    cv2.putText(
        Frame,
        "Exits: {}".format(str(ExitCounter)),
        (10, 70),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.5,
        (0, 0, 255),
        2,
    )
    cv2.imshow("Original Frame", Frame)

    # cv2.waitKey(1);
    fps = int(fps)
    if cv2.waitKey(fps) & 0xFF == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
This eliminates almost all the false positives and negatives of the detection. I further suggest that you implement a better detection algorithm and use some sort of tracking (a Kalman filter, etc.) to improve the accuracy further.
Note that in the for-loop over the detections, you should break instead of continue, because it is (I believe) not possible for two bags to be detected in the same frame, i.e. at the same time.