I'm trying to implement object detection in a Gazebo simulation environment, following https://dev.px4.io/v1.9.0/en/simulation/gazebo.html. I can receive the video in QGroundControl, and I have also received video and detected objects from a USB webcam via Python. Now I want to read the GStreamer UDP video stream with OpenCV's VideoCapture function in Python, but it gives this error:
File "gstreamer_try.py", line 120, in <module>
feed_dict={image_tensor: image_np_expanded})
File "/home/hanco/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/client/session.py", line 956, in run
run_metadata_ptr)
File "/home/hanco/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/client/session.py", line 1149, in _run
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
File "/home/hanco/anaconda3/lib/python3.7/site-packages/numpy/core/_asarray.py", line 85, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
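If I read the traceback correctly, np.asarray is being called on None, which means image_np_expanded (i.e. the frame returned by cap.read()) is None, so the capture is not actually delivering frames. A minimal guard like this (just a sketch, using the variable names from my detection script) at least shows that the failure happens at the capture, not in TensorFlow:

ret, image_np = cap.read()
if not ret or image_np is None:
    # the capture returned no frame, so skip this iteration instead of
    # feeding None into the detection graph
    continue
image_np_expanded = np.expand_dims(image_np, axis=0)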
First, I tried to write a simple sender and receiver that stream a video file over raw UDP:
gstreamer_sender.py:
import socket
import numpy as np
import cv2 as cv

addr = ("127.0.0.1", 5655)
buf = 512
width = 640
height = 480

cap = cv.VideoCapture("/home/hanco/Desktop/duckduck.mp4")
cap.set(cv.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, height)

# start-of-frame marker, padded to exactly one chunk
code = 'start'
code = ('start' + (buf - len(code)) * 'a').encode('utf-8')

if __name__ == '__main__':
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            s.sendto(code, addr)
            # send the raw BGR frame in buf-sized chunks
            data = frame.tobytes()
            for i in range(0, len(data), buf):
                s.sendto(data[i:i + buf], addr)
            # cv.imshow('send', frame)
            # if cv.waitKey(1) & 0xFF == ord('q'):
            #     break
        else:
            break

    # s.close()
    # cap.release()
    # cv.destroyAllWindows()
gstreamer_receiver.py:
import socket
import numpy as np
import cv2 as cv

addr = ("127.0.0.1", 5600)
buf = 512
width = 640
height = 480

code = b'start'
num_of_chunks = width * height * 3 // buf

if __name__ == '__main__':
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(addr)

    while True:
        chunks = []
        while len(chunks) < num_of_chunks:
            chunk, _ = s.recvfrom(buf)
            if chunk.startswith(code):
                # start-of-frame marker: drop any partial frame and resync
                chunks = []
            else:
                chunks.append(chunk)

        # reassemble the raw BGR frame (rows = height, cols = width)
        byte_frame = b''.join(chunks)
        frame = np.frombuffer(byte_frame, dtype=np.uint8).reshape(height, width, 3)

        cv.imshow('recv', frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

    s.close()
    cv.destroyAllWindows()
This works well, so I assumed I could receive the Gazebo video just by changing the port (Gazebo's default is 5600). But it didn't work, presumably because Gazebo sends an RTP/H.264 stream rather than raw frame bytes.
What I actually want is to open the stream in my detection script with:
cap = cv2.VideoCapture(GSTREAMER_VIDEO_INPUT)
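Spelled out, I expect GSTREAMER_VIDEO_INPUT to be the receive pipeline, opened explicitly with the GStreamer backend (a sketch; the pipeline string mirrors the one at the end of this post, and cv2.CAP_GSTREAMER only helps if OpenCV was built with GStreamer support):

import cv2

GSTREAMER_VIDEO_INPUT = (
    "udpsrc port=5600 ! application/x-rtp,payload=96,encoding-name=H264 "
    "! rtpjitterbuffer mode=1 ! rtph264depay ! h264parse ! decodebin "
    "! videoconvert ! appsink"
)

cap = cv2.VideoCapture(GSTREAMER_VIDEO_INPUT, cv2.CAP_GSTREAMER)
print("pipeline opened:", cap.isOpened())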
With the current script, the error is raised on this line:
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
    image_np,
    np.squeeze(boxes),
    np.squeeze(classes).astype(np.int32),
    np.squeeze(scores),
    category_index,
    use_normalized_coordinates=True,
    line_thickness=8)
The following line runs without any error:
# Read video
video = cv2.VideoCapture("udpsrc port=5600 ! application/x-rtp,payload=96,encoding-name=H264 ! rtpjitterbuffer mode=1 ! rtph264depay ! h264parse ! decodebin ! videoconvert ! appsink", cv2.CAP_GSTREAMER)
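Even though that constructor doesn't raise, it can still fail silently (for example if this OpenCV build, such as the one that ships with Anaconda, was not compiled with GStreamer support), so I'm also checking the build flags and whether a frame actually arrives (a sketch reusing the video object from the line above; cv2.getBuildInformation() is a standard OpenCV call):

# Does this OpenCV build support GStreamer at all?
for line in cv2.getBuildInformation().split("\n"):
    if "GStreamer" in line:
        print(line.strip())

print("opened:", video.isOpened())
ret, frame = video.read()
print("got frame:", ret, None if frame is None else frame.shape)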