I am relatively new to asking questions on Stack Overflow, but I will do my best to explain the problem thoroughly.
I am currently using an Axis IP camera to stream live video to a CARMA board. GStreamer receives the frames through an RTSP client, performs an RTP depayload, and then decodes the H.264 video sent by the camera. When I run this process on my computer (equipped with an i7 processor) there is no lag, and the stream is output to the screen in real time, updating at a rate of 30 Hz.

The problem arises when I switch over to the CARMA board: instead of the stream being displayed in real time, the appsink receives buffers at a much slower rate. More specifically, instead of receiving buffers at 30 Hz, it receives them at only about 10 Hz on average, even when no other processing is occurring on the board. It should also be noted that no frames are dropped: the appsink receives every buffer, just not in real time. I have also checked that timestamps are not the issue (i.e. the rate at which the appsink receives buffers does not change whether or not I use a GST timestamp). The CARMA board is running Ubuntu 11.04 and the code is compiled with GCC. Any insight as to why this is occurring would be greatly appreciated. Below are some code snippets and their respective explanations.
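The ~10 Hz figure comes from counting buffers inside the appsink callback. To see the exact inter-arrival time, one could time the callbacks roughly like the sketch below (illustrative only, not part of my actual program; it assumes GLib 2.28+ for g_get_monotonic_time()):

/* Sketch: log the gap between successive "new-buffer" callbacks */
static void appsink_rate_probe (GstElement *sink, gpointer user_data) {
  static gint64 last_us = 0;               /* time of the previous callback */
  gint64 now_us = g_get_monotonic_time (); /* microseconds */
  if (last_us != 0 && now_us > last_us)
    g_print ("Inter-buffer gap: %.1f ms (%.1f Hz)\n",
             (now_us - last_us) / 1000.0,
             1000000.0 / (now_us - last_us));
  last_us = now_us;
  /* the buffer must still be pulled so the pipeline keeps flowing */
  GstBuffer *buffer;
  g_signal_emit_by_name (sink, "pull-buffer", &buffer);
  if (buffer)
    gst_buffer_unref (buffer);
}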
Some definitions:
#define APPSINK_CAPS "video/x-raw-yuv,format=(fourcc)I420"
#define RTSP_URI "rtsp://(ipaddress)/axis-media/media.amp?videocodec=h264"
#define RTSP_LATENCY 0
#define RTSP_BUFFER_MODE 0
#define RTSP_RTP_BLOCKSIZE 65536
GStreamer pipeline set-up code:
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
data.rtspsrc = gst_element_factory_make("rtspsrc", NULL);
data.rtph264depay = gst_element_factory_make("rtph264depay", NULL);
data.nv_omx_h264dec = gst_element_factory_make("nv_omx_h264dec", NULL);
data.appsink = gst_element_factory_make("appsink", NULL);
if (!data.rtspsrc || !data.rtph264depay || !data.nv_omx_h264dec || !data.appsink) {
  g_printerr ("Not all elements could be created.\n");
  return -1;
}
/* Set element properties */
g_object_set (data.rtspsrc, "location", RTSP_URI,
              "latency", RTSP_LATENCY,
              "buffer-mode", RTSP_BUFFER_MODE,
              "rtp-blocksize", RTSP_RTP_BLOCKSIZE,
              NULL);
g_object_set (data.rtph264depay, "byte-stream", FALSE, NULL);
g_object_set (data.nv_omx_h264dec, "use-timestamps", TRUE, NULL);
/* Configure appsink. This plugin will allow us to access buffer data */
GstCaps *appsink_caps;
appsink_caps = gst_caps_from_string (APPSINK_CAPS);
g_object_set (data.appsink, "emit-signals", TRUE,
              "caps", appsink_caps,
              NULL);
g_signal_connect (data.appsink, "new-buffer", G_CALLBACK (appsink_new_buffer), &data);
gst_caps_unref (appsink_caps);
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline) {
  g_printerr ("Pipeline could not be created.\n");
  return -1; /* bail out: nothing below can work without a pipeline */
}
/* Build the pipeline */
/* Note that we are NOT linking the source at this point. We will do it later. */
gst_bin_add_many (GST_BIN (data.pipeline),
                  data.rtspsrc,
                  data.rtph264depay,
                  data.nv_omx_h264dec,
                  data.appsink,
                  NULL);
if (gst_element_link (data.rtph264depay, data.nv_omx_h264dec) != TRUE) {
  g_printerr ("rtph264depay and nv_omx_h264dec could not be linked.\n");
  gst_object_unref (data.pipeline);
  return -1;
}
if (gst_element_link (data.nv_omx_h264dec, data.appsink) != TRUE) {
  g_printerr ("nv_omx_h264dec and appsink could not be linked.\n");
  gst_object_unref (data.pipeline);
  return -1;
}
/* Connect to the pad-added signal (CALLBACK!) */
g_signal_connect (data.rtspsrc, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* Add a probe to perform hashing on the H.264 bytestream */
GstPad *rtph264depay_src_pad = gst_element_get_static_pad (data.rtph264depay, "src");
gst_pad_add_buffer_probe (rtph264depay_src_pad, G_CALLBACK (hash_and_report), (gpointer)(&data));
gst_object_unref (rtph264depay_src_pad); /* unreference the source pad */
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
  g_printerr ("Unable to set the pipeline to the playing state.\n");
  gst_object_unref (data.pipeline);
  return -1;
}
/* Wait until error or EOS */
bus = gst_element_get_bus (data.pipeline);
do {
  msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
      (GstMessageType)(GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS));

  /* Parse message */
  if (msg != NULL) {
    GError *err;
    gchar *debug_info;

    switch (GST_MESSAGE_TYPE (msg)) {
      case GST_MESSAGE_ERROR:
        gst_message_parse_error (msg, &err, &debug_info);
        g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
        g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
        g_clear_error (&err);
        g_free (debug_info);
        terminate = TRUE;
        break;
      case GST_MESSAGE_EOS:
        g_print ("End-Of-Stream reached.\n");
        terminate = TRUE; /* stop waiting once the stream has ended */
        break;
      case GST_MESSAGE_STATE_CHANGED:
        /* We are only interested in state-changed messages from the pipeline */
        if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
          GstState old_state, new_state, pending_state;
          gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
          g_print ("Pipeline state changed from %s to %s:\n",
              gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
        }
        break;
      default:
        /* we should not reach here: we only asked for ERROR, EOS and STATE_CHANGED */
        g_printerr ("Unexpected message received.\n");
        break;
    }
    gst_message_unref (msg);
  }
} while (!terminate);
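To check whether the slowdown appears before or after the decoder, one could attach a second buffer probe to the decoder's src pad, in the same style as the hash_and_report probe above. The callback below and its timing bookkeeping are illustrative additions, not part of my actual program:

/* Sketch: time the arrival of decoded frames at the decoder's src pad */
static gboolean decoder_src_probe (GstPad *pad, GstBuffer *buffer, gpointer user_data) {
  static gint64 last_us = 0;
  gint64 now_us = g_get_monotonic_time ();
  if (last_us != 0)
    g_print ("Decoded frame after %.1f ms\n", (now_us - last_us) / 1000.0);
  last_us = now_us;
  return TRUE; /* TRUE = let the buffer pass downstream */
}

/* attached right after the pipeline is built: */
GstPad *dec_src_pad = gst_element_get_static_pad (data.nv_omx_h264dec, "src");
gst_pad_add_buffer_probe (dec_src_pad, G_CALLBACK (decoder_src_probe), NULL);
gst_object_unref (dec_src_pad);

If the ~10 Hz rate is already visible here, the bottleneck is the decoder (or something upstream of it) rather than anything in the appsink callback.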
Now the pad_added_handler:
/* This function will be called by the pad-added signal */
//Thread 1
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
  GstPad *sink_pad = gst_element_get_static_pad (data->rtph264depay, "sink");
  GstPadLinkReturn ret;
  GstCaps *new_pad_caps = NULL;
  GstStructure *new_pad_struct = NULL;
  const gchar *new_pad_type = NULL;

  g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));

  /* Check the new pad's type */
  new_pad_caps = gst_pad_get_caps (new_pad);
  new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
  new_pad_type = gst_structure_get_name (new_pad_struct);
  if (!g_str_has_prefix (new_pad_type, "application/x-rtp")) {
    g_print ("  It has type '%s' which is not RTP. Ignoring.\n", new_pad_type);
    goto exit;
  }

  /* If our depayloader is already linked, we have nothing to do here */
  if (gst_pad_is_linked (sink_pad)) {
    g_print ("  We are already linked. Ignoring.\n");
    goto exit;
  }

  /* Attempt the link */
  ret = gst_pad_link (new_pad, sink_pad);
  if (GST_PAD_LINK_FAILED (ret)) {
    g_print ("  Type is '%s' but link failed.\n", new_pad_type);
  } else {
    g_print ("  Link succeeded (type '%s').\n", new_pad_type);
  }

exit:
  /* Unreference the new pad's caps, if we got them */
  if (new_pad_caps != NULL)
    gst_caps_unref (new_pad_caps);
  /* Unreference the sink pad */
  gst_object_unref (sink_pad);
}
And now the callback that is invoked every time the appsink receives a buffer. This is where I suspect (though I am not certain) the problem lies: some processing I am doing may take too long, so that too much time passes before the next buffer can be handled:
// Called when appsink receives a buffer: Thread 1
void appsink_new_buffer (GstElement *sink, CustomData *data) {
  GstBuffer *buffer;

  /* Retrieve the buffer */
  g_signal_emit_by_name (sink, "pull-buffer", &buffer);
  if (buffer) {
    data->appsink_buffer_count++; /* 'data' is already a CustomData*, no cast needed */

    /* Push the buffer onto the queue, to be processed in a different thread */
    if (GstBufferQueue->size() > GSTBUFFERQUEUE_SIZE) {
      /* error message */
      printf ("GstBufferQueue is full!\n");
      /* release the buffer */
      gst_buffer_unref (buffer);
    } else {
      /* push onto the queue */
      GstBufferQueue->push(buffer);
      /* wake the processing thread */
      connectionDataAvailable_GstBufferQueue.notify_all();
    }
  }
}
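For context, the thread that drains GstBufferQueue looks roughly like the sketch below. This is simplified: the mutex name and the process_frame() call are placeholders, and the use of Boost threading primitives (rather than another library) is an assumption for the sake of the example:

#include <queue>
#include <boost/thread.hpp>

/* Hypothetical declarations mirroring the names used in the callback above */
extern std::queue<GstBuffer*> *GstBufferQueue;
extern boost::mutex GstBufferQueue_mutex;              /* placeholder name */
extern boost::condition_variable connectionDataAvailable_GstBufferQueue;

void gst_buffer_consumer_thread () {
  for (;;) {
    boost::unique_lock<boost::mutex> lock (GstBufferQueue_mutex);
    while (GstBufferQueue->empty ())
      connectionDataAvailable_GstBufferQueue.wait (lock); /* woken by notify_all() */
    GstBuffer *buffer = GstBufferQueue->front ();
    GstBufferQueue->pop ();
    lock.unlock ();
    process_frame (buffer);    /* placeholder for the heavy per-frame work */
    gst_buffer_unref (buffer); /* release the buffer once processed */
  }
}

(Note that for this pattern to be thread-safe, the appsink callback would have to take the same mutex around its push onto the queue.)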
A link to the camera I am using:
http://www.axis.com/products/cam_p1357/index.htm
Hope this helps. I will continue to investigate this problem myself and provide updates as they come. Let me know if you need any other information and I look forward to reading your responses!
Thanks
So apparently the problem was not the program (i.e. the software design) but rather that the hardware on the CARMA board could not keep up with the amount of processing I was doing. In other words, the Tegra 3 processor on the CARMA was insufficient for this task. Possible solutions are to cut down the processing I am doing on the CARMA board or to upgrade to a different board. I hope this helps people appreciate how limited the processing power on smaller devices can be, and also to be aware that processors (specifically those in the Tegra 3 category, which follow the system-on-a-chip model) may not currently have the computational power required to keep up with projects or systems that require large, real-time calculations.
In short, be careful what you buy! Do your best to ensure that what you are purchasing is right for the project. That said, don't be scared to try new devices: despite not being able to do what I wanted, I learned more than I could have ever expected. After all, computer science is just continuous learning :p