Restarting pipeline on DeepStream 2.0

Hi,

I am trying to add the ability to change the source video in the deepstream-test1 sample of DeepStream 2.0.

Basically I am doing the following:

In the bus message handling function (bus_call), on receiving an EOS message:

  • change the pipeline state to NULL/READY:
    gst_element_set_state (pipeline, GST_STATE_READY);
  • set the new "location" property of the "filesrc" element:
    g_object_set (G_OBJECT (source), "location", new_video, NULL);
  • change the pipeline state to PLAYING:
    gst_element_set_state (pipeline, GST_STATE_PLAYING);

I expect the pipeline to continue execution and process the new file.

The idea is to process video files that are continuously sent by some external application, without reinitializing/reloading the classifier (which takes a lot of time).

When the pipeline state is changed to PLAYING, I receive the following error:

==================================================
reformat.cu (769) - Cuda Error in NCHWToNCQHW4: 11
reformat.cu (769) - Cuda Error in NCHWToNCQHW4: 11
deepstream-test-new-app: nvll_infer.cpp:1190: bool GIE_Context::caffeToGIEModel(): Assertion `engine' failed.

Can you propose a solution or workaround for the described case?

Thanks, Simon.

Hi Simon,
Please share a patch to the sample so that we can reproduce it and check further.

This is the code that reproduces the error. It is basically your "test1" example with a small number of changes (marked with CHANGED):

==========================================================================================================
/*
 * Copyright (c) 2018 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto.  Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 *
 */

#include <stdio.h>
#include <gst/gst.h>
#include <glib.h>

#include "gstnvdsmeta.h"

#define MAX_DISPLAY_LEN 64

#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
  "Roadsign"
};

// CHANGED
GstElement *pipeline = NULL;

/* osd_sink_pad_buffer_probe will extract metadata received on OSD sink pad
 * and update params for drawing rectangle, object information etc. */

static GstPadProbeReturn
osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{

  GstMeta *gst_meta = NULL;
  NvDsMeta *nvdsmeta = NULL;
  gpointer state = NULL;
  static GQuark _nvdsmeta_quark = 0;
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsFrameMeta *frame_meta = NULL;
  guint num_rects = 0, rect_index = 0, l_index = 0;
  NvDsObjectParams *obj_meta = NULL;
  guint i = 0;
  NvOSD_TextParams *txt_params = NULL;
  guint vehicle_count = 0;
  guint person_count = 0;

  if (!_nvdsmeta_quark)
    _nvdsmeta_quark = g_quark_from_static_string (NVDS_META_STRING);

  while ((gst_meta = gst_buffer_iterate_meta (buf, &state))) {
    if (gst_meta_api_type_has_tag (gst_meta->info->api, _nvdsmeta_quark)) {

      nvdsmeta = (NvDsMeta *) gst_meta;

      /* We are interested only in intercepting Meta of type
       * "NVDS_META_FRAME_INFO" as they are from our infer elements. */
      if (nvdsmeta->meta_type == NVDS_META_FRAME_INFO) {
        frame_meta = (NvDsFrameMeta *) nvdsmeta->meta_data;
        if (frame_meta == NULL) {
          g_print ("NvDS Meta contained NULL meta \n");
          return GST_PAD_PROBE_OK;
        }

        /* We reset the num_strings here as we plan to iterate through the
         *  the detected objects and form our own strings.
         *  The pipeline generated strings shall be discarded.
         */
        frame_meta->num_strings = 0;

        num_rects = frame_meta->num_rects;

        /* This means we have num_rects in frame_meta->obj_params,
         * now lets iterate through them */

        for (rect_index = 0; rect_index < num_rects; rect_index++) {
          /* Now using above information we need to form a text that should
           * be displayed on top of the bounding box, so lets form it here. */

          obj_meta = (NvDsObjectParams *) & frame_meta->obj_params[rect_index];

          txt_params = &(obj_meta->text_params);
          if (txt_params->display_text)
            g_free (txt_params->display_text);

          txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);

          g_snprintf (txt_params->display_text, MAX_DISPLAY_LEN, "%s ",
              pgie_classes_str[obj_meta->class_id]);

          if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE)
            vehicle_count++;
          if (obj_meta->class_id == PGIE_CLASS_ID_PERSON)
            person_count++;

          /* Now set the offsets where the string should appear */
          txt_params->x_offset = obj_meta->rect_params.left;
          txt_params->y_offset = obj_meta->rect_params.top - 25;

          /* Font , font-color and font-size */
          txt_params->font_params.font_name = "Arial";
          txt_params->font_params.font_size = 10;
          txt_params->font_params.font_color.red = 1.0;
          txt_params->font_params.font_color.green = 1.0;
          txt_params->font_params.font_color.blue = 1.0;
          txt_params->font_params.font_color.alpha = 1.0;

          /* Text background color */
          txt_params->set_bg_clr = 1;
          txt_params->text_bg_clr.red = 0.0;
          txt_params->text_bg_clr.green = 0.0;
          txt_params->text_bg_clr.blue = 0.0;
          txt_params->text_bg_clr.alpha = 1.0;

          frame_meta->num_strings++;
        }
      }
    }
  }
  g_print ("Frame Number = %d Number of objects = %d "
      "Vehicle Count = %d Person Count = %d\n",
      frame_number, num_rects, vehicle_count, person_count);
  frame_number++;

  return GST_PAD_PROBE_OK;
}

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      // CHANGED
      gst_element_set_state (pipeline, GST_STATE_NULL);
      gst_element_set_state (pipeline, GST_STATE_PLAYING);
      // g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      g_free (debug);
      g_printerr ("Error: %s\n", error->message);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}

int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *source = NULL, *h264parser = NULL,
      *decoder = NULL, *sink = NULL, *pgie = NULL, *nvvidconv = NULL,
      *nvosd = NULL, *filter1 = NULL, *filter2 = NULL;
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstCaps *caps1 = NULL, *caps2 = NULL;
  gulong osd_probe_id = 0;
  GstPad *osd_sink_pad = NULL;

  /* Check input arguments */
  if (argc != 2) {
    g_printerr ("Usage: %s <H264 filename>\n", argv[0]);
    return -1;
  }

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest1-pipeline");

  /* Source element for reading from the file */
  source = gst_element_factory_make ("filesrc", "file-source");

  /* Since the data format in the input file is elementary h264 stream,
   * we need a h264parser */
  h264parser = gst_element_factory_make ("h264parse", "h264-parser");

  /* Use nvdec_h264 for hardware accelerated decode on GPU */
  decoder = gst_element_factory_make ("nvdec_h264", "nvh264-decoder");

  /* Use nvinfer to run inferencing on decoder's output,
   * behaviour of inferencing is set through config file */
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

  /* Use convertor to convert from NV12 to RGBA as required by nvosd */
  nvvidconv = gst_element_factory_make ("nvvidconv", "nvvideo-converter");

  /* Create OSD to draw on the converted RGBA buffer */
  nvosd = gst_element_factory_make ("nvosd", "nv-onscreendisplay");

  /* Finally render the osd output */
  // CHANGED : sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
  sink = gst_element_factory_make ("fakesink", "fakesink-output");

  /* caps filter for nvvidconv to convert NV12 to RGBA as nvosd expects input
   * in RGBA format */
  filter1 = gst_element_factory_make ("capsfilter", "filter1");
  filter2 = gst_element_factory_make ("capsfilter", "filter2");
  if (!pipeline || !source || !h264parser || !decoder || !pgie
      || !filter1 || !nvvidconv || !filter2 || !nvosd || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* we set the input filename to the source element */
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);

  /* Set all the necessary properties of the nvinfer element,
   * the necessary ones are : */
  g_object_set (G_OBJECT (pgie),
      "config-file-path", "dstest1_pgie_config.txt", NULL);

  /* we set the osd properties here */
  g_object_set (G_OBJECT (nvosd), "font-size", 15, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* we add all elements into the pipeline */
  gst_bin_add_many (GST_BIN (pipeline),
      source, h264parser, decoder, pgie,
      filter1, nvvidconv, filter2, nvosd, sink, NULL);
  caps1 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12");
  g_object_set (G_OBJECT (filter1), "caps", caps1, NULL);
  gst_caps_unref (caps1);
  caps2 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=RGBA");
  g_object_set (G_OBJECT (filter2), "caps", caps2, NULL);
  gst_caps_unref (caps2);

  /* we link the elements together */
  /* file-source -> h264-parser -> nvh264-decoder ->
   * nvinfer -> filter1 -> nvvidconv -> filter2 -> nvosd -> video-renderer */
  //gst_element_link_many (source, h264parser, decoder, pgie, filter1,
  //    nvvidconv, filter2, nvosd, sink, NULL);
  gst_element_link_many (source, h264parser, decoder, pgie, filter1,
      nvvidconv, filter2, nvosd, sink, NULL);

  /* Lets add probe to get informed of the meta data generated, we add probe to
   * the sink pad of the osd element, since by that time, the buffer would have
   * had got all the metadata. */
  osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
  if (!osd_sink_pad)
    g_print ("Unable to get sink pad\n");
  else
    osd_probe_id = gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        osd_sink_pad_buffer_probe, NULL, NULL);

  /* Set the pipeline to "playing" state */
  g_print ("Now playing: %s\n", argv[1]);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}

Hi Simon,
For your case, you can try multifilesrc:
https://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-good/html/gst-plugins-good-plugins-multifilesrc.html
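
As a minimal sketch of the idea (hedged: multifilesrc expands a printf-style index in its "location" pattern, so this assumes the incoming files follow a numbered naming scheme such as video_000.h264, video_001.h264, ...):

/* Replace the filesrc of deepstream-test1 with a multifilesrc that
 * walks through video_000.h264, video_001.h264, ... in sequence. */
source = gst_element_factory_make ("multifilesrc", "multi-file-source");
g_object_set (G_OBJECT (source), "location", "video_%03d.h264", NULL);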

Thanks.

This is a bit better than using "filesrc", but it is still not a feasible solution:

  • I do not know all the files/URIs in advance - they are continuously produced by an external source which I can not control
  • I will not always have control of the file names

As I understand the GStreamer design, any pipeline can be paused/stopped/restarted. Do the NVIDIA GStreamer plugins support this?

Simon.

Hi Simon,
Please re-init the pipeline as in the patch below:

diff --git a/apps/deepstream-test1/deepstream_test1_app.c b/apps/deepstream-test1/deepstream_test1_app.c
index 434b9ee..28accb5 100644
--- a/apps/deepstream-test1/deepstream_test1_app.c
+++ b/apps/deepstream-test1/deepstream_test1_app.c
@@ -174,6 +174,9 @@ main (int argc, char *argv[])
 
   /* Standard GStreamer initialization */
   gst_init (&argc, &argv);
+
+  for (guint i = 0; i < 2; i++) {
+
   loop = g_main_loop_new (NULL, FALSE);
 
   /* Create gstreamer elements */
@@ -272,6 +275,9 @@ main (int argc, char *argv[])
   gst_object_unref (GST_OBJECT (pipeline));
   g_source_remove (bus_watch_id);
   g_main_loop_unref (loop);
+
+  }
+
   return 0;

Hi,

This does allow running the pipeline twice, but it still does not solve the problem.

Your solution basically runs the same code twice, while my question was how to run the pipeline twice so that after the end of the first run I can change the source file, and during the second run there is no neural network initialization. Your patch only solves the first part.

Hi Simon,
Another suggestion is to run 'appsrc ! h264parse ! nvdec_h264 ! ...' and do the file open/read/close in appsrc.

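A rough sketch of that idea (an illustration, not a complete implementation: open_next_file() is a hypothetical application helper, and error handling is omitted):

/* appsrc sketch: push raw .h264 bytes into the pipeline from the
 * "need-data" callback. When one file is exhausted, open the next
 * file instead of sending EOS, so the pipeline and the loaded
 * inference engine stay up. */
static FILE *current_file = NULL;   /* opened by the application */

static void
need_data_cb (GstElement *appsrc, guint length, gpointer user_data)
{
  guint8 chunk[4096];
  size_t len = fread (chunk, 1, sizeof (chunk), current_file);

  if (len > 0) {
    GstFlowReturn flow;
    GstBuffer *buf = gst_buffer_new_allocate (NULL, len, NULL);
    gst_buffer_fill (buf, 0, chunk, len);
    g_signal_emit_by_name (appsrc, "push-buffer", buf, &flow);
    gst_buffer_unref (buf);
  } else {
    fclose (current_file);
    current_file = open_next_file ();   /* hypothetical helper */
  }
}

/* In main(), instead of the filesrc: */
source = gst_element_factory_make ("appsrc", "app-source");
g_signal_connect (source, "need-data", G_CALLBACK (need_data_cb), NULL);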
{
    data = &pipeline->multi_src_bin.sub_bins[i];
    seek_decode(data);
}


static gboolean
seek_decode (gpointer data)
{
  NvDsSrcBin *bin = (NvDsSrcBin *) data;
  gboolean ret;

  gst_element_set_state (bin->bin, GST_STATE_PAUSED);

  gst_pad_send_event (gst_element_get_static_pad (bin->tee, "sink"), gst_event_new_flush_start ());
  gst_pad_send_event (gst_element_get_static_pad (bin->tee, "sink"), gst_event_new_flush_stop (TRUE));

  ret = gst_element_seek (bin->bin, 1.0, GST_FORMAT_TIME,
      (GstSeekFlags) (GST_SEEK_FLAG_KEY_UNIT | GST_SEEK_FLAG_FLUSH), GST_SEEK_TYPE_SET, 0,
      GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);


  gst_element_set_state (bin->bin, GST_STATE_PLAYING);

done:
  return FALSE;
}

Can this meet your requirement?

I can try it, but can you elaborate on the solution? It looks like part of some larger code base and is hard to understand without the rest of it.

What is NvDsSrcBin?
When should "seek_decode" be called?

Thanks, Simon.

typedef struct
{
  GstElement *bin;
  GstElement *src_elem;
  GstElement *cap_filter;
  GstElement *enc_que;
  GstElement *dec_que;
  GstElement *decodebin;
  GstElement *filesink;
  GstElement *enc_filter;

  GstElement *encbin_que;
  GstElement *tee;
  GstElement *fakesink_queue;
  GstElement *fakesink;
  GMutex bin_lock;
  guint bin_id;
  gulong src_buffer_probe;
  gpointer bbox_meta;
  GstBuffer *inbuf;
  gchar *location;
  gchar *file;
  gchar *direction;
  gint latency;
  gboolean got_key_frame;
  gboolean eos_done;
  gboolean reset_done;
  gboolean live_source;
  gboolean reconfiguring;
} NvDsSrcBin;

You can call "seek_decode" at GST_MESSAGE_EOS.
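
For example, in the bus handler (a sketch based on the snippet above; picking the right sub-bin index is application-specific):

case GST_MESSAGE_EOS:
  g_print ("End of stream\n");
  /* restart the sub-bin whose stream just ended */
  seek_decode (&pipeline->multi_src_bin.sub_bins[i]);
  break;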

This code is for file looping; it is from the deepstream-app source code, which we will release in the next version.

Thanks.

Can you suggest at what point in the proposed code I can change the "location" property of the "filesrc"? It cannot be changed in the PAUSED state.

Also, you are sending the flush start/stop events to the "tee" element of the pipeline, which does not exist in the test1 app. Should I send them to the element that is connected to "filesrc"?

Thanks, Simon.

I have tried the following:

======================================
static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");

      // Changing source
      gst_element_set_state (pipeline, GST_STATE_PAUSED);

      gst_pad_send_event (gst_element_get_static_pad (source, "src"),
          gst_event_new_flush_start ());
      gst_pad_send_event (gst_element_get_static_pad (source, "src"),
          gst_event_new_flush_stop (TRUE));

      gst_element_set_state (source, GST_STATE_READY);
      g_object_set (G_OBJECT (source), "location",
          "../../../samples/streams/sample_720p.h264", NULL);
      gst_element_set_state (source, GST_STATE_PAUSED);

      gboolean ret = gst_element_seek (pipeline, 1.0, GST_FORMAT_TIME,
          (GstSeekFlags) (GST_SEEK_FLAG_KEY_UNIT | GST_SEEK_FLAG_FLUSH),
          GST_SEEK_TYPE_SET, 0, GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);

      gst_element_set_state (pipeline, GST_STATE_PLAYING);
      g_print ("Restarted\n");

=====================

This fails with the following error:

(deepstream-test1-app:36420): GStreamer-CRITICAL **: getrange on pad file-source:src but it was not activated in pull mode
ERROR from element h264-parser: Internal data stream error.
Error: Internal data stream error.
Returned, stopping playback
Deleting pipeline

Below is my test1 diff. It works.

diff --git a/apps/deepstream-test1/deepstream_test1_app.c b/apps/deepstream-test1/deepstream_test1_app.c
index 03133b4..f136769 100755
--- a/apps/deepstream-test1/deepstream_test1_app.c
+++ b/apps/deepstream-test1/deepstream_test1_app.c
@@ -125,6 +125,32 @@ osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
   return GST_PAD_PROBE_OK;
 }
 
+static GstElement* pPipeline = NULL;
+
+static gboolean seek_pipeline (gpointer data) {
+  GstElement *bin = (GstElement *) data;
+  gboolean ret;
+  g_print("%s line%d\n", __FUNCTION__, __LINE__);
+
+  gst_element_set_state (bin, GST_STATE_PAUSED);
+
+  //gst_pad_send_event (gst_element_get_static_pad (bin, "sink"), gst_event_new_flush_start ());
+  //gst_pad_send_event (gst_element_get_static_pad (bin, "sink"), gst_event_new_flush_stop (TRUE));
+
+  ret = gst_element_seek (bin, 1.0, GST_FORMAT_TIME,
+      (GstSeekFlags) (GST_SEEK_FLAG_KEY_UNIT | GST_SEEK_FLAG_FLUSH), GST_SEEK_TYPE_SET, 0,
+      GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);
+
+  gst_element_set_state (bin, GST_STATE_PLAYING);
+
+  g_print("%s line%d\n", __FUNCTION__, __LINE__);
+
+done:
+  return FALSE;
+}
+
 static gboolean
 bus_call (GstBus * bus, GstMessage * msg, gpointer data)
 {
@@ -132,7 +158,8 @@ bus_call (GstBus * bus, GstMessage * msg, gpointer data)
   switch (GST_MESSAGE_TYPE (msg)) {
     case GST_MESSAGE_EOS:
       g_print ("End of stream\n");
-      g_main_loop_quit (loop);
+      seek_pipeline(pPipeline);
+      //g_main_loop_quit (loop);
       break;
     case GST_MESSAGE_ERROR:{
       gchar *debug;
@@ -152,6 +179,7 @@ bus_call (GstBus * bus, GstMessage * msg, gpointer data)
   return TRUE;
 }
 
+
 int
 main (int argc, char *argv[])
 {
@@ -251,6 +279,9 @@ main (int argc, char *argv[])
    * the sink pad of the osd element, since by that time, the buffer would have
    * had got all the metadata. */
   osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
+
+  pPipeline = pipeline;
+
   if (!osd_sink_pad)
     g_print ("Unable to get sink pad\n");
   else

I think the missing part in your code, which causes the problem, is where the "location" property of "filesrc" is changed. Below is the code that reproduces the error (with the additions you proposed); the interesting part is in the bus_call() function. As I've mentioned above, the code fails with the following error:


(deepstream-test1-app:36420): GStreamer-CRITICAL **: getrange on pad file-source:src but it was not activated in pull mode
ERROR from element h264-parser: Internal data stream error.
Error: Internal data stream error.
Returned, stopping playback
Deleting pipeline

========================
/*
 * Copyright (c) 2018 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto.  Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 *
 */

#include <stdio.h>
#include <gst/gst.h>
#include <glib.h>

#include "gstnvdsmeta.h"

#define MAX_DISPLAY_LEN 64

#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
  "Roadsign"
};

// CHANGED
static GstElement *pipeline = NULL;
static GstElement *source = NULL;
static GstElement *h264parser = NULL;

/* osd_sink_pad_buffer_probe will extract metadata received on OSD sink pad
 * and update params for drawing rectangle, object information etc. */

static GstPadProbeReturn
osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{

  GstMeta *gst_meta = NULL;
  NvDsMeta *nvdsmeta = NULL;
  gpointer state = NULL;
  static GQuark _nvdsmeta_quark = 0;
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsFrameMeta *frame_meta = NULL;
  guint num_rects = 0, rect_index = 0, l_index = 0;
  NvDsObjectParams *obj_meta = NULL;
  guint i = 0;
  NvOSD_TextParams *txt_params = NULL;
  guint vehicle_count = 0;
  guint person_count = 0;

  if (!_nvdsmeta_quark)
    _nvdsmeta_quark = g_quark_from_static_string (NVDS_META_STRING);

  while ((gst_meta = gst_buffer_iterate_meta (buf, &state))) {
    if (gst_meta_api_type_has_tag (gst_meta->info->api, _nvdsmeta_quark)) {

      nvdsmeta = (NvDsMeta *) gst_meta;

      /* We are interested only in intercepting Meta of type
       * "NVDS_META_FRAME_INFO" as they are from our infer elements. */
      if (nvdsmeta->meta_type == NVDS_META_FRAME_INFO) {
        frame_meta = (NvDsFrameMeta *) nvdsmeta->meta_data;
        if (frame_meta == NULL) {
          g_print ("NvDS Meta contained NULL meta \n");
          return GST_PAD_PROBE_OK;
        }

        /* We reset the num_strings here as we plan to iterate through the
         *  the detected objects and form our own strings.
         *  The pipeline generated strings shall be discarded.
         */
        frame_meta->num_strings = 0;

        num_rects = frame_meta->num_rects;

        /* This means we have num_rects in frame_meta->obj_params,
         * now lets iterate through them */

        for (rect_index = 0; rect_index < num_rects; rect_index++) {
          /* Now using above information we need to form a text that should
           * be displayed on top of the bounding box, so lets form it here. */

          obj_meta = (NvDsObjectParams *) & frame_meta->obj_params[rect_index];

          txt_params = &(obj_meta->text_params);
          if (txt_params->display_text)
            g_free (txt_params->display_text);

          txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);

          g_snprintf (txt_params->display_text, MAX_DISPLAY_LEN, "%s ",
              pgie_classes_str[obj_meta->class_id]);

          if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE)
            vehicle_count++;
          if (obj_meta->class_id == PGIE_CLASS_ID_PERSON)
            person_count++;

          /* Now set the offsets where the string should appear */
          txt_params->x_offset = obj_meta->rect_params.left;
          txt_params->y_offset = obj_meta->rect_params.top - 25;

          /* Font , font-color and font-size */
          txt_params->font_params.font_name = "Arial";
          txt_params->font_params.font_size = 10;
          txt_params->font_params.font_color.red = 1.0;
          txt_params->font_params.font_color.green = 1.0;
          txt_params->font_params.font_color.blue = 1.0;
          txt_params->font_params.font_color.alpha = 1.0;

          /* Text background color */
          txt_params->set_bg_clr = 1;
          txt_params->text_bg_clr.red = 0.0;
          txt_params->text_bg_clr.green = 0.0;
          txt_params->text_bg_clr.blue = 0.0;
          txt_params->text_bg_clr.alpha = 1.0;

          frame_meta->num_strings++;
        }
      }
    }
  }

  if (frame_number % 100 == 0) {
    g_print ("Frame Number = %d Number of objects = %d "
        "Vehicle Count = %d Person Count = %d\n",
        frame_number, num_rects, vehicle_count, person_count);
  }
  frame_number++;

  return GST_PAD_PROBE_OK;
}

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      // CHANGED
      gst_element_set_state (pipeline, GST_STATE_PAUSED);

      gst_pad_send_event (gst_element_get_static_pad (source, "src"),
          gst_event_new_flush_start ());
      gst_pad_send_event (gst_element_get_static_pad (source, "src"),
          gst_event_new_flush_stop (TRUE));

      // Changing "location" - we have to change the state of "filesrc" to READY/NULL
      gst_element_set_state (source, GST_STATE_READY);
      g_object_set (G_OBJECT (source), "location",
          "../../../samples/streams/sample_720p.h264", NULL);
      gst_element_set_state (source, GST_STATE_PAUSED);

      gboolean ret = gst_element_seek (pipeline, 1.0, GST_FORMAT_TIME,
          (GstSeekFlags) (GST_SEEK_FLAG_KEY_UNIT | GST_SEEK_FLAG_FLUSH),
          GST_SEEK_TYPE_SET, 0, GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);

      gst_element_set_state (pipeline, GST_STATE_PLAYING);
      g_print ("Restarted\n");
      // g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      g_free (debug);
      g_printerr ("Error: %s\n", error->message);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}

int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *decoder = NULL, *sink = NULL, *pgie = NULL, *nvvidconv = NULL,
      *nvosd = NULL, *filter1 = NULL, *filter2 = NULL;
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstCaps *caps1 = NULL, *caps2 = NULL;
  gulong osd_probe_id = 0;
  GstPad *osd_sink_pad = NULL;

  /* Check input arguments */
  if (argc != 2) {
    g_printerr ("Usage: %s <H264 filename>\n", argv[0]);
    return -1;
  }

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest1-pipeline");

  /* Source element for reading from the file */
  source = gst_element_factory_make ("filesrc", "file-source");

  /* Since the data format in the input file is elementary h264 stream,
   * we need a h264parser */
  h264parser = gst_element_factory_make ("h264parse", "h264-parser");

  /* Use nvdec_h264 for hardware accelerated decode on GPU */
  decoder = gst_element_factory_make ("nvdec_h264", "nvh264-decoder");

  /* Use nvinfer to run inferencing on decoder's output,
   * behaviour of inferencing is set through config file */
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

  /* Use convertor to convert from NV12 to RGBA as required by nvosd */
  nvvidconv = gst_element_factory_make ("nvvidconv", "nvvideo-converter");

  /* Create OSD to draw on the converted RGBA buffer */
  nvosd = gst_element_factory_make ("nvosd", "nv-onscreendisplay");

  /* Finally render the osd output */
  // CHANGED : sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
  sink = gst_element_factory_make ("fakesink", "fakesink-output");

  /* caps filter for nvvidconv to convert NV12 to RGBA as nvosd expects input
   * in RGBA format */
  filter1 = gst_element_factory_make ("capsfilter", "filter1");
  filter2 = gst_element_factory_make ("capsfilter", "filter2");
  if (!pipeline || !source || !h264parser || !decoder || !pgie
      || !filter1 || !nvvidconv || !filter2 || !nvosd || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* we set the input filename to the source element */
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);

  /* Set all the necessary properties of the nvinfer element,
   * the necessary ones are : */
  g_object_set (G_OBJECT (pgie),
      "config-file-path", "dstest1_pgie_config.txt", NULL);

  /* we set the osd properties here */
  g_object_set (G_OBJECT (nvosd), "font-size", 15, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* we add all elements into the pipeline */
  gst_bin_add_many (GST_BIN (pipeline),
      source, h264parser, decoder, pgie,
      filter1, nvvidconv, filter2, nvosd, sink, NULL);
  caps1 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12");
  g_object_set (G_OBJECT (filter1), "caps", caps1, NULL);
  gst_caps_unref (caps1);
  caps2 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=RGBA");
  g_object_set (G_OBJECT (filter2), "caps", caps2, NULL);
  gst_caps_unref (caps2);

  /* we link the elements together */
  /* file-source -> h264-parser -> nvh264-decoder ->
   * nvinfer -> filter1 -> nvvidconv -> filter2 -> nvosd -> video-renderer */
  //gst_element_link_many (source, h264parser, decoder, pgie, filter1,
  //    nvvidconv, filter2, nvosd, sink, NULL);
  gst_element_link_many (source, h264parser, decoder, pgie, filter1,
      nvvidconv, filter2, nvosd, sink, NULL);

  /* Lets add probe to get informed of the meta data generated, we add probe to
   * the sink pad of the osd element, since by that time, the buffer would have
   * had got all the metadata. */
  osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
  if (!osd_sink_pad)
    g_print ("Unable to get sink pad\n");
  else
    osd_probe_id = gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        osd_sink_pad_buffer_probe, NULL, NULL);

  /* Set the pipeline to "playing" state */
  g_print ("Now playing: %s\n", argv[1]);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}

I have copied all my code here.
If you still have a problem, please download the latest DeepStream 2.0 SDK and try again.

/*
 * Copyright (c) 2018 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto.  Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 *
 */

#include <gst/gst.h>
#include <glib.h>

#include "gstnvdsmeta.h"

#define MAX_DISPLAY_LEN 64

#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
  "Roadsign"
};

/* osd_sink_pad_buffer_probe  will extract metadata received on OSD sink pad
 * and update params for drawing rectangle, object information etc. */

static GstPadProbeReturn
osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{

  GstMeta *gst_meta = NULL;
  NvDsMeta *nvdsmeta = NULL;
  gpointer state = NULL;
  static GQuark _nvdsmeta_quark = 0;
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsFrameMeta *frame_meta = NULL;
  guint num_rects = 0, rect_index = 0, l_index = 0;
  NvDsObjectParams *obj_meta = NULL;
  guint i = 0;
  NvOSD_TextParams *txt_params = NULL;
  guint vehicle_count = 0;
  guint person_count = 0;

  if (!_nvdsmeta_quark)
    _nvdsmeta_quark = g_quark_from_static_string (NVDS_META_STRING);

  while ((gst_meta = gst_buffer_iterate_meta (buf, &state))) {
    if (gst_meta_api_type_has_tag (gst_meta->info->api, _nvdsmeta_quark)) {

      nvdsmeta = (NvDsMeta *) gst_meta;

      /* We are interested only in intercepting Meta of type
       * "NVDS_META_FRAME_INFO" as they are from our infer elements. */
      if (nvdsmeta->meta_type == NVDS_META_FRAME_INFO) {
        frame_meta = (NvDsFrameMeta *) nvdsmeta->meta_data;
        if (frame_meta == NULL) {
          g_print ("NvDS Meta contained NULL meta \n");
          return GST_PAD_PROBE_OK;
        }

        /* We reset the num_strings here as we plan to iterate through the
         *  the detected objects and form our own strings.
         *  The pipeline generated strings shall be discarded.
         */
        frame_meta->num_strings = 0;

        num_rects = frame_meta->num_rects;

        /* This means we have num_rects in frame_meta->obj_params,
         * now lets iterate through them */

        for (rect_index = 0; rect_index < num_rects; rect_index++) {
          /* Now using above information we need to form a text that should
           * be displayed on top of the bounding box, so lets form it here. */

          obj_meta = (NvDsObjectParams *) & frame_meta->obj_params[rect_index];

          txt_params = &(obj_meta->text_params);
          if (txt_params->display_text)
            g_free (txt_params->display_text);

          txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);

          g_snprintf (txt_params->display_text, MAX_DISPLAY_LEN, "%s ",
              pgie_classes_str[obj_meta->class_id]);

          if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE)
            vehicle_count++;
          if (obj_meta->class_id == PGIE_CLASS_ID_PERSON)
            person_count++;

          /* Now set the offsets where the string should appear */
          txt_params->x_offset = obj_meta->rect_params.left;
          txt_params->y_offset = obj_meta->rect_params.top - 25;

          /* Font , font-color and font-size */
          txt_params->font_params.font_name = "Arial";
          txt_params->font_params.font_size = 10;
          txt_params->font_params.font_color.red = 1.0;
          txt_params->font_params.font_color.green = 1.0;
          txt_params->font_params.font_color.blue = 1.0;
          txt_params->font_params.font_color.alpha = 1.0;

          /* Text background color */
          txt_params->set_bg_clr = 1;
          txt_params->text_bg_clr.red = 0.0;
          txt_params->text_bg_clr.green = 0.0;
          txt_params->text_bg_clr.blue = 0.0;
          txt_params->text_bg_clr.alpha = 1.0;

          frame_meta->num_strings++;
        }
      }
    }
  }
  g_print ("Frame Number = %d Number of objects = %d "
      "Vehicle Count = %d Person Count = %d\n",
      frame_number, num_rects, vehicle_count, person_count);
  frame_number++;

  return GST_PAD_PROBE_OK;
}

static GstElement* pPipeline = NULL;

static gboolean seek_pipeline (gpointer data) {
  GstElement *bin = (GstElement *) data;
  gboolean ret;
  g_print("%s line%d\n", __FUNCTION__, __LINE__);

  gst_element_set_state (bin, GST_STATE_PAUSED);

  //gst_pad_send_event (gst_element_get_static_pad (bin, "sink"), gst_event_new_flush_start ());
  //gst_pad_send_event (gst_element_get_static_pad (bin, "sink"), gst_event_new_flush_stop (TRUE));

  ret = gst_element_seek (bin, 1.0, GST_FORMAT_TIME,
      (GstSeekFlags) (GST_SEEK_FLAG_KEY_UNIT | GST_SEEK_FLAG_FLUSH), GST_SEEK_TYPE_SET, 0,
      GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);


  gst_element_set_state (bin, GST_STATE_PLAYING);

  g_print("%s line%d\n", __FUNCTION__, __LINE__);

done:
  return FALSE;
}


static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      seek_pipeline(pPipeline);
      //g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      g_free (debug);
      g_printerr ("Error: %s\n", error->message);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}


int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *source = NULL, *h264parser =
      NULL,
      *decoder = NULL, *sink = NULL, *pgie = NULL, *nvvidconv =
      NULL, *nvosd = NULL, *filter1 = NULL, *filter2 = NULL;
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstCaps *caps1 = NULL, *caps2 = NULL;
  gulong osd_probe_id = 0;
  GstPad *osd_sink_pad = NULL;

  /* Check input arguments */
  if (argc != 2) {
    g_printerr ("Usage: %s <H264 filename>\n", argv[0]);
    return -1;
  }

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest1-pipeline");

  /* Source element for reading from the file */
  source = gst_element_factory_make ("filesrc", "file-source");

  /* Since the data format in the input file is elementary h264 stream,
   * we need a h264parser */
  h264parser = gst_element_factory_make ("h264parse", "h264-parser");

  /* Use nvdec_h264 for hardware accelerated decode on GPU */
  decoder = gst_element_factory_make ("nvdec_h264", "nvh264-decoder");

  /* Use nvinfer to run inferencing on decoder's output,
   * behaviour of inferencing is set through config file */
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

  /* Use convertor to convert from NV12 to RGBA as required by nvosd */
  nvvidconv = gst_element_factory_make ("nvvidconv", "nvvideo-converter");

  /* Create OSD to draw on the converted RGBA buffer */
  nvosd = gst_element_factory_make ("nvosd", "nv-onscreendisplay");

  /* Finally render the osd output */
  sink = gst_element_factory_make ("fakesink", "nvvideo-renderer");

  /* caps filter for nvvidconv to convert NV12 to RGBA as nvosd expects input
   * in RGBA format */
  filter1 = gst_element_factory_make ("capsfilter", "filter1");
  filter2 = gst_element_factory_make ("capsfilter", "filter2");
  if (!pipeline || !source || !h264parser || !decoder || !pgie
      || !filter1 || !nvvidconv || !filter2 || !nvosd || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* we set the input filename to the source element */
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);

  /* Set all the necessary properties of the nvinfer element,
   * the necessary ones are : */
  g_object_set (G_OBJECT (pgie),
      "config-file-path", "dstest1_pgie_config.txt", NULL);

  /* we set the osd properties here */
  g_object_set (G_OBJECT (nvosd), "font-size", 15, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* we add all elements into the pipeline */
  gst_bin_add_many (GST_BIN (pipeline),
      source, h264parser, decoder, pgie,
      filter1, nvvidconv, filter2, nvosd, sink, NULL);
  caps1 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12");
  g_object_set (G_OBJECT (filter1), "caps", caps1, NULL);
  gst_caps_unref (caps1);
  caps2 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=RGBA");
  g_object_set (G_OBJECT (filter2), "caps", caps2, NULL);
  gst_caps_unref (caps2);

  /* we link the elements together */
  /* file-source -> h264-parser -> nvh264-decoder ->
   * nvinfer -> filter1 -> nvvidconv -> filter2 -> nvosd -> video-renderer */
  gst_element_link_many (source, h264parser, decoder, pgie, filter1,
      nvvidconv, filter2, nvosd, sink, NULL);

  /* Lets add probe to get informed of the meta data generated, we add probe to
   * the sink pad of the osd element, since by that time, the buffer would have
   * had got all the metadata. */
  osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");

  pPipeline = pipeline;

  if (!osd_sink_pad)
    g_print ("Unable to get sink pad\n");
  else
    osd_probe_id = gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        osd_sink_pad_buffer_probe, NULL, NULL);

  /* Set the pipeline to "playing" state */
  g_print ("Now playing: %s\n", argv[1]);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;


}

Hi Chris,

I've downloaded the latest DeepStream 2.0 and the problem still exists.
Your code works because you do not change the "location" of "filesrc".

If you add the following lines to your "seek_pipeline" function, you will see the problem:

GstElement* source = gst_bin_get_by_name((GstBin*) data, "file-source");
gst_element_set_state(source, GST_STATE_READY);
g_object_set (G_OBJECT (source), "location", , NULL); // ← set the correct "filename" value here
gst_element_set_state(source, GST_STATE_PAUSED);

The error I see:

(deepstream-test1-app:42348): GStreamer-CRITICAL **: getrange on pad file-source:src but it was not activated in pull mode
ERROR from element h264-parser: Internal data stream error.

Here is the full code (I've only added the lines above to your code):

/*
 * Copyright (c) 2018 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto.  Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 *
 */

#include <string.h>
#include <gst/gst.h>
#include <glib.h>

#include "gstnvdsmeta.h"

#define MAX_DISPLAY_LEN 64

#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
  "Roadsign"
};

static char filename[256];

/* osd_sink_pad_buffer_probe  will extract metadata received on OSD sink pad
 * and update params for drawing rectangle, object information etc. */

static GstPadProbeReturn
osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{

  GstMeta *gst_meta = NULL;
  NvDsMeta *nvdsmeta = NULL;
  gpointer state = NULL;
  static GQuark _nvdsmeta_quark = 0;
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsFrameMeta *frame_meta = NULL;
  guint num_rects = 0, rect_index = 0, l_index = 0;
  NvDsObjectParams *obj_meta = NULL;
  guint i = 0;
  NvOSD_TextParams *txt_params = NULL;
  guint vehicle_count = 0;
  guint person_count = 0;

  if (!_nvdsmeta_quark)
    _nvdsmeta_quark = g_quark_from_static_string (NVDS_META_STRING);

  while ((gst_meta = gst_buffer_iterate_meta (buf, &state))) {
    if (gst_meta_api_type_has_tag (gst_meta->info->api, _nvdsmeta_quark)) {

      nvdsmeta = (NvDsMeta *) gst_meta;

      /* We are interested only in intercepting Meta of type
       * "NVDS_META_FRAME_INFO" as they are from our infer elements. */
      if (nvdsmeta->meta_type == NVDS_META_FRAME_INFO) {
        frame_meta = (NvDsFrameMeta *) nvdsmeta->meta_data;
        if (frame_meta == NULL) {
          g_print ("NvDS Meta contained NULL meta \n");
          return GST_PAD_PROBE_OK;
        }

        /* We reset the num_strings here as we plan to iterate through the
         *  the detected objects and form our own strings.
         *  The pipeline generated strings shall be discarded.
         */
        frame_meta->num_strings = 0;

        num_rects = frame_meta->num_rects;

        /* This means we have num_rects in frame_meta->obj_params,
         * now lets iterate through them */

        for (rect_index = 0; rect_index < num_rects; rect_index++) {
          /* Now using above information we need to form a text that should
           * be displayed on top of the bounding box, so lets form it here. */

          obj_meta = (NvDsObjectParams *) & frame_meta->obj_params[rect_index];

          txt_params = &(obj_meta->text_params);
          if (txt_params->display_text)
            g_free (txt_params->display_text);

          txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);

          g_snprintf (txt_params->display_text, MAX_DISPLAY_LEN, "%s ",
              pgie_classes_str[obj_meta->class_id]);

          if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE)
            vehicle_count++;
          if (obj_meta->class_id == PGIE_CLASS_ID_PERSON)
            person_count++;

          /* Now set the offsets where the string should appear */
          txt_params->x_offset = obj_meta->rect_params.left;
          txt_params->y_offset = obj_meta->rect_params.top - 25;

          /* Font , font-color and font-size */
          txt_params->font_params.font_name = "Arial";
          txt_params->font_params.font_size = 10;
          txt_params->font_params.font_color.red = 1.0;
          txt_params->font_params.font_color.green = 1.0;
          txt_params->font_params.font_color.blue = 1.0;
          txt_params->font_params.font_color.alpha = 1.0;

          /* Text background color */
          txt_params->set_bg_clr = 1;
          txt_params->text_bg_clr.red = 0.0;
          txt_params->text_bg_clr.green = 0.0;
          txt_params->text_bg_clr.blue = 0.0;
          txt_params->text_bg_clr.alpha = 1.0;

          frame_meta->num_strings++;
        }
      }
    }
  }
  g_print ("Frame Number = %d Number of objects = %d "
      "Vehicle Count = %d Person Count = %d\n",
      frame_number, num_rects, vehicle_count, person_count);
  frame_number++;

  return GST_PAD_PROBE_OK;
}

static GstElement* pPipeline = NULL;

static gboolean seek_pipeline (gpointer data) {
  GstElement *bin = (GstElement *) data;
  gboolean ret;
  g_print("%s line%d\n", __FUNCTION__, __LINE__);

  gst_element_set_state (bin, GST_STATE_PAUSED);

  GstElement* source = gst_bin_get_by_name((GstBin*) data, "file-source");
  gst_element_set_state(source, GST_STATE_READY);
  g_object_set (G_OBJECT (source), "location", filename, NULL);
  gst_element_set_state(source, GST_STATE_PAUSED);

  //gst_pad_send_event (gst_element_get_static_pad (bin, "sink"), gst_event_new_flush_start ());
  //gst_pad_send_event (gst_element_get_static_pad (bin, "sink"), gst_event_new_flush_stop (TRUE));

  ret = gst_element_seek (bin, 1.0, GST_FORMAT_TIME,
      (GstSeekFlags) (GST_SEEK_FLAG_KEY_UNIT | GST_SEEK_FLAG_FLUSH), GST_SEEK_TYPE_SET, 0,
      GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);


  gst_element_set_state (bin, GST_STATE_PLAYING);

  g_print("%s line%d\n", __FUNCTION__, __LINE__);

done:
  return FALSE;
}


static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      seek_pipeline(pPipeline);
      //g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      g_free (debug);
      g_printerr ("Error: %s\n", error->message);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}


int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *source = NULL, *h264parser =
      NULL,
      *decoder = NULL, *sink = NULL, *pgie = NULL, *nvvidconv =
      NULL, *nvosd = NULL, *filter1 = NULL, *filter2 = NULL;
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstCaps *caps1 = NULL, *caps2 = NULL;
  gulong osd_probe_id = 0;
  GstPad *osd_sink_pad = NULL;

  /* Check input arguments */
  if (argc != 2) {
    g_printerr ("Usage: %s <H264 filename>\n", argv[0]);
    return -1;
  }

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  strcpy(filename, argv[1]);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest1-pipeline");

  /* Source element for reading from the file */
  source = gst_element_factory_make ("filesrc", "file-source");

  /* Since the data format in the input file is elementary h264 stream,
   * we need a h264parser */
  h264parser = gst_element_factory_make ("h264parse", "h264-parser");

  /* Use nvdec_h264 for hardware accelerated decode on GPU */
  decoder = gst_element_factory_make ("nvdec_h264", "nvh264-decoder");

  /* Use nvinfer to run inferencing on decoder's output,
   * behaviour of inferencing is set through config file */
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

  /* Use convertor to convert from NV12 to RGBA as required by nvosd */
  nvvidconv = gst_element_factory_make ("nvvidconv", "nvvideo-converter");

  /* Create OSD to draw on the converted RGBA buffer */
  nvosd = gst_element_factory_make ("nvosd", "nv-onscreendisplay");

  /* Finally render the osd output */
  sink = gst_element_factory_make ("fakesink", "nvvideo-renderer");

  /* caps filter for nvvidconv to convert NV12 to RGBA as nvosd expects input
   * in RGBA format */
  filter1 = gst_element_factory_make ("capsfilter", "filter1");
  filter2 = gst_element_factory_make ("capsfilter", "filter2");
  if (!pipeline || !source || !h264parser || !decoder || !pgie
      || !filter1 || !nvvidconv || !filter2 || !nvosd || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* we set the input filename to the source element */
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);

  /* Set all the necessary properties of the nvinfer element,
   * the necessary ones are : */
  g_object_set (G_OBJECT (pgie),
      "config-file-path", "dstest1_pgie_config.txt", NULL);

  /* we set the osd properties here */
  g_object_set (G_OBJECT (nvosd), "font-size", 15, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* we add all elements into the pipeline */
  gst_bin_add_many (GST_BIN (pipeline),
      source, h264parser, decoder, pgie,
      filter1, nvvidconv, filter2, nvosd, sink, NULL);
  caps1 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12");
  g_object_set (G_OBJECT (filter1), "caps", caps1, NULL);
  gst_caps_unref (caps1);
  caps2 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=RGBA");
  g_object_set (G_OBJECT (filter2), "caps", caps2, NULL);
  gst_caps_unref (caps2);

  /* we link the elements together */
  /* file-source -> h264-parser -> nvh264-decoder ->
   * nvinfer -> filter1 -> nvvidconv -> filter2 -> nvosd -> video-renderer */
  gst_element_link_many (source, h264parser, decoder, pgie, filter1,
      nvvidconv, filter2, nvosd, sink, NULL);

  /* Lets add probe to get informed of the meta data generated, we add probe to
   * the sink pad of the osd element, since by that time, the buffer would have
   * had got all the metadata. */
  osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");

  pPipeline = pipeline;

  if (!osd_sink_pad)
    g_print ("Unable to get sink pad\n");
  else
    osd_probe_id = gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        osd_sink_pad_buffer_probe, NULL, NULL);

  /* Set the pipeline to "playing" state */
  g_print ("Now playing: %s\n", argv[1]);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;


}

Dynamic pipeline for switching between multiple file sources

/**
MIT License

Copyright (c) 2018 NVIDIA CORPORATION. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*
*/

#include <glib.h>
#include <gst/gst.h>
#include <gst/gstpipeline.h>

#include "gstnvdsmeta.h"

/* As defined in the yolo plugins header*/

#define YOLO_UNIQUE_ID 15

gint frame_number = 0;
gboolean flag = TRUE;
GMainLoop* loop = NULL;

static gboolean source_switch_thread(gpointer* data)
{
    GstElement* pipeline = (GstElement*) data;
    GstElement* source = gst_bin_get_by_name(GST_BIN(pipeline), "file-source");
    GstElement* h264parser = gst_bin_get_by_name(GST_BIN(pipeline), "h264-parser");
    GstElement* sink = gst_bin_get_by_name(GST_BIN(pipeline), "nvvideo-renderer");
    gst_element_set_state(pipeline, GST_STATE_PAUSED);
    GstStateChangeReturn ret = GST_STATE_CHANGE_FAILURE;
    ret = gst_element_set_state(source, GST_STATE_NULL);
    if (ret == GST_STATE_CHANGE_FAILURE)
    {
        g_print("Unable to set state change for source element \n");
        g_main_loop_quit(loop);
    }
    if (flag)
    {
        g_object_set(G_OBJECT(source), "location", "./sample_720p_2.h264", NULL);
        flag = FALSE;
    }
    else
    {
        g_object_set(G_OBJECT(source), "location", "./sample_720p.h264", NULL);
        flag = TRUE;
    }
    /* re-activate the parser sink pad in push mode before restarting */
    gst_pad_activate_mode(gst_element_get_static_pad(h264parser, "sink"), GST_PAD_MODE_PUSH, TRUE);
    gst_element_sync_state_with_parent(h264parser);
    gst_element_sync_state_with_parent(source);
    gst_element_sync_state_with_parent(sink);
    frame_number = 0;
    gst_element_set_state(pipeline, GST_STATE_PLAYING);
    return FALSE;
}

static GstPadProbeReturn eos_probe_cb(GstPad* pad, GstPadProbeInfo* info, gpointer u_data)
{
    if (GST_EVENT_TYPE(GST_PAD_PROBE_INFO_DATA(info)) != GST_EVENT_EOS) { return GST_PAD_PROBE_OK; }

    gst_element_seek((GstElement*) u_data, 1.0, GST_FORMAT_TIME,
                     (GstSeekFlags)(GST_SEEK_FLAG_FLUSH), GST_SEEK_TYPE_SET, 0, GST_SEEK_TYPE_NONE,
                     GST_CLOCK_TIME_NONE);

    g_idle_add((GSourceFunc) source_switch_thread, u_data);
    return GST_PAD_PROBE_DROP;
}

/* osd_sink_pad_buffer_probe  will extract metadata received on OSD sink pad
 * and get a count of objects of interest */

static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad* pad, GstPadProbeInfo* info,
                                                   gpointer u_data)
{

    GstMeta* gst_meta = NULL;
    NvDsMeta* nvdsmeta = NULL;
    gpointer state = NULL;
    static GQuark _nvdsmeta_quark = 0;
    GstBuffer* buf = (GstBuffer*) info->data;
    NvDsFrameMeta* frame_meta = NULL;
    guint num_rects = 0, rect_index = 0;
    NvDsObjectParams* obj_meta = NULL;
    guint car_count = 0;
    guint person_count = 0;
    guint bicycle_count = 0;
    guint truck_count = 0;

    if (!_nvdsmeta_quark) _nvdsmeta_quark = g_quark_from_static_string(NVDS_META_STRING);

    while ((gst_meta = gst_buffer_iterate_meta(buf, &state)))
    {
        if (gst_meta_api_type_has_tag(gst_meta->info->api, _nvdsmeta_quark))
        {

            nvdsmeta = (NvDsMeta*) gst_meta;

            /* We are interested only in intercepting Meta of type
             * "NVDS_META_FRAME_INFO" as they are from our infer elements. */
            if (nvdsmeta->meta_type == NVDS_META_FRAME_INFO)
            {
                frame_meta = (NvDsFrameMeta*) nvdsmeta->meta_data;
                if (frame_meta == NULL)
                {
                    g_print("NvDS Meta contained NULL meta \n");
                    return GST_PAD_PROBE_OK;
                }

                num_rects = frame_meta->num_rects;

                /* This means we have num_rects in frame_meta->obj_params.
                 * Now lets iterate through them and count the number of cars,
                 * trucks, persons and bicycles in each frame */

                for (rect_index = 0; rect_index < num_rects; rect_index++)
                {
                    obj_meta = (NvDsObjectParams*) &frame_meta->obj_params[rect_index];
                    if (!g_strcmp0(obj_meta->attr_info[YOLO_UNIQUE_ID].attr_label, "car"))
                        car_count++;
                    else if (!g_strcmp0(obj_meta->attr_info[YOLO_UNIQUE_ID].attr_label, "person"))
                        person_count++;
                    else if (!g_strcmp0(obj_meta->attr_info[YOLO_UNIQUE_ID].attr_label, "bicycle"))
                        bicycle_count++;
                    else if (!g_strcmp0(obj_meta->attr_info[YOLO_UNIQUE_ID].attr_label, "truck"))
                        truck_count++;
                }
            }
        }
    }
    g_print(
        "Frame Number = %d Number of objects = %d "
        "Car Count = %d Person Count = %d "
        "Bicycle Count = %d Truck Count = %d \n",
        frame_number, num_rects, car_count, person_count, bicycle_count, truck_count);
    frame_number++;

    return GST_PAD_PROBE_OK;
}

static gboolean bus_call(GstBus* bus, GstMessage* msg, gpointer data)
{
    GMainLoop* loop = (GMainLoop*) data;
    switch (GST_MESSAGE_TYPE(msg))
    {
    case GST_MESSAGE_EOS:
        g_print("End of stream\n");
        g_main_loop_quit(loop);
        break;
    case GST_MESSAGE_ERROR:
    {
        gchar* debug;
        GError* error;
        gst_message_parse_error(msg, &error, &debug);
        g_printerr("ERROR from element %s: %s\n", GST_OBJECT_NAME(msg->src), error->message);
        g_free(debug);
        g_printerr("Error: %s\n", error->message);
        g_error_free(error);
        g_main_loop_quit(loop);
        break;
    }
    default: break;
    }
    return TRUE;
}

int main(int argc, char* argv[])
{

    GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL, *decoder = NULL, *sink = NULL,
               *nvvidconv = NULL, *nvosd = NULL, *filter1 = NULL, *filter2 = NULL, *yolo = NULL;
    GstBus* bus = NULL;
    guint bus_watch_id;
    GstCaps *caps1 = NULL, *caps2 = NULL;
    gulong osd_sink_probe_id = 0, osd_src_probe_id = 0;
    GstPad *osd_sink_pad = NULL, *osd_src_pad = NULL;

    /* Check input arguments */
    if (argc != 2)
    {
        g_printerr("Usage: %s <H264 filename>\n", argv[0]);
        return -1;
    }

    /* Standard GStreamer initialization */
    gst_init(&argc, &argv);
    loop = g_main_loop_new(NULL, FALSE);

    /* Create gstreamer elements */
    /* Create Pipeline element that will form a connection of other elements */
    pipeline = gst_pipeline_new("dstest1-pipeline");

    /* Source element for reading from the file */
    source = gst_element_factory_make("filesrc", "file-source");

    /* Since the data format in the input file is elementary h264 stream,
     * we need a h264parser */
    h264parser = gst_element_factory_make("h264parse", "h264-parser");

    /* Use nvdec_h264 for hardware accelerated decode on GPU */
    decoder = gst_element_factory_make("nvdec_h264", "nvh264-decoder");

    /* Use convertor to convert from NV12 to RGBA as required by nvosd and yolo plugins */
    nvvidconv = gst_element_factory_make("nvvidconv", "nvvideo-converter");

    /* Use yolo to run inference instead of pgie */
    yolo = gst_element_factory_make("nvyolo", "yolo-inference-engine");

    /* Create OSD to draw on the converted RGBA buffer */
    nvosd = gst_element_factory_make("nvosd", "nv-onscreendisplay");

    /* Finally render the osd output */
    sink = gst_element_factory_make("nveglglessink", "nvvideo-renderer");

    /* caps filter for nvvidconv to convert NV12 to RGBA as nvosd expects input
     * in RGBA format */
    filter1 = gst_element_factory_make("capsfilter", "filter1");
    filter2 = gst_element_factory_make("capsfilter", "filter2");
    if (!pipeline || !source || !h264parser || !decoder || !filter1 || !nvvidconv || !filter2
        || !nvosd || !sink || !yolo)
    {
        g_printerr("One element could not be created. Exiting.\n");
        return -1;
    }

    /* we set the input filename to the source element */
    g_object_set(G_OBJECT(source), "location", argv[1], NULL);

    /* we set the osd properties here */
    g_object_set(G_OBJECT(nvosd), "font-size", 15, NULL);

    /* we add a message handler */
    bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
    bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
    gst_object_unref(bus);

    /* Set up the pipeline */
    /* we add all elements into the pipeline */
    gst_bin_add_many(GST_BIN(pipeline), source, h264parser, decoder, filter1, nvvidconv, filter2,
                     yolo, nvosd, sink, NULL);
    caps1 = gst_caps_from_string("video/x-raw(memory:NVMM), format=NV12");
    g_object_set(G_OBJECT(filter1), "caps", caps1, NULL);
    gst_caps_unref(caps1);
    caps2 = gst_caps_from_string("video/x-raw(memory:NVMM), format=RGBA");
    g_object_set(G_OBJECT(filter2), "caps", caps2, NULL);
    gst_caps_unref(caps2);

    /* we link the elements together */
    /* file-source -> h264-parser -> nvh264-decoder ->
     * filter1 -> nvvidconv -> filter2 -> yolo -> nvosd -> video-renderer */
    gst_element_link_many(source, h264parser, decoder, filter1, nvvidconv, filter2, yolo, nvosd,
                          sink, NULL);

    /* Lets add probe to get informed of the meta data generated, we add probe to
     * the sink pad of the osd element, since by that time, the buffer would have
     * had got all the metadata. */
    osd_sink_pad = gst_element_get_static_pad(nvosd, "sink");
    if (!osd_sink_pad)
        g_print("Unable to get OSD sink pad\n");
    else
        osd_sink_probe_id = gst_pad_add_probe(osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
                                              osd_sink_pad_buffer_probe, NULL, NULL);

    osd_src_pad = gst_element_get_static_pad(nvosd, "src");
    if (!osd_src_pad)
        g_print("Unable to get OSD src pad \n");
    else
        osd_src_probe_id = gst_pad_add_probe(osd_src_pad, GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM,
                                             eos_probe_cb, pipeline, NULL);

    /* Set the pipeline to "playing" state */
    g_print("Now playing: %s\n", argv[1]);
    gst_element_set_state(pipeline, GST_STATE_PLAYING);

    /* Wait till pipeline encounters an error or EOS */
    g_print("Running...\n");
    g_main_loop_run(loop);

    /* Out of the main loop, clean up nicely */
    g_print("Returned, stopping playback\n");
    gst_element_set_state(pipeline, GST_STATE_NULL);
    g_print("Deleting pipeline\n");
    gst_object_unref(GST_OBJECT(pipeline));
    g_source_remove(bus_watch_id);
    g_main_loop_unref(loop);
    return 0;
}

Great!

Thanks a lot, this does solve the problem for me. Great solution!

Can you please elaborate a bit on why it works, in case I run into something similar in the future?
Specifically,

  1. When is the callback function of "g_idle_add" called by the pipeline?
  2. Why do you need to remove the "filesrc" element and create it anew?
  3. Why do you need to explicitly set the pad activation mode for h264parser in this case?

Thanks a lot,
Simon.

Hi,

I am trying to run your code in DeepStream 5.0, but after changing the location of the source, the program hangs.