Encoding and saving to a file with deepstream_test1_app.c

I’m trying to modify deepstream_test1_app.c to save the output of nvosd to a file (.h264 or .mp4). I have tried a few pipelines but have not obtained a playable output file.

My last attempt has been:

gst_element_link_many(source, h264parser, decoder, pgie, filter1, nvvidconv,
    filter2, nvosd, converter, filter3, qtmux, sink, NULL);

where filter3's caps are set to "video/x-h264".

When I try this pipeline the app runs and the detections are printed out, but then it hangs and never finishes cleanly. If I interrupt it with Ctrl-C, the output file exists but contains zero bytes.
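(A note on the zero-byte file: MP4 muxers such as qtmux write the stream headers only when EOS reaches them, so a pipeline killed with Ctrl-C never finalizes the file. Also worth noting: there is no H.264 encoder in the pipeline above, and videoconvert cannot produce video/x-h264 on its own, so the filter3 link cannot negotiate; the accepted patch below adds x264enc for exactly this reason. The following is a minimal standalone sketch of the EOS point, using only stock GStreamer elements, no DeepStream; videotestsrc, the 3-second duration and the output name are purely illustrative.)

/* eos_sketch.c — why Ctrl-C leaves an empty MP4: qtmux writes the file
 * headers (moov atom) only after it receives EOS.
 * Build: gcc eos_sketch.c -o eos_sketch $(pkg-config --cflags --libs gstreamer-1.0)
 */
#include <gst/gst.h>

int main(int argc, char *argv[])
{
  gst_init(&argc, &argv);

  GError *err = NULL;
  GstElement *pipeline = gst_parse_launch(
      "videotestsrc is-live=true ! x264enc ! qtmux ! "
      "filesink location=sketch.mp4", &err);
  if (!pipeline) {
    g_printerr("Parse error: %s\n", err->message);
    return -1;
  }

  gst_element_set_state(pipeline, GST_STATE_PLAYING);
  g_usleep(3 * G_USEC_PER_SEC);   /* record ~3 seconds */

  /* The crucial step Ctrl-C skips: push EOS through the pipeline and
   * wait until it has reached the sink, so qtmux can finalize the file. */
  gst_element_send_event(pipeline, gst_event_new_eos());
  GstBus *bus = gst_element_get_bus(pipeline);
  GstMessage *msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
      GST_MESSAGE_EOS | GST_MESSAGE_ERROR);
  gst_message_unref(msg);
  gst_object_unref(bus);

  gst_element_set_state(pipeline, GST_STATE_NULL);
  gst_object_unref(pipeline);
  return 0;
}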

Any help is greatly appreciated.

This is the diff between the original code provided in the SDK and my current code (I can provide the full code if needed):

diff --git a/sources/apps/deepstream-test1/deepstream_test1_app.c b/sources/apps/deepstream-test1/deepstream_test1_app.c
index 94787b7..0e72a87 100644
--- a/sources/apps/deepstream-test1/deepstream_test1_app.c
+++ b/sources/apps/deepstream-test1/deepstream_test1_app.c
@@ -150,18 +150,26 @@ static gboolean bus_call(GstBus * bus, GstMessage * msg, gpointer data) {
 
 int main(int argc, char *argv[]) {
   GMainLoop *loop = NULL;
-  GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL, *decoder =
-      NULL, *sink = NULL, *pgie = NULL, *nvvidconv = NULL, *nvosd = NULL,
-      *filter1 = NULL, *filter2 = NULL;
+  GstElement *pipeline = NULL;
+  GstElement *source = NULL, *h264parser = NULL, *decoder = NULL,
+      *pgie = NULL, *nvvidconv = NULL, *nvosd = NULL, *sink = NULL;
+  GstElement *filter1 = NULL, *filter2 = NULL, *filter3 = NULL;
+  GstElement *qtmux = NULL, *converter = NULL;
   GstBus *bus = NULL;
   guint bus_watch_id;
-  GstCaps *caps1 = NULL, *caps2 = NULL;
+  GstCaps *caps1 = NULL, *caps2 = NULL, *caps3 = NULL;
   gulong osd_probe_id = 0;
   GstPad *osd_sink_pad = NULL;
 
   /* Check input arguments */
-  if (argc != 2) {
-    g_printerr("Usage: %s <H264 filename>\n", argv[0]);
+  if (argc != 3) {
+    g_printerr("Usage: %s <H264 input filename> <output filename>\n",
+        argv[0]);
     return -1;
   }
 
@@ -193,15 +201,24 @@ int main(int argc, char *argv[]) {
   /* Create OSD to draw on the converted RGBA buffer */
   nvosd = gst_element_factory_make("nvosd", "nv-onscreendisplay");
 
-  /* Finally render the osd output */
-  sink = gst_element_factory_make("nveglglessink", "nvvideo-renderer");
+  /* Creating the converter to switch back to UYVY */
+  converter = gst_element_factory_make("videoconvert", "videoconverter");
+  /* Creating the mp4 mux */
+  qtmux = gst_element_factory_make("qtmux", "mux");
+  /* Finally save the file */
+  sink = gst_element_factory_make("filesink", "save-file");
 
   /* caps filter for nvvidconv to convert NV12 to RGBA as nvosd expects input
    * in RGBA format */
   filter1 = gst_element_factory_make("capsfilter", "filter1");
   filter2 = gst_element_factory_make("capsfilter", "filter2");
+  filter3 = gst_element_factory_make("capsfilter", "filter3");
   if (!pipeline || !source || !h264parser || !decoder || !pgie || !filter1
-      || !nvvidconv || !filter2 || !nvosd || !sink) {
+      || !nvvidconv || !filter2 || !nvosd || !converter || !filter3
+      || !qtmux || !sink) {
     g_printerr("One element could not be created. Exiting.\n");
     return -1;
   }
@@ -209,6 +226,9 @@ int main(int argc, char *argv[]) {
   /* we set the input filename to the source element */
   g_object_set(G_OBJECT(source), "location", argv[1], NULL);
 
+  /* we set the output filename to the sink element */
+  g_object_set(G_OBJECT(sink), "location", argv[2], NULL);
+
   /* Set all the necessary properties of the nvinfer element,
    * the necessary ones are : */
   g_object_set(G_OBJECT(pgie), "config-file-path", "dstest1_pgie_config.txt",
@@ -225,19 +245,26 @@ int main(int argc, char *argv[]) {
   /* Set up the pipeline */
   /* we add all elements into the pipeline */
   gst_bin_add_many(GST_BIN(pipeline), source, h264parser, decoder, pgie,
-      filter1, nvvidconv, filter2, nvosd, sink, NULL);
+      filter1, nvvidconv, filter2, nvosd, converter, filter3, qtmux, sink,
+      NULL);
   caps1 = gst_caps_from_string("video/x-raw(memory:NVMM), format=NV12");
   g_object_set(G_OBJECT(filter1), "caps", caps1, NULL);
   gst_caps_unref(caps1);
   caps2 = gst_caps_from_string("video/x-raw(memory:NVMM), format=RGBA");
   g_object_set(G_OBJECT(filter2), "caps", caps2, NULL);
   gst_caps_unref(caps2);
+  caps3 = gst_caps_from_string("video/x-h264");
+  g_object_set(G_OBJECT(filter3), "caps", caps3, NULL);
+  gst_caps_unref(caps3);
 
   /* we link the elements together */
   /* file-source -> h264-parser -> nvh264-decoder ->
    * nvinfer -> filter1 -> nvvidconv -> filter2 -> nvosd -> video-renderer */
   gst_element_link_many(source, h264parser, decoder, pgie, filter1, nvvidconv,
-      filter2, nvosd, sink, NULL);
+      filter2, nvosd, converter, filter3, qtmux, sink, NULL);
 
   /* Lets add probe to get informed of the meta data generated, we add probe to
    * the sink pad of the osd element, since by that time, the buffer would have

Hi Juan,
Please refer to the patch:

diff --git a/apps/deepstream-test1/deepstream_test1_app.c b/apps/deepstream-test1/deepstream_test1_app.c
index 434b9ee..ecbfc10 100644
--- a/apps/deepstream-test1/deepstream_test1_app.c
+++ b/apps/deepstream-test1/deepstream_test1_app.c
@@ -166,6 +166,14 @@ main (int argc, char *argv[])
   gulong osd_probe_id = 0;
   GstPad *osd_sink_pad = NULL;
 
+  GstElement *nvvidconv1 = NULL,
+             *filter3 = NULL,
+             *videoconvert = NULL,
+             *filter4 = NULL,
+             *x264enc = NULL,
+             *qtmux = NULL;
+  GstCaps *caps3 = NULL, *caps4 = NULL;
+
   /* Check input arguments */
   if (argc != 2) {
     g_printerr ("Usage: %s <H264 filename>\n", argv[0]);
@@ -200,22 +208,35 @@ main (int argc, char *argv[])
   /* Create OSD to draw on the converted RGBA buffer */
   nvosd = gst_element_factory_make ("nvosd", "nv-onscreendisplay");
 
+  nvvidconv1 = gst_element_factory_make ("nvvidconv", "nvvideo-converter1");
+  videoconvert = gst_element_factory_make ("videoconvert", "converter");
+  x264enc = gst_element_factory_make ("x264enc", "h264 encoder");
+  qtmux = gst_element_factory_make ("qtmux", "muxer");
+
   /* Finally render the osd output */
-  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
+  sink = gst_element_factory_make ("filesink", "filesink");
 
   /* caps filter for nvvidconv to convert NV12 to RGBA as nvosd expects input
    * in RGBA format */
   filter1 = gst_element_factory_make ("capsfilter", "filter1");
   filter2 = gst_element_factory_make ("capsfilter", "filter2");
+  filter3 = gst_element_factory_make ("capsfilter", "filter3");
+  filter4 = gst_element_factory_make ("capsfilter", "filter4");
   if (!pipeline || !source || !h264parser || !decoder || !pgie
       || !filter1 || !nvvidconv || !filter2 || !nvosd || !sink) {
     g_printerr ("One element could not be created. Exiting.\n");
     return -1;
   }
+  if (!nvvidconv1 || !x264enc || !qtmux || !filter3 || !filter4) {
+    g_printerr ("One element could not be created. Exiting.\n");
+    return -1;
+  }
 
   /* we set the input filename to the source element */
   g_object_set (G_OBJECT (source), "location", argv[1], NULL);
 
+  g_object_set (G_OBJECT (sink), "location", "out.mp4", NULL);
+
   /* Set all the necessary properties of the nvinfer element,
    * the necessary ones are : */
   g_object_set (G_OBJECT (pgie),
@@ -233,19 +254,28 @@ main (int argc, char *argv[])
   /* we add all elements into the pipeline */
   gst_bin_add_many (GST_BIN (pipeline),
       source, h264parser, decoder, pgie,
-      filter1, nvvidconv, filter2, nvosd, sink, NULL);
+      filter1, nvvidconv, filter2, nvosd, nvvidconv1, filter3,
+      videoconvert, filter4, x264enc, qtmux, sink, NULL);
   caps1 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12");
   g_object_set (G_OBJECT (filter1), "caps", caps1, NULL);
   gst_caps_unref (caps1);
   caps2 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=RGBA");
   g_object_set (G_OBJECT (filter2), "caps", caps2, NULL);
   gst_caps_unref (caps2);
+  caps3 = gst_caps_from_string ("video/x-raw, format=RGBA");
+  g_object_set (G_OBJECT (filter3), "caps", caps3, NULL);
+  gst_caps_unref (caps3);
+  caps4 = gst_caps_from_string ("video/x-raw, format=NV12");
+  g_object_set (G_OBJECT (filter4), "caps", caps4, NULL);
+  gst_caps_unref (caps4);
 
   /* we link the elements together */
   /* file-source -> h264-parser -> nvh264-decoder ->
    * nvinfer -> filter1 -> nvvidconv -> filter2 -> nvosd -> video-renderer */
   gst_element_link_many (source, h264parser, decoder, pgie, filter1,
-      nvvidconv, filter2, nvosd, sink, NULL);
+      nvvidconv, filter2, nvosd, nvvidconv1, filter3,
+      videoconvert, filter4,
+      x264enc, qtmux, sink, NULL);
 
   /* Lets add probe to get informed of the meta data generated, we add probe to
    * the sink pad of the osd element, since by that time, the buffer would have

Hi Dane, adding the additional converter and the encoder worked perfectly. Thanks!

I’m still curious and want to understand a bit more. Why do we need two video converters together? And how can I debug these kinds of problems (in case I face a similar one in the future)?

Thanks again.

Juan Luis

Hi Juan,
nvvidconv1 transfers the RGBA buffers from GPU (NVMM) memory to CPU memory. videoconvert then converts those RGBA buffers to NV12 for the encoder.
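On the debugging question: two habits catch most of this class of problem. First, check the return value of every gst_element_link* call (the code quoted above ignores it, which is why the failure was silent). Second, dump the running pipeline as a GraphViz graph to see the caps actually negotiated on each pad; gst-inspect-1.0 <element> and GST_DEBUG=GST_CAPS:4 round out the toolbox. A minimal sketch with stock GStreamer elements (the pipeline itself is just an example):

/* debug_links.c — a sketch of how to debug linking/negotiation problems.
 * Build: gcc debug_links.c -o debug_links $(pkg-config --cflags --libs gstreamer-1.0)
 * Run:   GST_DEBUG=GST_CAPS:4 GST_DEBUG_DUMP_DOT_DIR=. ./debug_links
 */
#include <gst/gst.h>

int main(int argc, char *argv[])
{
  gst_init(&argc, &argv);

  GstElement *pipeline = gst_pipeline_new("debug-demo");
  GstElement *src  = gst_element_factory_make("videotestsrc", "src");
  GstElement *conv = gst_element_factory_make("videoconvert", "conv");
  GstElement *enc  = gst_element_factory_make("x264enc", "enc");
  GstElement *mux  = gst_element_factory_make("qtmux", "mux");
  GstElement *sink = gst_element_factory_make("filesink", "sink");

  g_object_set(sink, "location", "debug.mp4", NULL);
  gst_bin_add_many(GST_BIN(pipeline), src, conv, enc, mux, sink, NULL);

  /* 1. Always check the return value: FALSE means two neighbouring pads
   *    had no caps in common — the silent failure mode seen above. */
  if (!gst_element_link_many(src, conv, enc, mux, sink, NULL)) {
    g_printerr("Linking failed; compare pad caps with gst-inspect-1.0\n");
    gst_object_unref(pipeline);
    return -1;
  }

  /* 2. Once the pipeline is up, dump it as a GraphViz graph; the .dot
   *    file shows the caps actually negotiated on every pad
   *    (requires GST_DEBUG_DUMP_DOT_DIR to be set at run time). */
  gst_element_set_state(pipeline, GST_STATE_PLAYING);
  gst_element_get_state(pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
  GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(pipeline),
      GST_DEBUG_GRAPH_SHOW_ALL, "debug-demo");

  /* We do not bother finalizing debug.mp4 here; see the EOS note
   * earlier in the thread. */
  gst_element_set_state(pipeline, GST_STATE_NULL);
  gst_object_unref(pipeline);
  return 0;
}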

Hi Juan / DaneLLL,

I am trying to run ./deepstream-test1-app from the Docker image and got the error "EGL display connection". I guess the above patch is the solution, since I don't have an NVIDIA display card and need to change the display to type 1 (FakeSink) or type 3 (File).

Could you please take a look at my issue at ERROR: Could not init EGL display connection with ./deepstream-test1-app sample_720p.... - DeepStream SDK - NVIDIA Developer Forums

Question: is this the right solution? If so, how can I apply the above patch within the Docker container? I am getting this error: diff: unrecognized option '--git'

Hi sanchezvr7,
Let’s discuss in
[url]https://devtalk.nvidia.com/default/topic/1061645/deepstream-sdk/error-could-not-init-egl-display-connection-with-deepstream-test1-app-sample_720p-/[/url]

Queries:

  1. While running a live stream through RTSP, this code saves the video, but as soon as the live stream stops, DeepStream stops and the generated video file is corrupted and not playable (I tried VLC and QuickTime Player). Please suggest how I can stop the camera feed without the output file getting corrupted. (See the sketch after this list.)

  2. This code is not for the Jetson platform. I tried to add the "nvegltransform" element to make it compatible with the Jetson Nano, but I am getting an error while linking: "Elements could not be linked. Exiting." While debugging, I found that the issue is with linking the "nvegltransform" element.

INFO        GST_ELEMENT_PADS gstutils.c:1227:gst_element_get_compatible_pad:<muxer> Could not find a compatible pad to link to nvegl-transform:sink

I also tried changing the position of this element while linking, but it does not fit anywhere. Please help.

  3. What changes are needed in this patch to save the video file as well as display it?
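A few hedged notes on these queries (untested on a Nano here). On query 2: nvegltransform exists to feed EGLImage buffers to nveglglessink, so in a file-saving pipeline it has no downstream partner and no compatible pad, which matches the quoted log line; it only belongs directly in front of nveglglessink. On query 3: the usual approach is a tee after nvosd, with one branch to the display sink and one to the encoder/muxer. On query 1: the corruption is the same EOS issue discussed earlier — the muxer can only finalize the file if EOS travels through the pipeline before shutdown. A sketch of a clean Ctrl-C shutdown with stock GStreamer elements (videotestsrc stands in for the RTSP camera; the filename is illustrative):

/* live_shutdown.c — on Ctrl-C, send EOS and let the muxer finalize the
 * file instead of killing the process.
 * Build: gcc live_shutdown.c -o live_shutdown $(pkg-config --cflags --libs gstreamer-1.0)
 */
#include <gst/gst.h>
#include <glib-unix.h>
#include <signal.h>

static GstElement *pipeline;
static GMainLoop *loop;

static gboolean on_sigint(gpointer user_data)
{
  /* Inject EOS at the head of the pipeline; qtmux finalizes the file
   * once the event reaches it. */
  gst_element_send_event(pipeline, gst_event_new_eos());
  return G_SOURCE_CONTINUE;
}

static gboolean on_bus(GstBus *bus, GstMessage *msg, gpointer user_data)
{
  switch (GST_MESSAGE_TYPE(msg)) {
    case GST_MESSAGE_EOS:      /* file is finalized; now safe to exit */
    case GST_MESSAGE_ERROR:
      g_main_loop_quit(loop);
      break;
    default:
      break;
  }
  return TRUE;
}

int main(int argc, char *argv[])
{
  gst_init(&argc, &argv);

  GError *err = NULL;
  pipeline = gst_parse_launch(
      "videotestsrc is-live=true ! x264enc ! qtmux ! "
      "filesink location=live.mp4", &err);
  if (!pipeline) {
    g_printerr("Parse error: %s\n", err->message);
    return -1;
  }

  GstBus *bus = gst_element_get_bus(pipeline);
  gst_bus_add_watch(bus, on_bus, NULL);
  gst_object_unref(bus);

  g_unix_signal_add(SIGINT, on_sigint, NULL);

  loop = g_main_loop_new(NULL, FALSE);
  gst_element_set_state(pipeline, GST_STATE_PLAYING);
  g_main_loop_run(loop);

  gst_element_set_state(pipeline, GST_STATE_NULL);
  gst_object_unref(pipeline);
  return 0;
}

On a Jetson the hardware encoder (e.g. omxh264enc) would typically replace x264enc, but the EOS handling is the same.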

Hi maniKTL,
The patch in #2 is mainly for DS3.0. You may make a new post if you run DS4.0.1.


Hi @DaneLLL,

I need the same thing (encoding and saving to a file) but for DS4.0.1 and deepstream_test2_app.c. Could you please help me (by providing a patch like the one in #2) to do this?

P.S.: I'm using a Jetson Nano board.

Hi LuatNT,
Please refer to the following posts and integrate them into your use case.
[url]https://devtalk.nvidia.com/default/topic/1062551/deepstream-sdk/save-part-of-video-from-frames-in-ds4-0-pipeline-in-deepstream-test2-app-/post/5381275/#5381275[/url]
[url]https://devtalk.nvidia.com/default/topic/1062551/deepstream-sdk/save-part-of-video-from-frames-in-ds4-0-pipeline-in-deepstream-test2-app-/post/5381361/#5381361[/url]

Hi DaneLLL,
I have made a new post: [url]https://devtalk.nvidia.com/default/topic/1065388/deepstream-sdk/save-output-video-for-deepstream-4-0-1/[/url]

I am using DS 4.0.1, here is the new post https://devtalk.nvidia.com/default/topic/1065388/deepstream-sdk/save-output-video-for-deepstream-4-0-1/

Hi DaneLLL,

I got it done by myself yesterday (based on your patch in #2), but anyway, thank you very much for your help and support! :)

Can you post the solution? I am also on DS4. Thanks.

Good day,

I would like to save the incoming stream to a file, but I want to save the raw, unmodified stream.
This stream is from an IP camera and as such is an encoded RTSP stream, either H.264 or H.265.

The obvious choice would be to simply save the content to a file without decoding it, but I am having trouble with this part. I have a basic attempt below, but I have yet to succeed.

I have succeeded using gst-launch with the following command (note the -e flag, which makes gst-launch send EOS on shutdown so the muxer can finalize the file):

gst-launch-1.0 -e rtspsrc location=rtsp://admin:surionDev@192.168.0.31:554/profile1 protocols=tcp ! rtph264depay ! h264parse ! mp4mux ! filesink location=save.mp4

Any help would be greatly appreciated

import os
import sys

import gi

try:
    gi.require_version('Gst', '1.0')
except ValueError:
    print('Could not find required GStreamer library')
    sys.exit(1)

from gi.repository import GLib, Gst


def on_rtspsrc_pad_added(src, new_pad, depay):
    # rtspsrc creates its source pads dynamically once the RTSP session is
    # negotiated, so the depayloader can only be linked from this callback;
    # linking rtspsrc statically (as in my first attempt) always fails.
    sink_pad = depay.get_static_pad('sink')
    if not sink_pad.is_linked():
        new_pad.link(sink_pad)


def make_file_sink_pipeline():
    pipeline = Gst.Pipeline()
    camera_src = "rtsp://admin:surionDev@192.168.0.31:554/profile1"
    video_source = Gst.ElementFactory.make('rtspsrc', 'source')
    video_source.set_property('location', camera_src)
    # GST_RTSP_LOWER_TRANS_TCP (4), matching protocols=tcp in the
    # gst-launch command above
    video_source.set_property('protocols', 4)
    video_depay = Gst.ElementFactory.make('rtph264depay', 'video_depay')
    video_parser = Gst.ElementFactory.make('h264parse', 'video_parser')
    # mux = Gst.ElementFactory.make('mp4mux', 'mp4muxer')
    mux = Gst.ElementFactory.make('matroskamux', 'mux')
    filesink = Gst.ElementFactory.make('filesink', 'fsink')
    # matroskamux produces Matroska output, so use a .mkv filename
    filesink.set_property('location', 'rec_file.mkv')

    for element in (video_source, video_depay, video_parser, mux, filesink):
        pipeline.add(element)

    # rtspsrc pads do not exist yet; link them when they appear
    video_source.connect('pad-added', on_rtspsrc_pad_added, video_depay)
    video_depay.link(video_parser)
    video_parser.link(mux)
    mux.link(filesink)

    pipeline.set_state(Gst.State.PLAYING)
    return pipeline


if __name__ == "__main__":
    os.environ["GST_DEBUG"] = '3'
    Gst.init(None)
    # camera -> depay -> parse -> mux -> file
    pipeline = make_file_sink_pipeline()
    loop = GLib.MainLoop()
    try:
        loop.run()
    except KeyboardInterrupt:
        # Send EOS and wait for it, so the muxer can finalize the file.
        pipeline.send_event(Gst.Event.new_eos())
        bus = pipeline.get_bus()
        bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS)
    pipeline.set_state(Gst.State.NULL)

Regards, Andrew

Hi Andrew,

Please help to open a new topic for your issue. Thanks