Encoder and opencv consumer

Hi Folks,

I would like to record 1080p (60 fps) video and concurrently process those frames in OpenCV. I looked around for examples to follow and thought of using argus/build/samples/gstVideoEncode as a template.

  1. I implemented a consumer class for OpenCV processing; I am able to read camera frames into a cv::Mat.
bool OCVConsumerThread::threadExecute()
{
    IStream *iStream = interface_cast<IStream>(m_stream);
    IFrameConsumer *iFrameConsumer = interface_cast<IFrameConsumer>(m_consumer);
    Argus::Status status;
    int ret;

    // Wait until the producer has connected to the stream.
    OCV_CONSUMER_PRINT("Waiting until producer is connected...\n");
    if (iStream->waitUntilConnected() != STATUS_OK)
        ORIGINATE_ERROR("Stream failed to connect.");
    OCV_CONSUMER_PRINT("Producer has connected; continuing.\n");;

    int frameCount = 0;
    while (true)
    {
        // Acquire a Frame.
        UniqueObj<Frame> frame(iFrameConsumer->acquireFrame());
        IFrame *iFrame = interface_cast<IFrame>(frame);
        if (!iFrame)
            break;

        // Get the Frame's Image.
        Image *image = iFrame->getImage();

        EGLStream::NV::IImageNativeBuffer *iImageNativeBuffer
              = interface_cast<EGLStream::NV::IImageNativeBuffer>(image);
        TEST_ERROR_RETURN(!iImageNativeBuffer, "Failed to create an IImageNativeBuffer");

        int fd = iImageNativeBuffer->createNvBuffer(Size {m_framesize.width, m_framesize.height},
               NvBufferColorFormat_YUV420, NvBufferLayout_Pitch, &status);
        TEST_ERROR_RETURN(status != STATUS_OK, "Failed to create a native buffer");


        uint8_t* data_mem;
        int fsize = m_framesize.width * m_framesize.height;

        data_mem = (uint8_t*)mmap(NULL, fsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (data_mem == MAP_FAILED)
           printf("mmap failed : %s\n", strerror(errno));


        cv::Mat imgbuf = cv::Mat(m_framesize.height, m_framesize.width, CV_8UC1, data_mem);
        cv::imshow("img", imgbuf);
        cv::waitKey(1);

        OCV_CONSUMER_PRINT("Acquired frame no. %d %d\n", m_framesize.height, m_framesize.width);
    }

    OCV_CONSUMER_PRINT("No more frames. Cleaning up.\n");

    PROPAGATE_ERROR(requestShutdown());

    return true;
}
  2. I replaced the PreviewConsumerThread of the original example with OCVConsumerThread.
static bool execute()
{
    using namespace Argus;

    // Initialize the preview window and EGL display.
    Window &window = Window::getInstance();
    PROPAGATE_ERROR(g_display.initialize(window.getEGLNativeDisplay()));

    // Create CameraProvider.
    UniqueObj<CameraProvider> cameraProvider(CameraProvider::create());
    ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
    if (!iCameraProvider)
        ORIGINATE_ERROR("Failed to open CameraProvider");

    // Get/use the first available CameraDevice.
    std::vector<CameraDevice*> cameraDevices;
    if (iCameraProvider->getCameraDevices(&cameraDevices) != STATUS_OK)
        ORIGINATE_ERROR("Failed to get CameraDevices");
    if (cameraDevices.size() == 0)
        ORIGINATE_ERROR("No CameraDevices available");
    CameraDevice *cameraDevice = cameraDevices[0];
    ICameraProperties *iCameraProperties = interface_cast<ICameraProperties>(cameraDevice);
    if (!iCameraProperties)
        ORIGINATE_ERROR("Failed to get ICameraProperties interface");

    // Create CaptureSession.
    UniqueObj<CaptureSession> captureSession(iCameraProvider->createCaptureSession(cameraDevice));
    ICaptureSession *iSession = interface_cast<ICaptureSession>(captureSession);
    if (!iSession)
        ORIGINATE_ERROR("Failed to create CaptureSession");

    // Get the sensor mode to determine the video output stream resolution.
    std::vector<Argus::SensorMode*> sensorModes;
    iCameraProperties->getSensorModes(&sensorModes);
    if (sensorModes.size() == 0)
        ORIGINATE_ERROR("Failed to get sensor modes");
    ISensorMode *iSensorMode = interface_cast<ISensorMode>(sensorModes[0]);
    if (!iSensorMode)
        ORIGINATE_ERROR("Failed to get sensor mode interface");

    // Set common output stream settings.
    UniqueObj<OutputStreamSettings> streamSettings(iSession->createOutputStreamSettings());
    IOutputStreamSettings *iStreamSettings = interface_cast<IOutputStreamSettings>(streamSettings);
    if (!iStreamSettings)
        ORIGINATE_ERROR("Failed to create OutputStreamSettings");
    iStreamSettings->setPixelFormat(PIXEL_FMT_YCbCr_420_888);
    iStreamSettings->setEGLDisplay(g_display.get());
    iStreamSettings->setResolution(PREVIEW_STREAM_SIZE);

    // Create video encoder stream.
    //iStreamSettings->setResolution(iSensorMode->getResolution());
    UniqueObj<OutputStream> videoStream(iSession->createOutputStream(streamSettings.get()));
    IStream *iVideoStream = interface_cast<IStream>(videoStream);
    if (!iVideoStream)
        ORIGINATE_ERROR("Failed to create video stream");

    // Create preview stream.
    //iStreamSettings->setResolution(PREVIEW_STREAM_SIZE);
    //UniqueObj<OutputStream> previewStream(iSession->createOutputStream(streamSettings.get()));
    //IStream *iPreviewStream = interface_cast<IStream>(previewStream);
    //if (!iPreviewStream)
    //    ORIGINATE_ERROR("Failed to create preview stream");

    UniqueObj<OutputStream> ocvStream(iSession->createOutputStream(streamSettings.get()));
    if (!ocvStream.get())
        ORIGINATE_ERROR("Failed to create ocv stream");



    // Create capture Request and enable the streams in the Request.
    UniqueObj<Request> request(iSession->createRequest(CAPTURE_INTENT_VIDEO_RECORD));
    IRequest *iRequest = interface_cast<IRequest>(request);
    if (!iRequest)
        ORIGINATE_ERROR("Failed to create Request");
    if (iRequest->enableOutputStream(videoStream.get()) != STATUS_OK)
        ORIGINATE_ERROR("Failed to enable video stream in Request");
    //if (iRequest->enableOutputStream(previewStream.get()) != STATUS_OK)
    //    ORIGINATE_ERROR("Failed to enable preview stream in Request");
    if (iRequest->enableOutputStream(ocvStream.get()) != STATUS_OK)
        ORIGINATE_ERROR("Failed to enable ocv stream in Request");


    // Initialize the GStreamer video encoder consumer.
    GstVideoEncoder gstVideoEncoder;
    if (!gstVideoEncoder.initialize(iVideoStream->getEGLStream(), PREVIEW_STREAM_SIZE,
                                    FRAMERATE, BITRATE, ENCODER, MUXER, OUTPUT))
        ORIGINATE_ERROR("Failed to initialize GstVideoEncoder EGLStream consumer");
    if (!gstVideoEncoder.startRecording())
        ORIGINATE_ERROR("Failed to start video recording");

    // Initialize the preview consumer.
    //PreviewConsumerThread previewConsumer(g_display.get(), iPreviewStream->getEGLStream());
    //PROPAGATE_ERROR(previewConsumer.initialize());
    //PROPAGATE_ERROR(previewConsumer.waitRunning());

    // Initialize the ocv consumer.
    OCVConsumerThread ocvConsumer(ocvStream.get(), PREVIEW_STREAM_SIZE);
    PROPAGATE_ERROR(ocvConsumer.initialize());
    PROPAGATE_ERROR(ocvConsumer.waitRunning());


    // Perform repeat capture requests for LENGTH seconds.
    if (iSession->repeat(request.get()) != STATUS_OK)
        ORIGINATE_ERROR("Failed to start repeat capture requests");
    PROPAGATE_ERROR(window.pollingSleep(LENGTH));
    iSession->stopRepeat();

    // Wait until all frames have completed before stopping recording.
    /// @todo: Not doing this may cause a deadlock.
    iSession->waitForIdle();

    // Stop video recording.
    if (!gstVideoEncoder.stopRecording())
        ORIGINATE_ERROR("Failed to stop video recording");
    gstVideoEncoder.shutdown();
    videoStream.reset();

    // Stop preview.
    //previewStream.reset();
    //PROPAGATE_ERROR(previewConsumer.shutdown());

    // Stop ocv consumer.
    ocvStream.reset();
    PROPAGATE_ERROR(ocvConsumer.shutdown());

    return true;
}
  3. When I build and run this, I see that my encoder output (.mp4) is decodable and looks good. However, the output shown by imshow is garbage. Can you see any problem with my mapping?

I also observed that my encoder pipeline chokes when I create the buffer (createNvBuffer) like this:

int fd = iImageNativeBuffer->createNvBuffer(Size {m_framesize.width, m_framesize.height},
               NvBufferColorFormat_YUV420, NvBufferLayout_BlockLinear, &status);

But the encoder does not choke with:

int fd = iImageNativeBuffer->createNvBuffer(Size {m_framesize.width, m_framesize.height},
               NvBufferColorFormat_YUV420, NvBufferLayout_Pitch, &status);

I guess I am not setting the stride/pitch of the buffer properly before mapping it for the cv::Mat.

Thanks,

I am away from any Jetson to test it, but it seems you are getting frames in I420 format while your consumer/display expects an 8 bpp monochrome format.
Maybe using the OpenCV cvtColor function for I420 to BGR can help, using Mat format CV_8UC3 for color display, as sketched below.
You may also need to adjust the framesize passed to mmap to the full I420 frame size (more than 1 byte per pixel).
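
Something like this untested sketch is what I have in mind (it assumes the mapped buffer is contiguous I420 with pitch equal to width, which may not hold on your board):

// Untested sketch: map the full I420 frame (1.5 bytes per pixel)
// and convert to BGR for color display. Assumes pitch == width.
int fsize = m_framesize.width * m_framesize.height * 3 / 2;
uint8_t* data_mem = (uint8_t*)mmap(NULL, fsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

// OpenCV treats I420 as a single-channel image with height*3/2 rows.
cv::Mat yuv(m_framesize.height * 3 / 2, m_framesize.width, CV_8UC1, data_mem);
cv::Mat bgr;
cv::cvtColor(yuv, bgr, cv::COLOR_YUV2BGR_I420);
cv::imshow("img", bgr);
cv::waitKey(1);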

Hi Honey_Patouceul,
Thanks for your response. I need access to the Y plane only, as my processing algorithm is based on the Y plane. Displaying the image is just a validation step that I am looking to get past, so I would prefer not to convert the image format using cvtColor.
Looking at the image, I suspect that the YUV420 written out by the camera might not be in a linear layout; could it be a block layout? Is there a way to ensure that the camera writes YUV420 out in a planar, pitch-linear format?
Thanks,

Hi,
Please refer to this sample for dumping a frame:

{
    NvBufferParams params;
    NvBufferGetParams(fd, &params);

    char filename[256];
    sprintf(filename, "buffer%dx%d.yuv", params.width[0], params.height[0]);
    FILE *fp = fopen(filename, "wb");

    for (unsigned i = 0; i < params.num_planes; i++) {

        size_t size = params.height[i] * params.pitch[i];
        //size_t size = params.psize[i];
        char *vptr = (char*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, params.offset[i]);

        // TODO: DO SOMETHING WITH vptr
        printf("plane[%d] %dx%d, pitch=%d offset=%d size=%ld, vptr=%p\n", i, params.width[i], params.height[i],
               params.pitch[i], params.offset[i], size, vptr);

        // write each row without its pitch padding
        for (unsigned j = 0; j < params.height[i]; j++) {
            fwrite(vptr + params.pitch[i]*j, params.width[i], 1, fp);
        }

        munmap(vptr, size);
    }
    fclose(fp);

    m_dmabuf = iNativeBuffer->createNvBuffer(iStream->getResolution(),
                                             NvBufferColorFormat_YUV420,
                                             NvBufferLayout_Pitch);
}

The default output format is block linear, a tiled layout that the CPU cannot read row by row. For post-processing, please create the buffer as pitch linear:

m_dmabuf = iNativeBuffer->createNvBuffer(iStream->getResolution(),
                                                     NvBufferColorFormat_YUV420,
                                                     NvBufferLayout_Pitch);
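
With a pitch-linear buffer, the Y plane can then be wrapped for OpenCV using its real pitch. A sketch (variable names are placeholders):

NvBufferParams params;
NvBufferGetParams(fd, &params);

// map only the Y plane, using its pitch and offset
int ysize = params.pitch[0] * params.height[0];
char *y = (char*)mmap(NULL, ysize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, params.offset[0]);

// the cv::Mat constructor with a step argument honours the pitch
cv::Mat gray(params.height[0], params.width[0], CV_8UC1, y, params.pitch[0]);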

Hi DaneLLL,

I did try using NvBufferGetParams(fd, &params), but I always had difficulty getting it to compile and link; NvBufferGetParams came up as an undefined symbol. Could you please give the compile/link command for it, or tell me which library to link against for this function?

Thanks,

Hi, it is built into libnvbuf_utils.so.
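
For example, a link line could look like this (file name and include path are placeholders for your setup; on a default L4T install the library sits under the tegra lib directory):

# hypothetical file names/paths; adjust for your setup
g++ ocv_consumer.cpp -o ocv_consumer \
    -I ~/tegra_multimedia_api/include \
    -L /usr/lib/aarch64-linux-gnu/tegra -lnvbuf_utils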

Thanks DaneLLL.

I have it working now, and I am on to the next issue.

Somehow, when I run this encoder + OCVConsumer pipe, I am not able to get 1080p @ 30 fps performance. There is a big lag (it feels like more than 2 seconds) between motion in front of the camera and when it appears in my imshow window.

The thread below claims 1280 @ 540 fps. What is the best way to achieve that type of performance? What is the max performance that you have been able to achieve (for an encode plus opencv/imshow pipeline) in your lab?

Thanks,

Forgot to quote the thread.

https://devtalk.nvidia.com/default/topic/1009706/jetson-tx1/libargus-repeating-or-interpolating-frames-/post/5154377/#5154377

Thanks,

Furthermore, when I try to increase the clocks using $ sudo ~/jetson_clks.sh, I get a segmentation fault with the following code.

Would memory allocation, or anything else in memory, change when the clocks change?

Thanks,

        // Acquire a Frame.
        UniqueObj<Frame> frame(iFrameConsumer->acquireFrame());
        IFrame *iFrame = interface_cast<IFrame>(frame);
        if (!iFrame)
            break;

        // Get the Frame's Image.
        Image *image = iFrame->getImage();
        EGLStream::NV::IImageNativeBuffer *iImageNativeBuffer
              = interface_cast<EGLStream::NV::IImageNativeBuffer>(image);
        TEST_ERROR_RETURN(!iImageNativeBuffer, "Failed to create an IImageNativeBuffer");

        int fd = iImageNativeBuffer->createNvBuffer(Size {m_framesize.width, m_framesize.height},
               NvBufferColorFormat_YUV420, NvBufferLayout_Pitch, &status);
        TEST_ERROR_RETURN(status != STATUS_OK, "Failed to create a native buffer");

 
        NvBufferParams params;
        NvBufferGetParams(fd, &params);

        int fsize = params.pitch[0] * m_framesize.height;
        char *data_mem = (char*)mmap(NULL, fsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, params.offset[0]);
        if (data_mem == MAP_FAILED)
           printf("mmap failed : %s\n", strerror(errno));

        cv::Mat imgbuf = cv::Mat(m_framesize.height, m_framesize.width, CV_8UC1, data_mem, params.pitch[0]);
        cv::imshow("img", imgbuf);
        cv::waitKey(1);
        munmap(data_mem, fsize);  // unmap before destroying the buffer each iteration
        NvBufferDestroy(fd);

Hi, are you able to achieve 1080p30 without cv::imshow?

Hi DaneLLL,

I am running the encoder and OCVConsumer threads concurrently on the same camera feed. The encoder thread seems to encode well at 1080p @ 30 fps. However, the OCVConsumer thread, which does the imshow, appears very sluggish. This is a purely visual observation; if I get rid of imshow, how will I get a feel for the performance? What I expect is that the image displayed by imshow should not be sluggish.

Another observation: I see a second, blank display window, which is residual from the PreviewConsumerThread that I replaced in the gstVideoEncode example. Nothing is displayed in this window, since my imshow window is separate. Could things be getting slow because of it?

I am attaching my code below -

static bool execute()
{
    using namespace Argus;

    // Initialize the preview window and EGL display.
    Window &window = Window::getInstance();
    PROPAGATE_ERROR(g_display.initialize(window.getEGLNativeDisplay()));

    // Create CameraProvider.
    UniqueObj<CameraProvider> cameraProvider(CameraProvider::create());
    ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
    if (!iCameraProvider)
        ORIGINATE_ERROR("Failed to open CameraProvider");

    // Get/use the first available CameraDevice.
    std::vector<CameraDevice*> cameraDevices;
    if (iCameraProvider->getCameraDevices(&cameraDevices) != STATUS_OK)
        ORIGINATE_ERROR("Failed to get CameraDevices");
    if (cameraDevices.size() == 0)
        ORIGINATE_ERROR("No CameraDevices available");
    CameraDevice *cameraDevice = cameraDevices[0];
    ICameraProperties *iCameraProperties = interface_cast<ICameraProperties>(cameraDevice);
    if (!iCameraProperties)
        ORIGINATE_ERROR("Failed to get ICameraProperties interface");

    // Create CaptureSession.
    UniqueObj<CaptureSession> captureSession(iCameraProvider->createCaptureSession(cameraDevice));
    ICaptureSession *iSession = interface_cast<ICaptureSession>(captureSession);
    if (!iSession)
        ORIGINATE_ERROR("Failed to create CaptureSession");

#if 0
    // Get the sensor mode to determine the video output stream resolution.
    std::vector<Argus::SensorMode*> sensorModes;
    iCameraProperties->getSensorModes(&sensorModes);
    if (sensorModes.size() == 0)
        ORIGINATE_ERROR("Failed to get sensor modes");
    ISensorMode *iSensorMode = interface_cast<ISensorMode>(sensorModes[0]);
    if (!iSensorMode)
        ORIGINATE_ERROR("Failed to get sensor mode interface");
#endif

    // Set common output stream settings.
    UniqueObj<OutputStreamSettings> streamSettings(iSession->createOutputStreamSettings());
    IOutputStreamSettings *iStreamSettings = interface_cast<IOutputStreamSettings>(streamSettings);
    if (!iStreamSettings)
        ORIGINATE_ERROR("Failed to create OutputStreamSettings");
    iStreamSettings->setPixelFormat(PIXEL_FMT_YCbCr_420_888);
    iStreamSettings->setEGLDisplay(g_display.get());

    // Create video encoder stream.
    iStreamSettings->setResolution(PREVIEW_STREAM_SIZE);
    UniqueObj<OutputStream> videoStream(iSession->createOutputStream(streamSettings.get()));
    IStream *iVideoStream = interface_cast<IStream>(videoStream);
    if (!iVideoStream)
        ORIGINATE_ERROR("Failed to create video stream");

    // Create preview stream.
#if 0
    iStreamSettings->setResolution(PREVIEW_STREAM_SIZE);
    UniqueObj<OutputStream> previewStream(iSession->createOutputStream(streamSettings.get()));
    IStream *iPreviewStream = interface_cast<IStream>(previewStream);
    if (!iPreviewStream)
        ORIGINATE_ERROR("Failed to create preview stream");
#endif

    UniqueObj<OutputStream> ocvStream(iSession->createOutputStream(streamSettings.get()));
    if (!ocvStream.get())
        ORIGINATE_ERROR("Failed to create ocv stream");


    // Create capture Request and enable the streams in the Request.
    UniqueObj<Request> request(iSession->createRequest(CAPTURE_INTENT_VIDEO_RECORD));
    IRequest *iRequest = interface_cast<IRequest>(request);
    if (!iRequest)
        ORIGINATE_ERROR("Failed to create Request");
    if (iRequest->enableOutputStream(videoStream.get()) != STATUS_OK)
        ORIGINATE_ERROR("Failed to enable video stream in Request");
    //if (iRequest->enableOutputStream(previewStream.get()) != STATUS_OK)
    //    ORIGINATE_ERROR("Failed to enable preview stream in Request");
    if (iRequest->enableOutputStream(ocvStream.get()) != STATUS_OK)
        ORIGINATE_ERROR("Failed to enable ocv stream in Request");

    // Initialize the GStreamer video encoder consumer.
    GstVideoEncoder gstVideoEncoder;
    if (!gstVideoEncoder.initialize(iVideoStream->getEGLStream(), PREVIEW_STREAM_SIZE,
                                    FRAMERATE, BITRATE, ENCODER, MUXER, OUTPUT))
        ORIGINATE_ERROR("Failed to initialize GstVideoEncoder EGLStream consumer");
    if (!gstVideoEncoder.startRecording())
        ORIGINATE_ERROR("Failed to start video recording");

    // Initialize the preview consumer.
    //PreviewConsumerThread previewConsumer(g_display.get(), iPreviewStream->getEGLStream());
    //PROPAGATE_ERROR(previewConsumer.initialize());
    //PROPAGATE_ERROR(previewConsumer.waitRunning());
    // Initialize the ocv consumer.
    OCVConsumerThread ocvConsumer(ocvStream.get(), PREVIEW_STREAM_SIZE);
    PROPAGATE_ERROR(ocvConsumer.initialize());
    PROPAGATE_ERROR(ocvConsumer.waitRunning());

    // Perform repeat capture requests for LENGTH seconds.
    if (iSession->repeat(request.get()) != STATUS_OK)
        ORIGINATE_ERROR("Failed to start repeat capture requests");
    PROPAGATE_ERROR(window.pollingSleep(LENGTH));
    iSession->stopRepeat();

    // Wait until all frames have completed before stopping recording.
    /// @todo: Not doing this may cause a deadlock.
    iSession->waitForIdle();

    // Stop video recording.
    if (!gstVideoEncoder.stopRecording())
        ORIGINATE_ERROR("Failed to stop video recording");
    gstVideoEncoder.shutdown();
    videoStream.reset();

    // Stop preview.
    //previewStream.reset();
    //PROPAGATE_ERROR(previewConsumer.shutdown());

    // Stop ocv consumer.
    ocvStream.reset();
    PROPAGATE_ERROR(ocvConsumer.shutdown());

    return true;
}

I guess I will try to profile (measure the time spent in OCVConsumerThread::threadExecute, which reads the camera frame) after isolating imshow out; something like the sketch below should eliminate the doubt that imshow is the culprit.
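
A rough timing sketch (std::chrono; imgbuf as in my consumer code):

// at the top of the file: #include <chrono>
auto t0 = std::chrono::steady_clock::now();
cv::imshow("img", imgbuf);
cv::waitKey(1);
auto t1 = std::chrono::steady_clock::now();
printf("imshow + waitKey: %lld us\n",
       (long long)std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count());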

Thanks,

Hi, we can do nothing about cv::imshow because it is not developed by NVIDIA. Could you please try NvEglRenderer?

Other OpenCV experts, please share your experience.

Thanks DaneLLL. Could you please provide an example of NvEglRenderer? I could not find any .cpp file using it on my TX1 box.

Thanks,

Hi, please refer to tegra_multimedia_api/samples/09_camera_jpeg_capture
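
From memory, the pattern there is roughly as below; please check the sample for the exact calls. dmabuf_fd stands for a valid NvBuffer fd (e.g. from createNvBuffer()/copyToNvBuffer()):

#include "NvEglRenderer.h"

// Sketch of the NvEglRenderer usage in the MMAPI samples.
NvEglRenderer *renderer =
    NvEglRenderer::createEglRenderer("renderer0", 1920, 1080, 0, 0);
if (!renderer)
    printf("Failed to create EGL renderer\n");
renderer->setFPS(30.0f);

renderer->render(dmabuf_fd);   // call once per acquired frame

delete renderer;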

Hi DaneLLL

  1. imshow seems to take 18 ms (minimum observed) to 29 ms (maximum observed) per frame. At 30 fps the entire frame budget is only about 33 ms, so that alone leaves little headroom.

  2. Just FYI: when I run 09_camera_jpeg_capture, I get some errors. I think this may not matter much to my performance issue; I will focus on using NvEglRenderer.

ubuntu@tegra-ubuntu:~/tegra_multimedia_api/samples/09_camera_jpeg_capture$ ./camera_jpeg_capture 
NvPclHwGetModuleList: No module data found
NvPclHwGetModuleList: No module data found
NvPclHwGetModuleList: No module data found
LSC: LSC surface is not based on full res!
LSC: LSC surface is not based on full res!
LSC: LSC surface is not based on full res!
LSC: LSC surface is not based on full res!
PRODUCER: Creating output stream
PRODUCER: Launching consumer thread
CONSUMER: Waiting until producer is connected...
CONSUMER: Waiting until producer is connected...
PRODUCER: Starting repeat capture requests.
CONSUMER: Producer has connected; continuing.
CONSUMER: Producer has connected; continuing.
CONSUMER: Done.
CONSUMER: Done.
PRODUCER: Done -- exiting.
(NvOdmDevice) Error NotInitialized: V4L2Device not powered on (in dvs/git/dirty/git-master_linux/camera-partner/imager/src/V4L2Device.cpp, function setControlVal(), line 378)
(NvOdmDevice) Error NotInitialized: V4L2Device not powered on (in dvs/git/dirty/git-master_linux/camera-partner/imager/src/V4L2Device.cpp, function setControlVal(), line 378)

Please kindly share your result with NvEglRenderer.

Hi DaneLLL,

Sorry about the delay. I have not tried it yet, but I plan to do so and will update soon.

Thanks,

Hi DaneLLL,

I get a segfault when I try to display using NvEglRenderer, although I do see my screen show images for about a quarter of a second, whereas my capture time is set to 15 seconds. The following is my code to display using NvEglRenderer:

bool OCVConsumerThread::threadExecute()
{
    IStream *iStream = interface_cast<IStream>(m_stream);
    IFrameConsumer *iFrameConsumer = interface_cast<IFrameConsumer>(m_consumer);
    Argus::Status status;
    int ret;

    // Wait until the producer has connected to the stream.
    OCV_CONSUMER_PRINT("Waiting until producer is connected...%d\n", m_id);
    if (iStream->waitUntilConnected() != STATUS_OK)
        ORIGINATE_ERROR("Stream failed to connect.");
    OCV_CONSUMER_PRINT("Producer has connected; continuing. %d\n", m_id);

    int frameCount = 0;
    while (true)
    {
        // Acquire a Frame.
        UniqueObj<Frame> frame(iFrameConsumer->acquireFrame());
        IFrame *iFrame = interface_cast<IFrame>(frame);
        if (!iFrame)
            break;

#if 0
        // Get the Frame's Image.
        Image *image = iFrame->getImage();
        EGLStream::NV::IImageNativeBuffer *iImageNativeBuffer
              = interface_cast<EGLStream::NV::IImageNativeBuffer>(image);
        TEST_ERROR_RETURN(!iImageNativeBuffer, "Failed to create an IImageNativeBuffer");

        int fd = iImageNativeBuffer->createNvBuffer(Size {m_framesize.width, m_framesize.height},
               NvBufferColorFormat_YUV420, NvBufferLayout_Pitch, &status);
        TEST_ERROR_RETURN(status != STATUS_OK, "Failed to create a native buffer");

        NvBufferParams params;
        NvBufferGetParams(fd, &params);

        int fsize = params.pitch[0] * m_framesize.height;
        char *data_mem = (char*)mmap(NULL, fsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, params.offset[0]);
        if (data_mem == MAP_FAILED)
           printf("mmap failed : %s\n", strerror(errno));

        cv::Mat imgbuf = cv::Mat(m_framesize.height, m_framesize.width, CV_8UC1, data_mem, params.pitch[0]);

        std::ostringstream fileName;
        fileName << "image" << m_id;

        cv::imshow(fileName.str().c_str(), imgbuf);
        cv::waitKey(1);
        munmap(data_mem, fsize);  // unmap before destroying the buffer
        NvBufferDestroy(fd);
#else 

        // Get the IImageNativeBuffer extension interface.
        NV::IImageNativeBuffer *iNativeBuffer =
            interface_cast<NV::IImageNativeBuffer>(iFrame->getImage());
        if (!iNativeBuffer)
            OCV_CONSUMER_PRINT("IImageNativeBuffer not supported by Image.");

        // If we don't already have a buffer, create one from this image.
        // Otherwise, just blit to our buffer.
        if (m_dmabuf == -1)
        {
            m_dmabuf = iNativeBuffer->createNvBuffer(iStream->getResolution(),
                                                     NvBufferColorFormat_YUV420,
                                                     NvBufferLayout_BlockLinear);
            if (m_dmabuf == -1)
                OCV_CONSUMER_PRINT("\tFailed to create NvBuffer\n");
        }
        else if (iNativeBuffer->copyToNvBuffer(m_dmabuf) != STATUS_OK)
        {
            OCV_CONSUMER_PRINT("Failed to copy frame to NvBuffer.");
        }


        m_renderer->render(m_dmabuf);
#endif

        //OCV_CONSUMER_PRINT("Acquired frame no. %d %d %d %d %d\n", params.width[0], params.height[0], params.pitch[0],m_framesize.width, m_framesize.height);
    }

    OCV_CONSUMER_PRINT("No more frames. Cleaning up.\n");

    PROPAGATE_ERROR(requestShutdown());

    return true;
}

Is there a way to display a cv::Mat using NvEglRenderer?

Hi, we have several samples demonstrating NvEglRenderer. Please provide a patch against one of the samples so that we can reproduce your issue.