Hi Folks,
We have two cameras on our TX1. I want to record from each camera and process the recorded frames with OpenCV. I followed argus/samples/gstVideoEncode as an example, and with one camera I am able to record and also read frames for processing in an 'OCVConsumer' class. I am having difficulty running the encode and OCVConsumer pipelines for two cameras concurrently.
My code follows:
#include <Argus/Argus.h>
#include <gst/gst.h>
#include <stdlib.h>
#include <unistd.h>
#include <sstream>
#include "EGLGlobal.h"
#include "Window.h"
#include "Error.h"
#include "PreviewConsumer.h"
#include "OCVConsumer.h"
#include <opencv2/opencv.hpp>
static const Argus::Size PREVIEW_STREAM_SIZE(640, 480);
namespace ArgusSamples
{
using namespace Argus;
// Constants.
static const int32_t FRAMERATE = 30;
static const int32_t BITRATE = 14000000;
static const char* ENCODER = "omxh264enc";
static const char* MUXER = "qtmux";
static const char* OUTPUT = "argus_gstvideoencode_out.mp4";
static const uint32_t LENGTH = 10; // in seconds.
// Globals.
EGLDisplayHolder g_display;
/**
* Class to initialize and control GStreamer video encoding from an EGLStream.
*/
class GstVideoEncoder
{
public:
GstVideoEncoder()
: m_state(GST_STATE_NULL)
, m_pipeline(NULL)
, m_videoEncoder(NULL)
{
}
~GstVideoEncoder()
{
shutdown();
}
/**
* Initialize the GStreamer video encoder pipeline.
* @param[in] eglStream The EGLStream to consume frames from.
* @param[in] resolution The resolution of the video.
* @param[in] framerate The framerate of the video (in frames per second).
* @param[in] bitrate The encoder bitrate (in bits per second).
* @param[in] encoder The encoder to use for encoding. Options include:
* avenc_h263, omxh264enc, omxh265enc, omxvp8enc, avenc_mpeg4
* @param[in] muxer The muxer/container to use. Options include:
* qtmux (MP4), 3gppmux (3GP), avimux (AVI), identity (H265)
* @param[in] output The filename/path for the encoded output (currently
* unused; the output name is derived from camindex instead).
* @param[in] camindex Index of the camera; used to name the output file.
*/
bool initialize(EGLStreamKHR eglStream, Argus::Size resolution,
int32_t framerate, int32_t bitrate,
const char* encoder, const char* muxer, const char* output, int camindex)
{
// Initialize GStreamer.
gst_init(NULL, NULL);
// Create pipeline.
m_pipeline = gst_pipeline_new("video_pipeline");
if (!m_pipeline)
ORIGINATE_ERROR("Failed to create video pipeline");
// Create EGLStream video source.
GstElement *videoSource = gst_element_factory_make("nveglstreamsrc", NULL);
if (!videoSource)
ORIGINATE_ERROR("Failed to create EGLStream video source");
if (!gst_bin_add(GST_BIN(m_pipeline), videoSource))
{
gst_object_unref(videoSource);
ORIGINATE_ERROR("Failed to add video source to pipeline");
}
g_object_set(G_OBJECT(videoSource), "display", g_display.get(), NULL);
g_object_set(G_OBJECT(videoSource), "eglstream", eglStream, NULL);
// Create queue.
GstElement *queue = gst_element_factory_make("queue", NULL);
if (!queue)
ORIGINATE_ERROR("Failed to create queue");
if (!gst_bin_add(GST_BIN(m_pipeline), queue))
{
gst_object_unref(queue);
ORIGINATE_ERROR("Failed to add queue to pipeline");
}
// Create encoder.
m_videoEncoder = gst_element_factory_make(encoder, NULL);
if (!m_videoEncoder)
ORIGINATE_ERROR("Failed to create video encoder");
if (!gst_bin_add(GST_BIN(m_pipeline), m_videoEncoder))
{
gst_object_unref(m_videoEncoder);
ORIGINATE_ERROR("Failed to add video encoder to pipeline");
}
g_object_set(G_OBJECT(m_videoEncoder), "bitrate", bitrate, NULL);
// Create muxer.
GstElement *videoMuxer = gst_element_factory_make(muxer, NULL);
if (!videoMuxer)
ORIGINATE_ERROR("Failed to create video muxer");
if (!gst_bin_add(GST_BIN(m_pipeline), videoMuxer))
{
gst_object_unref(videoMuxer);
ORIGINATE_ERROR("Failed to add video muxer to pipeline");
}
// Create file sink.
GstElement *fileSink = gst_element_factory_make("filesink", NULL);
if (!fileSink)
ORIGINATE_ERROR("Failed to create file sink");
if (!gst_bin_add(GST_BIN(m_pipeline), fileSink))
{
gst_object_unref(fileSink);
ORIGINATE_ERROR("Failed to add file sink to pipeline");
}
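// Derive the output filename from the camera index so the two pipelines
// do not write to the same file (the 'output' parameter is unused here).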
std::ostringstream fileName;
fileName << "encodedStream" << camindex << ".mp4";
g_object_set(G_OBJECT(fileSink), "location", fileName.str().c_str(), NULL);
// Create caps filter to describe EGLStream image format.
GstCaps *caps = gst_caps_new_simple("video/x-raw",
"format", G_TYPE_STRING, "I420",
"width", G_TYPE_INT, resolution.width,
"height", G_TYPE_INT, resolution.height,
"framerate", GST_TYPE_FRACTION, framerate, 1,
NULL);
if (!caps)
ORIGINATE_ERROR("Failed to create caps");
GstCapsFeatures *features = gst_caps_features_new("memory:NVMM", NULL);
if (!features)
{
gst_caps_unref(caps);
ORIGINATE_ERROR("Failed to create caps feature");
}
gst_caps_set_features(caps, 0, features);
// Link EGLStream source to queue via caps filter.
if (!gst_element_link_filtered(videoSource, queue, caps))
{
gst_caps_unref(caps);
ORIGINATE_ERROR("Failed to link EGLStream source to queue");
}
gst_caps_unref(caps);
// Link queue to encoder
if (!gst_element_link(queue, m_videoEncoder))
ORIGINATE_ERROR("Failed to link queue to encoder");
// Link encoder to muxer pad.
if (!gst_element_link_pads(m_videoEncoder, "src", videoMuxer, "video_%u"))
ORIGINATE_ERROR("Failed to link encoder to muxer pad");
// Link muxer to sink.
if (!gst_element_link(videoMuxer, fileSink))
ORIGINATE_ERROR("Failed to link muxer to sink");
return true;
}
/**
* Shutdown the GStreamer pipeline.
*/
void shutdown()
{
if (m_state == GST_STATE_PLAYING)
stopRecording();
if (m_pipeline)
gst_object_unref(GST_OBJECT(m_pipeline));
m_pipeline = NULL;
}
/**
* Start recording video.
*/
bool startRecording()
{
if (!m_pipeline || !m_videoEncoder)
ORIGINATE_ERROR("Video encoder not initialized");
if (m_state != GST_STATE_NULL)
ORIGINATE_ERROR("Video encoder already recording");
// Start the pipeline.
if (gst_element_set_state(m_pipeline, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE)
ORIGINATE_ERROR("Failed to start recording.");
m_state = GST_STATE_PLAYING;
return true;
}
/**
* Stop recording video.
*/
bool stopRecording()
{
if (!m_pipeline || !m_videoEncoder)
ORIGINATE_ERROR("Video encoder not initialized");
if (m_state != GST_STATE_PLAYING)
ORIGINATE_ERROR("Video encoder not recording");
// Send the end-of-stream event.
GstPad *pad = gst_element_get_static_pad(m_videoEncoder, "sink");
if (!pad)
ORIGINATE_ERROR("Failed to get 'sink' pad");
bool result = gst_pad_send_event(pad, gst_event_new_eos());
gst_object_unref(pad);
if (!result)
ORIGINATE_ERROR("Failed to send end of stream event to encoder");
// Wait for the event to complete.
GstBus *bus = gst_pipeline_get_bus(GST_PIPELINE(m_pipeline));
if (!bus)
ORIGINATE_ERROR("Failed to get bus");
GstMessage *msg = gst_bus_poll(bus, GST_MESSAGE_EOS, GST_CLOCK_TIME_NONE);
gst_object_unref(bus);
if (!msg)
ORIGINATE_ERROR("Failed to wait for the EOS event");
gst_message_unref(msg);
// Stop the pipeline.
if (gst_element_set_state(m_pipeline, GST_STATE_NULL) == GST_STATE_CHANGE_FAILURE)
ORIGINATE_ERROR("Failed to stop recording.");
m_state = GST_STATE_NULL;
return true;
}
protected:
GstState m_state;
GstElement *m_pipeline;
GstElement *m_videoEncoder;
};
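/**
 * Per-camera helper: creates a capture session for the camera at the given
 * index and feeds two output streams from it, one to a GStreamer video
 * encoder and one to an OCVConsumer.
 */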
class aaCameraInterface
{
public:
aaCameraInterface(int idx)
: m_camindex(idx)
{
}
~aaCameraInterface()
{
}
bool initialize (CameraDevice *cd, NvEglRenderer *renderer, Window & window)
{
// Create CameraProvider.
UniqueObj<CameraProvider> cameraProvider(CameraProvider::create());
ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
if (!iCameraProvider)
ORIGINATE_ERROR("Failed to open CameraProvider");
// Get the available CameraDevices and select the one at m_camindex.
std::vector<CameraDevice*> cameraDevices;
if (iCameraProvider->getCameraDevices(&cameraDevices) != STATUS_OK)
ORIGINATE_ERROR("Failed to get CameraDevices");
if (cameraDevices.size() == 0)
ORIGINATE_ERROR("No CameraDevices available");
CameraDevice *cameraDevice = cameraDevices[m_camindex];
ICameraProperties *iCameraProperties = interface_cast<ICameraProperties>(cameraDevice);
if (!iCameraProperties)
ORIGINATE_ERROR("Failed to get ICameraProperties interface");
// Create CaptureSession.
UniqueObj<CaptureSession> captureSession(iCameraProvider->createCaptureSession(cameraDevice));
ICaptureSession *iSession = interface_cast<ICaptureSession>(captureSession);
if (!iSession)
ORIGINATE_ERROR("Failed to create CaptureSession");
// Set common output stream settings.
UniqueObj<OutputStreamSettings> streamSettings(iSession->createOutputStreamSettings());
IOutputStreamSettings *iStreamSettings = interface_cast<IOutputStreamSettings>(streamSettings);
if (!iStreamSettings)
ORIGINATE_ERROR("Failed to create OutputStreamSettings");
iStreamSettings->setPixelFormat(PIXEL_FMT_YCbCr_420_888);
iStreamSettings->setEGLDisplay(g_display.get());
//iStreamSettings->setEGLDisplay(renderer->getEGLDisplay());
// Create video encoder stream.
iStreamSettings->setResolution(PREVIEW_STREAM_SIZE);
UniqueObj<OutputStream> videoStream(iSession->createOutputStream(streamSettings.get()));
IStream *iVideoStream = interface_cast<IStream>(videoStream);
if (!iVideoStream)
ORIGINATE_ERROR("Failed to create video stream");
UniqueObj<OutputStream> ocvStream(iSession->createOutputStream(streamSettings.get()));
if (!ocvStream.get())
ORIGINATE_ERROR("Failed to create StorageStream");
// Initialize the GStreamer video encoder consumer.
GstVideoEncoder gstVideoEncoder;
if (!gstVideoEncoder.initialize(iVideoStream->getEGLStream(), PREVIEW_STREAM_SIZE,
FRAMERATE, BITRATE, ENCODER, MUXER, OUTPUT, m_camindex))
ORIGINATE_ERROR("Failed to initialize GstVideoEncoder EGLStream consumer");
if (!gstVideoEncoder.startRecording())
ORIGINATE_ERROR("Failed to start video recording");
// Initialize the ocv consumer.
OCVConsumerThread ocvConsumer(ocvStream.get(), PREVIEW_STREAM_SIZE, renderer);
PROPAGATE_ERROR(ocvConsumer.initialize());
PROPAGATE_ERROR(ocvConsumer.waitRunning());
// Create capture Request and enable the streams in the Request.
UniqueObj<Request> request(iSession->createRequest(CAPTURE_INTENT_VIDEO_RECORD));
IRequest *iRequest = interface_cast<IRequest>(request);
if (!iRequest)
ORIGINATE_ERROR("Failed to create Request");
if (iRequest->enableOutputStream(videoStream.get()) != STATUS_OK)
ORIGINATE_ERROR("Failed to enable video stream in Request");
//if (iRequest->enableOutputStream(previewStream.get()) != STATUS_OK)
// ORIGINATE_ERROR("Failed to enable preview stream in Request");
if (iRequest->enableOutputStream(ocvStream.get()) != STATUS_OK)
ORIGINATE_ERROR("Failed to enable preview stream in Request");
// Perform repeat capture requests for LENGTH seconds.
if (iSession->repeat(request.get()) != STATUS_OK)
ORIGINATE_ERROR("Failed to start repeat capture requests");
PROPAGATE_ERROR(window.pollingSleep(LENGTH));
iSession->stopRepeat();
// Wait until all frames have completed before stopping recording.
/// @todo: Not doing this may cause a deadlock.
iSession->waitForIdle();
// Stop video recording.
if (!gstVideoEncoder.stopRecording())
ORIGINATE_ERROR("Failed to stop video recording");
gstVideoEncoder.shutdown();
videoStream.reset();
// Stop ocv consumer.
ocvStream.reset();
PROPAGATE_ERROR(ocvConsumer.shutdown());
return true;
}
protected:
CameraDevice *m_cameraDevice;
NvEglRenderer *m_renderer;
ICaptureSession *m_iSession;
GstVideoEncoder m_gstVideoEncoder;
OCVConsumerThread m_ocvConsumer;
int m_camindex;
};
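/**
 * Sample entry point: initializes the window/EGL display, then runs the two
 * camera interfaces one after the other.
 */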
static bool execute()
{
aaCameraInterface aaCamInterface0(0);
aaCameraInterface aaCamInterface1(1);
// Initialize the preview window and EGL display.
Window &window = Window::getInstance();
PROPAGATE_ERROR(g_display.initialize(window.getEGLNativeDisplay()));
PROPAGATE_ERROR(aaCamInterface0.initialize(NULL, NULL, window));
PROPAGATE_ERROR(aaCamInterface1.initialize(NULL, NULL, window));
return true;
}
}; // namespace ArgusSamples
int main(int argc, const char *argv[])
{
// NvEglRenderer *renderer = NvEglRenderer::createEglRenderer("renderer0", PREVIEW_STREAM_SIZE.width,
// PREVIEW_STREAM_SIZE.height, 0, 0);
// if (!renderer)
// ORIGINATE_ERROR("Failed to create EGLRenderer.");
if (!ArgusSamples::execute())
return EXIT_FAILURE;
return EXIT_SUCCESS;
}
When I run this, I get encoded output from the first camera but nothing from the second. The on-screen messages are below; the second call to initialize() fails with "Failed to open CameraProvider", after which Argus reports that its receive thread is not running. It seems there is some problem related to launching / running threads. Could someone please spot the basic threading issue I seem to have overlooked?
ubuntu@tegra-ubuntu:~/Downloads/argus/build/samples/gstVideoEncode$ ./argus_gstvideoencode
Inside NvxLiteH264DecoderLowLatencyInitNvxLiteH264DecoderLowLatencyInit set DPB and MjstreamingInside NvxLiteH265DecoderLowLatencyInitNvxLiteH265DecoderLowLatencyInit set DPB and MjstreamingOCV CONSUMER: Waiting until producer is connected...
Framerate set to : 30 at NvxVideoEncoderSetParameterNvMMLiteOpen : Block : BlockType = 4
===== MSENC =====
OCV CONSUMER: Producer has connected; continuing.
NvMMLiteBlockCreate : Block : BlockType = 4
OCV CONSUMER: No more frames. Cleaning up.
OCV CONSUMER: Done.
(Argus) Error InvalidState: Receive thread is not running cannot send. (in src/rpc/socket/client/ClientSocketManager.cpp, function send(), line 94)
(Argus) Error InvalidState: (propagating from src/rpc/socket/client/SocketClientDispatch.cpp, function dispatch(), line 101)
(Argus) Error InvalidState: Receive thread is not running cannot send. (in src/rpc/socket/client/ClientSocketManager.cpp, function send(), line 94)
(Argus) Error InvalidState: (propagating from src/rpc/socket/client/SocketClientDispatch.cpp, function dispatch(), line 101)
Error generated. /home/ubuntu/Downloads/argus/samples/gstVideoEncode/main.cpp, initialize:286 Failed to open CameraProvider
(Argus) Error InvalidState: Receive thread is not running cannot send. (in src/rpc/socket/client/ClientSocketManager.cpp, function send(), line 94)
(Argus) Error InvalidState: (propagating from src/rpc/socket/client/SocketClientDispatch.cpp, function dispatch(), line 101)
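One thing I suspect, but have not verified: each call to aaCameraInterface::initialize() creates its own UniqueObj<CameraProvider>, which is destroyed when the call returns, and the log shows the second attempt to open a CameraProvider failing before anything else runs. Below is a minimal sketch of the restructuring I am considering, where execute() creates the provider once and passes it into both interfaces. The first parameter of initialize() is changed to the shared ICameraProvider*, which is a hypothetical signature on my part:

// Sketch only, untested: create the CameraProvider once and share it across
// both cameras instead of creating and destroying one per initialize() call.
static bool execute()
{
    // Initialize the preview window and EGL display.
    Window &window = Window::getInstance();
    PROPAGATE_ERROR(g_display.initialize(window.getEGLNativeDisplay()));

    // The provider must outlive both capture sessions.
    UniqueObj<CameraProvider> cameraProvider(CameraProvider::create());
    ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
    if (!iCameraProvider)
        ORIGINATE_ERROR("Failed to open CameraProvider");

    aaCameraInterface aaCamInterface0(0);
    aaCameraInterface aaCamInterface1(1);

    // Hypothetical signature: initialize() would receive the shared provider
    // instead of creating its own.
    PROPAGATE_ERROR(aaCamInterface0.initialize(iCameraProvider, NULL, window));
    PROPAGATE_ERROR(aaCamInterface1.initialize(iCameraProvider, NULL, window));

    return true;
}

I also realize the two initialize() calls run back to back (each one blocks in window.pollingSleep(LENGTH)), so even with a shared provider I would presumably need to run each interface on its own thread to capture from both cameras at the same time. Does the shared-provider direction look right, or is the real problem in how the consumer threads are launched?

Thanks,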