Hi Folks,
I would like to record 1080p (60 fps) video and concurrently process those frames in opencv. I looked around for examples to follow, and thought of using argus/build/samples/gstVideoEncode as template.
- I implemented a consumer class, for opencv processing. I am able to read camera frames into a cv::Mat.
bool OCVConsumerThread::threadExecute()
{
IStream *iStream = interface_cast<IStream>(m_stream);
IFrameConsumer *iFrameConsumer = interface_cast<IFrameConsumer>(m_consumer);
Argus::Status status;
int ret;
// Wait until the producer has connected to the stream.
OCV_CONSUMER_PRINT("Waiting until producer is connected...\n");
if (iStream->waitUntilConnected() != STATUS_OK)
ORIGINATE_ERROR("Stream failed to connect.");
OCV_CONSUMER_PRINT("Producer has connected; continuing.\n");;
int frameCount = 0;
while (true)
{
// Acquire a Frame.
UniqueObj<Frame> frame(iFrameConsumer->acquireFrame());
IFrame *iFrame = interface_cast<IFrame>(frame);
if (!iFrame)
break;
// Get the Frame's Image.
Image *image = iFrame->getImage();
//IImageJPEG *iJPEG = interface_cast<IImageJPEG>(image);
//if (!iJPEG)
// ORIGINATE_ERROR("Failed to get IImageJPEG interface.");
EGLStream::NV::IImageNativeBuffer *iImageNativeBuffer
= interface_cast<EGLStream::NV::IImageNativeBuffer>(image);
TEST_ERROR_RETURN(!iImageNativeBuffer, "Failed to create an IImageNativeBuffer");
int fd = iImageNativeBuffer->createNvBuffer(Size {m_framesize.width, m_framesize.height},
NvBufferColorFormat_YUV420, NvBufferLayout_Pitch, &status);
if (status != STATUS_OK)
TEST_ERROR_RETURN(status != STATUS_OK, "Failed to create a native buffer");
uint8_t* data_mem;
int fsize = m_framesize.width * m_framesize.height ;
data_mem = (uint8_t*)mmap(NULL, fsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (data_mem == MAP_FAILED)
printf("mmap failed : %s\n", strerror(errno));
cv::Mat imgbuf = cv::Mat(m_framesize.height, m_framesize.width, CV_8UC1, data_mem);
cv::imshow("img", imgbuf);
cv::waitKey(1);
OCV_CONSUMER_PRINT("Acquired frame no. %d %d\n", m_framesize.height, m_framesize.width);
}
OCV_CONSUMER_PRINT("No more frames. Cleaning up.\n");
PROPAGATE_ERROR(requestShutdown());
return true;
}
- I replaced the PreviewConsumerThread of the original example with OCVConsumerThread.
/**
 * @brief Sets up a capture session with two consumers of the same camera:
 *        a GStreamer H.264/MP4 encoder and an OpenCV processing thread.
 *
 * Both output streams share the same stream settings (YCbCr 420,
 * PREVIEW_STREAM_SIZE resolution). Repeat-captures for LENGTH seconds,
 * then tears everything down in reverse order of creation.
 *
 * @return true on success, false on error (via ORIGINATE_ERROR/PROPAGATE_ERROR).
 */
static bool execute()
{
    using namespace Argus;

    // Initialize the preview window and EGL display.
    Window &window = Window::getInstance();
    PROPAGATE_ERROR(g_display.initialize(window.getEGLNativeDisplay()));

    // Create CameraProvider.
    UniqueObj<CameraProvider> cameraProvider(CameraProvider::create());
    ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
    if (!iCameraProvider)
        ORIGINATE_ERROR("Failed to open CameraProvider");

    // Get/use the first available CameraDevice.
    std::vector<CameraDevice*> cameraDevices;
    if (iCameraProvider->getCameraDevices(&cameraDevices) != STATUS_OK)
        ORIGINATE_ERROR("Failed to get CameraDevices");
    if (cameraDevices.size() == 0)
        ORIGINATE_ERROR("No CameraDevices available");
    CameraDevice *cameraDevice = cameraDevices[0];
    ICameraProperties *iCameraProperties = interface_cast<ICameraProperties>(cameraDevice);
    if (!iCameraProperties)
        ORIGINATE_ERROR("Failed to get ICameraProperties interface");

    // Create CaptureSession.
    UniqueObj<CaptureSession> captureSession(iCameraProvider->createCaptureSession(cameraDevice));
    ICaptureSession *iSession = interface_cast<ICaptureSession>(captureSession);
    if (!iSession)
        ORIGINATE_ERROR("Failed to create CaptureSession");

    // Validate that the device exposes at least one sensor mode.
    // (iSensorMode is kept for switching to iSensorMode->getResolution().)
    std::vector<Argus::SensorMode*> sensorModes;
    iCameraProperties->getSensorModes(&sensorModes);
    if (sensorModes.size() == 0)
        ORIGINATE_ERROR("Failed to get sensor modes");
    ISensorMode *iSensorMode = interface_cast<ISensorMode>(sensorModes[0]);
    if (!iSensorMode)
        ORIGINATE_ERROR("Failed to get sensor mode interface");

    // Set common output stream settings, shared by both streams below.
    UniqueObj<OutputStreamSettings> streamSettings(iSession->createOutputStreamSettings());
    IOutputStreamSettings *iStreamSettings = interface_cast<IOutputStreamSettings>(streamSettings);
    if (!iStreamSettings)
        ORIGINATE_ERROR("Failed to create OutputStreamSettings");
    iStreamSettings->setPixelFormat(PIXEL_FMT_YCbCr_420_888);
    iStreamSettings->setEGLDisplay(g_display.get());
    iStreamSettings->setResolution(PREVIEW_STREAM_SIZE);

    // Create the video encoder stream.
    UniqueObj<OutputStream> videoStream(iSession->createOutputStream(streamSettings.get()));
    IStream *iVideoStream = interface_cast<IStream>(videoStream);
    if (!iVideoStream)
        ORIGINATE_ERROR("Failed to create video stream");

    // Create the OpenCV consumer stream (replaces the sample's preview stream).
    UniqueObj<OutputStream> ocvStream(iSession->createOutputStream(streamSettings.get()));
    if (!ocvStream.get())
        ORIGINATE_ERROR("Failed to create OCV stream");

    // Create the capture Request and enable both streams in it.
    UniqueObj<Request> request(iSession->createRequest(CAPTURE_INTENT_VIDEO_RECORD));
    IRequest *iRequest = interface_cast<IRequest>(request);
    if (!iRequest)
        ORIGINATE_ERROR("Failed to create Request");
    if (iRequest->enableOutputStream(videoStream.get()) != STATUS_OK)
        ORIGINATE_ERROR("Failed to enable video stream in Request");
    if (iRequest->enableOutputStream(ocvStream.get()) != STATUS_OK)
        ORIGINATE_ERROR("Failed to enable OCV stream in Request");

    // Initialize the GStreamer video encoder consumer.
    GstVideoEncoder gstVideoEncoder;
    if (!gstVideoEncoder.initialize(iVideoStream->getEGLStream(), PREVIEW_STREAM_SIZE,
                                    FRAMERATE, BITRATE, ENCODER, MUXER, OUTPUT))
        ORIGINATE_ERROR("Failed to initialize GstVideoEncoder EGLStream consumer");
    if (!gstVideoEncoder.startRecording())
        ORIGINATE_ERROR("Failed to start video recording");

    // Initialize the OpenCV consumer thread and wait until it is running.
    OCVConsumerThread ocvConsumer(ocvStream.get(), PREVIEW_STREAM_SIZE);
    PROPAGATE_ERROR(ocvConsumer.initialize());
    PROPAGATE_ERROR(ocvConsumer.waitRunning());

    // Perform repeat capture requests for LENGTH seconds.
    if (iSession->repeat(request.get()) != STATUS_OK)
        ORIGINATE_ERROR("Failed to start repeat capture requests");
    PROPAGATE_ERROR(window.pollingSleep(LENGTH));
    iSession->stopRepeat();

    // Wait until all frames have completed before stopping recording.
    /// @todo: Not doing this may cause a deadlock.
    iSession->waitForIdle();

    // Stop video recording.
    if (!gstVideoEncoder.stopRecording())
        ORIGINATE_ERROR("Failed to stop video recording");
    gstVideoEncoder.shutdown();
    videoStream.reset();

    // Stop the OpenCV consumer; resetting the stream disconnects the
    // producer, which makes the consumer's acquireFrame() loop exit.
    ocvStream.reset();
    PROPAGATE_ERROR(ocvConsumer.shutdown());

    return true;
}
- When I build and run this, I see that my encoder output (.mp4) is decodable and looks good. However, the output shown by imshow is garbage. Do you see any problem with my mapping?
I also observed that the encoder pipeline chokes when I create the buffer (createNvBuffer) like this -
int fd = iImageNativeBuffer->createNvBuffer(Size {m_framesize.width, m_framesize.height},
NvBufferColorFormat_YUV420, NvBufferLayout_BlockLinear, &status);
But encoder does not choke with -
int fd = iImageNativeBuffer->createNvBuffer(Size {m_framesize.width, m_framesize.height},
NvBufferColorFormat_YUV420, NvBufferLayout_Pitch, &status);
I guess I am not setting the stride/pitch of the buffer properly before mapping it for cv::Mat.
Thanks,