This is the main source code, but I think we should focus only on the V4L2-related part.
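Before the full listing, here is a minimal sketch of just the V4L2 calls each camera goes through in this code (cam_fd, num_buffers and dmabuf_fd[] stand in for ctx->cam_fd, V4L2_BUFFERS_NUM and the NvBuffer fds; error handling is reduced to early returns, so treat it as an outline rather than a drop-in replacement):

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <poll.h>

// Minimal sketch of the V4L2 DMABUF capture path used by the code below.
// cam_fd is an already-opened /dev/videoN descriptor, dmabuf_fd[] holds the
// fds created with NvBufferCreate().
static bool v4l2_capture_one_frame(int cam_fd, const int *dmabuf_fd, unsigned num_buffers)
{
    struct v4l2_format fmt = {};
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = 1280;
    fmt.fmt.pix.height = 720;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
    if (ioctl(cam_fd, VIDIOC_S_FMT, &fmt) < 0) return false;

    struct v4l2_requestbuffers rb = {};
    rb.count = num_buffers;
    rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    rb.memory = V4L2_MEMORY_DMABUF;                  // buffers are shared dmabufs, not mmap'ed
    if (ioctl(cam_fd, VIDIOC_REQBUFS, &rb) < 0) return false;

    for (unsigned i = 0; i < num_buffers; i++) {
        struct v4l2_buffer buf = {};
        buf.index = i;
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_DMABUF;
        if (ioctl(cam_fd, VIDIOC_QUERYBUF, &buf) < 0) return false;
        buf.m.fd = dmabuf_fd[i];                     // hand the NvBuffer fd to the driver
        if (ioctl(cam_fd, VIDIOC_QBUF, &buf) < 0) return false;
    }

    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(cam_fd, VIDIOC_STREAMON, &type) < 0) return false;

    struct pollfd pfd = { cam_fd, POLLIN, 0 };
    if (poll(&pfd, 1, 5000) <= 0) return false;      // wait for a filled buffer

    struct v4l2_buffer buf = {};
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_DMABUF;
    if (ioctl(cam_fd, VIDIOC_DQBUF, &buf) < 0) return false;
    // ... dmabuf_fd[buf.index] now holds a UYVY frame; pass it to the VIC ...
    return ioctl(cam_fd, VIDIOC_QBUF, &buf) == 0;    // recycle the buffer
}

The full source follows.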
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <poll.h>
#include <ctime>
#include <ratio>
#include <chrono>
#include "HYEncoder.h"
using namespace std;
using namespace std::chrono;
high_resolution_clock::time_point timeStamp0;
high_resolution_clock::time_point timeStamp1;
void HY_Encoder::SetDefaults(context_t * ctx) {
ctx->cam_fd = -1;
ctx->cam_pixfmt = V4L2_PIX_FMT_UYVY;
ctx->cam_w = 1280;
ctx->cam_h = 720;
ctx->frame = 0;
ctx->save_n_frame = 0;
ctx->conv = NULL;
ctx->vic_pixfmt = V4L2_PIX_FMT_YUV420M;
ctx->vic_flip = (enum v4l2_flip_method) -1;
ctx->vic_interpolation = (enum v4l2_interpolation_method) -1;
ctx->vic_tnr = (enum v4l2_tnr_algorithm) -1;
ctx->g_buff = NULL;
ctx->renderer = NULL;
ctx->got_error = false;
ctx->fps = 30;
ctx->conv_output_plane_buf_queue = new queue < nv_buffer * >;
ctx->enc_output_buf_queue = new queue < nv_buffer *>;
pthread_mutex_init(&ctx->queue_lock, NULL);
pthread_cond_init(&ctx->queue_cond, NULL);
ctx->enable_cuda = false;
ctx->egl_image = NULL;
ctx->egl_display = EGL_NO_DISPLAY;
ctx->enable_verbose = false;
}
static nv_color_fmt nvcolor_fmt[] = {
// TODO add more pixel format mapping
{V4L2_PIX_FMT_UYVY, NvBufferColorFormat_UYVY},
{V4L2_PIX_FMT_VYUY, NvBufferColorFormat_VYUY},
{V4L2_PIX_FMT_YUYV, NvBufferColorFormat_YUYV},
{V4L2_PIX_FMT_YVYU, NvBufferColorFormat_YVYU},
};
NvBufferColorFormat HY_Encoder::GetNvbuffColorFmt(unsigned int v4l2_pixfmt) {
for (unsigned i = 0; i < sizeof(nvcolor_fmt) / sizeof(nvcolor_fmt[0]); i++)
{
if (v4l2_pixfmt == nvcolor_fmt[i].v4l2_pixfmt)
return nvcolor_fmt[i].nvbuff_color;
}
return NvBufferColorFormat_Invalid;
}
bool HY_Encoder::CameraInitialize(context_t * ctx) {
struct v4l2_format fmt;
// Open camera device
ctx->cam_fd = open(ctx->cam_devname, O_RDWR);
if (ctx->cam_fd == -1)
ERROR_RETURN("Failed to open camera device %s: %s (%d)",
ctx->cam_devname, strerror(errno), errno);
// Set camera output format
memset(&fmt, 0, sizeof(fmt));
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fmt.fmt.pix.width = ctx->cam_w;
fmt.fmt.pix.height = ctx->cam_h;
fmt.fmt.pix.pixelformat = ctx->cam_pixfmt;
fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
if (ioctl(ctx->cam_fd, VIDIOC_S_FMT, &fmt) < 0)
ERROR_RETURN("Failed to set camera output format: %s (%d)",
strerror(errno), errno);
// Get the real format in case the desired is not supported
memset(&fmt, 0, sizeof fmt);
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(ctx->cam_fd, VIDIOC_G_FMT, &fmt) < 0)
ERROR_RETURN("Failed to get camera output format: %s (%d)",
strerror(errno), errno);
if (fmt.fmt.pix.width != ctx->cam_w ||
fmt.fmt.pix.height != ctx->cam_h ||
fmt.fmt.pix.pixelformat != ctx->cam_pixfmt)
{
WARN("The desired format is not supported");
ctx->cam_w = fmt.fmt.pix.width;
ctx->cam_h = fmt.fmt.pix.height;
ctx->cam_pixfmt = fmt.fmt.pix.pixelformat;
}
INFO("Camera ouput format: (%d x %d) stride: %d, imagesize: %d",
fmt.fmt.pix.width,
fmt.fmt.pix.height,
fmt.fmt.pix.bytesperline,
fmt.fmt.pix.sizeimage);
return true;
}
bool HY_Encoder::VicInitialize(context_t * ctx) {
// Create VIC (Video Converter) instance
ctx->conv = NvVideoConverter::createVideoConverter("conv");
if (ctx->conv == NULL)
ERROR_RETURN("Failed to create video converter");
if (ctx->vic_flip != -1 && ctx->conv->setFlipMethod(ctx->vic_flip) < 0)
ERROR_RETURN("Failed to set flip method");
if (ctx->vic_interpolation != -1 &&
ctx->conv->setInterpolationMethod(ctx->vic_interpolation) < 0)
ERROR_RETURN("Failed to set interpolation method");
if (ctx->vic_tnr != -1 && ctx->conv->setTnrAlgorithm(ctx->vic_tnr) < 0)
ERROR_RETURN("Failed to set tnr algorithm");
// Set up VIC output plane format
if (ctx->conv->setOutputPlaneFormat(ctx->cam_pixfmt, ctx->cam_w,
ctx->cam_h, V4L2_NV_BUFFER_LAYOUT_PITCH) < 0)
ERROR_RETURN("Failed to set up VIC output plane format");
// Set up VIC capture plane format
// The target format can be reconfigured from SetDefaults()
if (ctx->conv->setCapturePlaneFormat(ctx->vic_pixfmt, ctx->cam_w,
ctx->cam_h, V4L2_NV_BUFFER_LAYOUT_PITCH) < 0)
ERROR_RETURN("Failed to set up VIC capture plane format");
// Allocate VIC output plane
if (ctx->conv->output_plane.setupPlane(V4L2_MEMORY_DMABUF,
V4L2_BUFFERS_NUM, false, false) < 0)
ERROR_RETURN("Failed to allocate VIC output plane");
// Allocate VIC capture plane
if (ctx->conv->capture_plane.setupPlane(V4L2_MEMORY_MMAP,
V4L2_BUFFERS_NUM, true, false) < 0)
ERROR_RETURN("Failed to allocate VIC capture plane");
return true;
}
bool HY_Encoder::EncoderInitialize(context_t * ctx) {
ctx->enc = NvVideoEncoder::createVideoEncoder("enc0");
if (ctx->enc == NULL)
ERROR_RETURN("Failed to create video encoder");
if (ctx->enc->setCapturePlaneFormat(V4L2_PIX_FMT_H264, ctx->cam_w,
ctx->cam_h, 2 * 1024 * 1024) < 0)
ERROR_RETURN("Failed to set up ENC capture plane format");
if (ctx->enc->setOutputPlaneFormat(V4L2_PIX_FMT_YUV420M, ctx->cam_w,
ctx->cam_h) < 0)
ERROR_RETURN("Failed to set up ENC output plane format");
if (ctx->enc->setBitrate(4<<20) < 0)
ERROR_RETURN("Failed to set up ENC bitrate");
if (ctx->enc->setProfile(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) < 0)
ERROR_RETURN("Failed to set up ENC profile");
if (ctx->enc->setLevel(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) < 0)
ERROR_RETURN("Failed to set up ENC level");
if (ctx->enc->setRateControlMode(V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) < 0)
ERROR_RETURN("Failed to set up ENC rate control mode");
//VuNguyen: set frame interval and frame rate
if (ctx->enc->setIFrameInterval(30) < 0)
ERROR_RETURN("Failed to set up ENC frame interval");
if (ctx->enc->setFrameRate(30,1) < 0)
ERROR_RETURN("Failed to set up ENC frame rate");
if (ctx->enc->output_plane.setupPlane(V4L2_MEMORY_DMABUF, 10, false, false) < 0)
ERROR_RETURN("Failed to set up ENC output plane");
if (ctx->enc->capture_plane.setupPlane(V4L2_MEMORY_MMAP, 10, true, false) < 0)
ERROR_RETURN("Failed to set up ENC capture plane");
return true;
}
bool HY_Encoder::InitComponents(context_t * ctx) {
if (!this->CameraInitialize(ctx))
ERROR_RETURN("Failed to initialize camera device");
if (!this->VicInitialize(ctx))
ERROR_RETURN("Failed to initialize video converter");
if (!this->EncoderInitialize(ctx))
ERROR_RETURN("Failed to initialize encoder");
//Create an output file
//ctx->m_outputFile = new std::ofstream(ctx->cam_file);
timeStamp0 = high_resolution_clock::now();
timeStamp1 = high_resolution_clock::now();
INFO("Initialize v4l2 components successfully");
return true;
}
bool HY_Encoder::RequestCameraBuff(context_t *ctx) {
// Request camera v4l2 buffer
struct v4l2_requestbuffers rb;
memset(&rb, 0, sizeof(rb));
rb.count = V4L2_BUFFERS_NUM;
rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
rb.memory = V4L2_MEMORY_DMABUF;
if (ioctl(ctx->cam_fd, VIDIOC_REQBUFS, &rb) < 0)
ERROR_RETURN("Failed to request v4l2 buffers: %s (%d)",
strerror(errno), errno);
if (rb.count != V4L2_BUFFERS_NUM)
ERROR_RETURN("V4l2 buffer number is not as desired");
for (unsigned int index = 0; index < V4L2_BUFFERS_NUM; index++)
{
struct v4l2_buffer buf;
// Query camera v4l2 buf length
memset(&buf, 0, sizeof buf);
buf.index = index;
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_DMABUF;
if (ioctl(ctx->cam_fd, VIDIOC_QUERYBUF, &buf) < 0)
ERROR_RETURN("Failed to query buff: %s (%d)",
strerror(errno), errno);
// TODO: add support for multi-planar
// Enqueue empty v4l2 buff into camera capture plane
buf.m.fd = (unsigned long)ctx->g_buff[index].dmabuff_fd;
if (buf.length != ctx->g_buff[index].size)
{
WARN("Camera v4l2 buf length is not expected");
ctx->g_buff[index].size = buf.length;
}
if (ioctl(ctx->cam_fd, VIDIOC_QBUF, &buf) < 0)
ERROR_RETURN("Failed to enqueue buffers: %s (%d)",
strerror(errno), errno);
}
return true;
}
bool HY_Encoder::EnqueueVicBuff(context_t *ctx) {
// Enqueue empty buffer into VIC output plane
for (unsigned int index = 0;
index < ctx->conv->output_plane.getNumBuffers(); index++)
{
struct v4l2_buffer v4l2_buf;
struct v4l2_plane planes[MAX_PLANES];
memset(&v4l2_buf, 0, sizeof(v4l2_buf));
memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
v4l2_buf.index = index;
v4l2_buf.m.planes = planes;
if (ctx->conv->output_plane.qBuffer(v4l2_buf, NULL) < 0)
ERROR_RETURN("Failed to enqueue empty buffer into VIC output plane");
}
// Enqueue empty buffer into VIC capture plane
for (unsigned int index = 0;
index < ctx->conv->capture_plane.getNumBuffers(); index++)
{
struct v4l2_buffer v4l2_buf;
struct v4l2_plane planes[MAX_PLANES];
memset(&v4l2_buf, 0, sizeof(v4l2_buf));
memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
v4l2_buf.index = index;
v4l2_buf.m.planes = planes;
if (ctx->conv->capture_plane.qBuffer(v4l2_buf, NULL) < 0)
ERROR_RETURN("Failed to enqueue empty buffer into VIC capture plane");
}
return true;
}
bool HY_Encoder::PrepareBuffers(context_t * ctx) {
// Allocate global buffer context
ctx->g_buff = (nv_buffer *)malloc(V4L2_BUFFERS_NUM * sizeof(nv_buffer));
if (ctx->g_buff == NULL)
ERROR_RETURN("Failed to allocate global buffer context");
// Create buffer and share it with camera and VIC output plane
for (unsigned int index = 0; index < V4L2_BUFFERS_NUM; index++)
{
int fd;
NvBufferParams params = {0};
if (-1 == NvBufferCreate(&fd, ctx->cam_w, ctx->cam_h,
NvBufferLayout_Pitch,
this->GetNvbuffColorFmt(ctx->cam_pixfmt)))
ERROR_RETURN("Failed to create NvBuffer");
ctx->g_buff[index].dmabuff_fd = fd;
if (-1 == NvBufferGetParams(fd, &params))
ERROR_RETURN("Failed to get NvBuffer parameters");
// TODO add multi-planar support
// Currently only YUV422 interleaved single-planar is supported
ctx->g_buff[index].size = params.height[0] * params.pitch[0];
ctx->g_buff[index].start = (unsigned char *)mmap(
NULL,
ctx->g_buff[index].size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
ctx->g_buff[index].dmabuff_fd, 0);
}
if (!this->RequestCameraBuff(ctx))
ERROR_RETURN("Failed to set up camera buff");
if (!this->EnqueueVicBuff(ctx))
ERROR_RETURN("Failed to enqueue empty buff into VIC");
INFO("Succeed in preparing stream buffers");
return true;
}
bool HY_Encoder::StartStream(context_t * ctx) {
enum v4l2_buf_type type;
// Start v4l2 streaming
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(ctx->cam_fd, VIDIOC_STREAMON, &type) < 0)
ERROR_RETURN("Failed to start streaming: %s (%d)",
strerror(errno), errno);
// Start VIC output plane
if (ctx->conv->output_plane.setStreamStatus(true) < 0)
ERROR_RETURN("Failed to start VIC output plane streaming");
// Start VIC capture plane
if (ctx->conv->capture_plane.setStreamStatus(true) < 0)
ERROR_RETURN("Failed to start VIC capture plane streaming");
// Start ENC output plane
if (ctx->enc->output_plane.setStreamStatus(true) < 0)
ERROR_RETURN("Failed to start ENC output plane streaming");
// Start ENC capture plane
if (ctx->enc->capture_plane.setStreamStatus(true) < 0)
ERROR_RETURN("Failed to start ENC capture plane streaming");
usleep(200);
INFO("Camera video streaming on ...");
return true;
}
static void abort(context_t *ctx) {
ctx->got_error = true;
if (ctx->conv)
ctx->conv->abort();
if (ctx->enc)
ctx->enc->abort();
}
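// Dequeue-thread callbacks below. Data flow per camera context:
//   camera (V4L2_MEMORY_DMABUF) -> VIC output plane -> VIC capture plane
//   -> ENC output plane (DMABUF) -> ENC capture plane (H.264 bitstream)
// The conv*_output callbacks pull the next camera dmabuf from
// conv_output_plane_buf_queue and re-queue it on the VIC output plane, the
// conv*_capture callbacks forward the converted YUV420M buffer to the encoder,
// and the enc*_capture callbacks copy the encoded bitstream into
// enc_output_buf_queue for the muxer.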
static bool conv0_output_dqbuf_thread_callback(struct v4l2_buffer *v4l2_buf,
NvBuffer * buffer, NvBuffer * shared_buffer,
void *arg) {
context_t *ctx = (context_t *) arg;
nv_buffer * cam_g_buff;
if (!v4l2_buf)
{
abort(ctx);
ERROR_RETURN("Failed to dequeue conv output plane buffer");
}
// Fetch nv_buffer to do format conversion
pthread_mutex_lock(&ctx->queue_lock);
while (ctx->conv_output_plane_buf_queue->empty())
{
pthread_cond_wait(&ctx->queue_cond, &ctx->queue_lock);
}
cam_g_buff = ctx->conv_output_plane_buf_queue->front();
ctx->conv_output_plane_buf_queue->pop();
pthread_mutex_unlock(&ctx->queue_lock);
// Got EOS signal and return
if (cam_g_buff->dmabuff_fd == 0)
return false;
else
{
// Enqueue vic output plane
v4l2_buf->m.planes[0].m.fd =
(unsigned long)cam_g_buff->dmabuff_fd;
v4l2_buf->m.planes[0].bytesused = cam_g_buff->size;
}
if (ctx->conv->output_plane.qBuffer(*v4l2_buf, NULL) < 0)
{
abort(ctx);
ERROR_RETURN("Failed to enqueue VIC output plane");
}
return true;
}
static bool conv0_capture_dqbuf_thread_callback(struct v4l2_buffer *v4l2_buf,
NvBuffer * buffer, NvBuffer * shared_buffer,
void *arg) {
context_t *ctx = (context_t *) arg;
struct v4l2_buffer enc_buf;
struct v4l2_plane planes[MAX_PLANES];
memset(&enc_buf, 0, sizeof(v4l2_buf));
memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
enc_buf.m.planes = planes;
if (buffer->planes[0].bytesused) {
enc_buf.m.planes[0].m.fd = buffer->planes[0].fd;
enc_buf.m.planes[0].bytesused = 1;
if (ctx->enc->output_plane.qBuffer(enc_buf, buffer) < 0)
{
abort(ctx);
ERROR_RETURN("Failed to queue buffer on ENC output plane");
}
}
if (ctx->conv->capture_plane.qBuffer(*v4l2_buf, buffer) < 0)
{
abort(ctx);
ERROR_RETURN("Failed to queue buffer on VIC capture plane");
}
return true;
}
static bool enc0_capture_dqbuf_thread_callback(struct v4l2_buffer *v4l2_buf,
NvBuffer * buffer, NvBuffer * shared_buffer,
void *arg) {
duration<double> time_span;
context_t *ctx = (context_t *) arg;
//printf("encoded frame size %d \n", buffer->planes[0].bytesused);
if (ctx->enc->capture_plane.qBuffer(*v4l2_buf, NULL) < 0)
{
abort(ctx);
ERROR_RETURN("Failed to queue buffer on ENC capture plane");
}
time_span = duration_cast<duration<double>>(high_resolution_clock::now() - timeStamp0);
//printf ("Processing Time 0 : %f \r\n", time_span.count());
//ctx->m_outputFile->write((char*) buffer->planes[0].data, buffer->planes[0].bytesused);
// Push nv_buffer into enc queue for muxer
if (ctx->enc_output_buf_queue->size() < MAX_QUEUE_BUFER)
{
pthread_mutex_lock(&ctx->encQueueLock);
nv_buffer *buff = new nv_buffer;
buff->size = buffer->planes[0].bytesused;
buff->start = (unsigned char*)malloc (buffer->planes[0].bytesused);
memcpy(buff->start, (char*) buffer->planes[0].data, buff->size);
ctx->enc_output_buf_queue->push(buff);
pthread_cond_broadcast(&ctx->encQueueCond);
pthread_mutex_unlock(&ctx->encQueueLock);
}
// GOT EOS
if (buffer->planes[0].bytesused == 0)
{
printf("Got EOS, exiting...\n");
return false;
}
if (0) { // quit
struct v4l2_buffer v4l2_buf;
struct v4l2_plane planes[MAX_PLANES];
memset(&v4l2_buf, 0, sizeof(v4l2_buf));
memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
v4l2_buf.m.planes = planes;
v4l2_buf.m.planes[0].m.fd = -1;
ctx->enc->output_plane.qBuffer(v4l2_buf, NULL);
}
timeStamp0 = high_resolution_clock::now();
return true;
}
static bool conv1_output_dqbuf_thread_callback(struct v4l2_buffer *v4l2_buf,
NvBuffer * buffer, NvBuffer * shared_buffer,
void *arg) {
context_t *ctx = (context_t *) arg;
nv_buffer * cam_g_buff;
if (!v4l2_buf)
{
abort(ctx);
ERROR_RETURN("Failed to dequeue conv output plane buffer");
}
// Fetch nv_buffer to do format conversion
pthread_mutex_lock(&ctx->queue_lock);
while (ctx->conv_output_plane_buf_queue->empty())
{
pthread_cond_wait(&ctx->queue_cond, &ctx->queue_lock);
}
cam_g_buff = ctx->conv_output_plane_buf_queue->front();
ctx->conv_output_plane_buf_queue->pop();
pthread_mutex_unlock(&ctx->queue_lock);
// Got EOS signal and return
if (cam_g_buff->dmabuff_fd == 0)
return false;
else
{
// Enqueue vic output plane
v4l2_buf->m.planes[0].m.fd =
(unsigned long)cam_g_buff->dmabuff_fd;
v4l2_buf->m.planes[0].bytesused = cam_g_buff->size;
}
if (ctx->conv->output_plane.qBuffer(*v4l2_buf, NULL) < 0)
{
abort(ctx);
ERROR_RETURN("Failed to enqueue VIC output plane");
}
return true;
}
static bool conv1_capture_dqbuf_thread_callback(struct v4l2_buffer *v4l2_buf,
NvBuffer * buffer, NvBuffer * shared_buffer,
void *arg) {
context_t *ctx = (context_t *) arg;
struct v4l2_buffer enc_buf;
struct v4l2_plane planes[MAX_PLANES];
memset(&enc_buf, 0, sizeof(v4l2_buf));
memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
enc_buf.m.planes = planes;
if (buffer->planes[0].bytesused) {
enc_buf.m.planes[0].m.fd = buffer->planes[0].fd;
enc_buf.m.planes[0].bytesused = 1;
if (ctx->enc->output_plane.qBuffer(enc_buf, buffer) < 0)
{
abort(ctx);
ERROR_RETURN("Failed to queue buffer on ENC output plane");
}
}
if (ctx->conv->capture_plane.qBuffer(*v4l2_buf, buffer) < 0)
{
abort(ctx);
ERROR_RETURN("Failed to queue buffer on VIC capture plane");
}
return true;
}
static bool enc1_capture_dqbuf_thread_callback(struct v4l2_buffer *v4l2_buf,
NvBuffer * buffer, NvBuffer * shared_buffer,
void *arg) {
duration<double> time_span;
context_t *ctx = (context_t *) arg;
//printf("encoded frame size %d \n", buffer->planes[0].bytesused);
if (ctx->enc->capture_plane.qBuffer(*v4l2_buf, NULL) < 0)
{
abort(ctx);
ERROR_RETURN("Failed to queue buffer on ENC capture plane");
}
time_span = duration_cast<duration<double>>(high_resolution_clock::now() - timeStamp1);
//printf ("Processing Time 1 : %f \r\n", time_span.count());
//ctx->m_outputFile->write((char*) buffer->planes[0].data, buffer->planes[0].bytesused);
// Push nv_buffer into enc queue for muxer
if (ctx->enc_output_buf_queue->size() < MAX_QUEUE_BUFER)
{
pthread_mutex_lock(&ctx->encQueueLock);
nv_buffer *buff = new nv_buffer;
buff->size = buffer->planes[0].bytesused;
buff->start = (unsigned char*)malloc (buffer->planes[0].bytesused);
memcpy(buff->start, (char*) buffer->planes[0].data, buff->size);
ctx->enc_output_buf_queue->push(buff);
pthread_cond_broadcast(&ctx->encQueueCond);
pthread_mutex_unlock(&ctx->encQueueLock);
}
// GOT EOS
if (buffer->planes[0].bytesused == 0)
{
printf("Got EOS, exiting...\n");
return false;
}
if (0) { // quit
struct v4l2_buffer v4l2_buf;
struct v4l2_plane planes[MAX_PLANES];
memset(&v4l2_buf, 0, sizeof(v4l2_buf));
memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
v4l2_buf.m.planes = planes;
v4l2_buf.m.planes[0].m.fd = -1;
ctx->enc->output_plane.qBuffer(v4l2_buf, NULL);
}
timeStamp1 = high_resolution_clock::now();
return true;
}
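// StartCapture registers the callbacks above, starts the DQ threads, primes the
// ENC capture planes with empty buffers, and then polls both camera fds. Each
// POLLIN dequeues a filled camera dmabuf, pushes it onto the matching
// conv_output_plane_buf_queue (waking the VIC output-plane DQ thread), copies the
// frame into a cv::Mat via ConvertLeftFrame/ConvertRightFrame, and re-queues the
// buffer to the camera.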
bool HY_Encoder::StartCapture(context_t * ctx0, context_t * ctx1) {
struct pollfd fds0[1];
struct pollfd fds1[1];
static int count = 0;
ctx0->conv->capture_plane.setDQThreadCallback(conv0_capture_dqbuf_thread_callback);
ctx0->conv->output_plane.setDQThreadCallback(conv0_output_dqbuf_thread_callback);
ctx0->enc->capture_plane.setDQThreadCallback(enc0_capture_dqbuf_thread_callback);
ctx1->conv->capture_plane.setDQThreadCallback(conv1_capture_dqbuf_thread_callback);
ctx1->conv->output_plane.setDQThreadCallback(conv1_output_dqbuf_thread_callback);
ctx1->enc->capture_plane.setDQThreadCallback(enc1_capture_dqbuf_thread_callback);
// Start VIC processing thread
ctx0->conv->capture_plane.startDQThread(ctx0);
ctx0->conv->output_plane.startDQThread(ctx0);
ctx0->enc->capture_plane.startDQThread(ctx0);
ctx1->conv->capture_plane.startDQThread(ctx1);
ctx1->conv->output_plane.startDQThread(ctx1);
ctx1->enc->capture_plane.startDQThread(ctx1);
printf ("Start Capture \r\n");
// Enqueue all the empty capture plane buffers
for (uint32_t i = 0; i < ctx0->enc->capture_plane.getNumBuffers(); i++)
{
struct v4l2_buffer v4l2_buf;
struct v4l2_plane planes[MAX_PLANES];
memset(&v4l2_buf, 0, sizeof(v4l2_buf));
memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
v4l2_buf.index = i;
v4l2_buf.m.planes = planes;
if (ctx0->enc->capture_plane.qBuffer(v4l2_buf, NULL) < 0)
ERROR_RETURN("Failed to queue buffer on ENC capture plane");
}
for (uint32_t i = 0; i < ctx1->enc->capture_plane.getNumBuffers(); i++)
{
struct v4l2_buffer v4l2_buf;
struct v4l2_plane planes[MAX_PLANES];
memset(&v4l2_buf, 0, sizeof(v4l2_buf));
memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
v4l2_buf.index = i;
v4l2_buf.m.planes = planes;
if (ctx1->enc->capture_plane.qBuffer(v4l2_buf, NULL) < 0)
ERROR_RETURN("Failed to queue buffer on ENC capture plane");
}
fds0[0].fd = ctx0->cam_fd;
fds0[0].events = POLLIN;
fds1[0].fd = ctx1->cam_fd;
fds1[0].events = POLLIN;
printf ("Start Poll \r\n");
while (poll(fds0, 1, 5000) > 0 && poll(fds1, 1, 5000) > 0 && !ctx0->got_error &&
!ctx1->got_error && !ctx0->conv->isInError() && !ctx1->conv->isInError() && this->isRunning)
{
if (fds0[0].revents & POLLIN && fds1[0].revents & POLLIN) {
count++;
if (fds0[0].revents & POLLIN) {
struct v4l2_buffer v4l2_buf;
// Dequeue camera buff
memset(&v4l2_buf, 0, sizeof(v4l2_buf));
v4l2_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
v4l2_buf.memory = V4L2_MEMORY_DMABUF;
if (ioctl(ctx0->cam_fd, VIDIOC_DQBUF, &v4l2_buf) < 0)
ERROR_RETURN("Failed to dequeue camera buff: %s (%d)",
strerror(errno), errno);
ctx0->frame++;
printf ("Get frame %d \r\n", ctx0->frame);
// Push nv_buffer into conv output queue for conversion.
// The camera runs at 60 Hz but we only capture at 30 Hz
pthread_mutex_lock(&ctx0->queue_lock);
ctx0->conv_output_plane_buf_queue->push(&ctx0->g_buff[v4l2_buf.index]);
this->ConvertLeftFrame (v4l2_buf);
pthread_cond_broadcast(&ctx0->queue_cond);
pthread_mutex_unlock(&ctx0->queue_lock);
// Enqueue camera buff
// It might be more reasonable to wait for VIC processing to complete
// before re-queueing the current buff. But the VIC processing
// time is far less than the camera frame interval, so we probably
// don't need such synchronization.
if (ioctl(ctx0->cam_fd, VIDIOC_QBUF, &v4l2_buf))
ERROR_RETURN("Failed to queue camera buffers: %s (%d)",
strerror(errno), errno);
}
if (fds1[0].revents & POLLIN) {
struct v4l2_buffer v4l2_buf;
// Dequeue camera buff
memset(&v4l2_buf, 0, sizeof(v4l2_buf));
v4l2_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
v4l2_buf.memory = V4L2_MEMORY_DMABUF;
if (ioctl(ctx1->cam_fd, VIDIOC_DQBUF, &v4l2_buf) < 0)
ERROR_RETURN("Failed to dequeue camera buff: %s (%d)",
strerror(errno), errno);
ctx1->frame++;
// Push nv_buffer into conv output queue for conversion
// The camera runs at 60 Hz but we only capture at 30 Hz
pthread_mutex_lock(&ctx1->queue_lock);
ctx1->conv_output_plane_buf_queue->push(&ctx1->g_buff[v4l2_buf.index]);
this->ConvertRightFrame (v4l2_buf);
pthread_cond_broadcast(&ctx1->queue_cond);
pthread_mutex_unlock(&ctx1->queue_lock);
// Enqueue camera buff
// It might be more reasonable to wait for VIC processing to complete
// before re-queueing the current buff. But the VIC processing
// time is far less than the camera frame interval, so we probably
// don't need such synchronization.
if (ioctl(ctx1->cam_fd, VIDIOC_QBUF, &v4l2_buf))
ERROR_RETURN("Failed to queue camera buffers: %s (%d)",
strerror(errno), errno);
}
}
}
ctx0->enc->capture_plane.waitForDQThread(2000);
ctx1->enc->capture_plane.waitForDQThread(2000);
if (!this->isRunning && !ctx0->conv->isInError() && !ctx1->conv->isInError() )
{
// Signal EOS to the dq thread of VIC output plane
ctx0->g_buff[0].dmabuff_fd = 0;
pthread_mutex_lock(&ctx0->queue_lock);
ctx0->conv_output_plane_buf_queue->push(&ctx0->g_buff[0]);
pthread_cond_broadcast(&ctx0->queue_cond);
pthread_mutex_unlock(&ctx0->queue_lock);
ctx1->g_buff[0].dmabuff_fd = 0;
pthread_mutex_lock(&ctx1->queue_lock);
ctx1->conv_output_plane_buf_queue->push(&ctx1->g_buff[0]);
pthread_cond_broadcast(&ctx1->queue_cond);
pthread_mutex_unlock(&ctx1->queue_lock);
}
// Stop VIC dq thread
if (!ctx0->got_error && !ctx1->got_error)
{
ctx0->conv->waitForIdle(2000);
ctx0->conv->capture_plane.stopDQThread();
ctx0->conv->output_plane.stopDQThread();
ctx1->conv->waitForIdle(2000);
ctx1->conv->capture_plane.stopDQThread();
ctx1->conv->output_plane.stopDQThread();
}
return true;
}
bool HY_Encoder::StopStream(context_t * ctx) {
enum v4l2_buf_type type;
// Stop v4l2 streaming
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(ctx->cam_fd, VIDIOC_STREAMOFF, &type))
ERROR_RETURN("Failed to stop streaming: %s (%d)",
strerror(errno), errno);
// Stop VIC output plane
if (ctx->conv->output_plane.setStreamStatus(false) < 0)
ERROR_RETURN("Failed to stop output plane streaming");
// Stop VIC capture plane
if (ctx->conv->capture_plane.setStreamStatus(false) < 0)
ERROR_RETURN("Failed to stop output plane streaming");
// Send EOS to ENC output plane
{
struct v4l2_buffer v4l2_buf;
struct v4l2_plane planes[MAX_PLANES];
memset(&v4l2_buf, 0, sizeof(v4l2_buf));
memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
v4l2_buf.m.planes = planes;
v4l2_buf.m.planes[0].m.fd = -1;
ctx->enc->output_plane.qBuffer(v4l2_buf, NULL);
// Wait till capture plane DQ Thread finishes
// i.e. all the capture plane buffers are dequeued
ctx->enc->capture_plane.waitForDQThread(-1);
}
// Stop ENC output plane
if (ctx->enc->output_plane.setStreamStatus(false) < 0)
ERROR_RETURN("Failed to stop output plane streaming");
// Stop ENC capture plane
if (ctx->enc->capture_plane.setStreamStatus(false) < 0)
ERROR_RETURN("Failed to stop output plane streaming");
INFO("Camera video streaming off ...");
return true;
}
HY_Encoder::HY_Encoder () {
memset(&this->ctx0, 0, sizeof(context_t));
memset(&this->ctx1, 0, sizeof(context_t));
this->ctx0.cam_devname = "/dev/video0";
this->ctx1.cam_devname = "/dev/video1";
sprintf(this->ctx0.cam_file, "encode0.h264");
sprintf(this->ctx1.cam_file, "encode1.h264");
this->error = 0;
encoderThread = std::thread (&HY_Encoder::StartThread, this);
//encoderThread.detach();
}
HY_Encoder::~HY_Encoder () {
}
void HY_Encoder::StartThread () {
this->SetDefaults(&this->ctx0);
this->SetDefaults(&this->ctx1);
this->isRunning = true;
CHECK_ERROR(this->InitComponents(&this->ctx0), cleanup,
"Failed to initialize v4l2 components 0");
CHECK_ERROR(this->InitComponents(&this->ctx1), cleanup,
"Failed to initialize v4l2 components 1");
CHECK_ERROR(this->PrepareBuffers(&this->ctx0), cleanup,
"Failed to prepare v4l2 buffs");
CHECK_ERROR(this->PrepareBuffers(&this->ctx1), cleanup,
"Failed to prepare v4l2 buffs");
CHECK_ERROR(this->StartStream(&this->ctx0), cleanup,
"Failed to start streaming");
CHECK_ERROR(this->StartStream(&this->ctx1), cleanup,
"Failed to start streaming");
CHECK_ERROR(this->StartCapture(&this->ctx0, &this->ctx1), cleanup,
"Failed to start capturing")
CHECK_ERROR(this->StopStream(&this->ctx0), cleanup,
"Failed to stop streaming");
CHECK_ERROR(this->StopStream(&this->ctx1), cleanup,
"Failed to stop streaming");
cleanup:
if (this->ctx0.cam_fd > 0)
close(this->ctx0.cam_fd);
if (this->ctx1.cam_fd > 0)
close(this->ctx1.cam_fd);
if (this->ctx0.conv != NULL)
{
if (this->ctx0.conv->isInError())
{
printf("Video converter 0 is in error\n");
this->error = 1;
}
delete this->ctx0.conv;
}
if (this->ctx1.conv != NULL)
{
if (this->ctx1.conv->isInError())
{
printf("Video converter 1 is in error\n");
this->error = 1;
}
delete this->ctx1.conv;
}
if (this->ctx0.g_buff != NULL)
{
for (unsigned i = 0; i < V4L2_BUFFERS_NUM; i++)
if (this->ctx0.g_buff[i].dmabuff_fd)
NvBufferDestroy(this->ctx0.g_buff[i].dmabuff_fd);
free(this->ctx0.g_buff);
}
if (this->ctx1.g_buff != NULL)
{
for (unsigned i = 0; i < V4L2_BUFFERS_NUM; i++)
if (this->ctx1.g_buff[i].dmabuff_fd)
NvBufferDestroy(this->ctx1.g_buff[i].dmabuff_fd);
free(this->ctx1.g_buff);
}
delete this->ctx0.conv_output_plane_buf_queue;
delete this->ctx1.conv_output_plane_buf_queue;
if (this->ctx0.enc != NULL)
delete this->ctx0.enc;
if (this->ctx1.enc != NULL)
delete this->ctx1.enc;
if (this->error)
printf("App run failed\n");
else
printf("App run was successful\n");
this->isRunning = false;
}
void HY_Encoder::ConvertLeftFrame (struct v4l2_buffer &bufLeft) {
cv::Mat grayLeft;
cv::Mat inputLeft(this->ctx0.cam_h, this->ctx0.cam_w, CV_8UC2);
memcpy(inputLeft.data, (unsigned char *) this->ctx0.g_buff[bufLeft.index].start, this->ctx0.g_buff[bufLeft.index].size * sizeof(unsigned char));
cv::cvtColor(inputLeft, grayLeft, CV_YUV2BGR_UYVY);
std::lock_guard<std::mutex> lock(this->dataMutex);
// copy frames to safe place
this->stereoFrames.left = grayLeft;
}
void HY_Encoder::ConvertRightFrame (struct v4l2_buffer &bufRight) {
cv::Mat grayRight;
cv::Mat inputRight(this->ctx1.cam_h, this->ctx1.cam_w, CV_8UC2);
memcpy(inputRight.data, (unsigned char *) this->ctx1.g_buff[bufRight.index].start, this->ctx1.g_buff[bufRight.index].size * sizeof(unsigned char));
cv::cvtColor(inputRight, grayRight, CV_YUV2BGR_UYVY);
std::lock_guard<std::mutex> lock(this->dataMutex);
// copy frames to safe place
this->stereoFrames.right = grayRight;
}
bool HY_Encoder::GetFrames (cv::Mat & left, cv::Mat & right) {
std::lock_guard<std::mutex> lock(this->dataMutex);
left = this->stereoFrames.left;
right = this->stereoFrames.right;
return true;
}
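// GetNvBuffer is the consumer side of enc_output_buf_queue: it blocks until both
// encoders have produced a bitstream buffer, then pops one from each for the muxer.
// Note: the popped nv_buffers were malloc'ed in the enc*_capture callbacks and are
// not freed here.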
void HY_Encoder::GetNvBuffer(nv_buffer ** buffLeft, nv_buffer ** buffRight) {
// Fetch NvBuffer to do muxer
pthread_mutex_lock(&this->ctx0.encQueueLock);
pthread_mutex_lock(&this->ctx1.encQueueLock);
while (this->ctx0.enc_output_buf_queue->empty() || this->ctx1.enc_output_buf_queue->empty())
{
pthread_cond_wait(&this->ctx0.encQueueCond, &this->ctx0.encQueueLock);
pthread_cond_wait(&this->ctx1.encQueueCond, &this->ctx1.encQueueLock);
}
*buffLeft = this->ctx0.enc_output_buf_queue->front();
*buffRight = this->ctx1.enc_output_buf_queue->front();
this->ctx0.enc_output_buf_queue->pop();
this->ctx1.enc_output_buf_queue->pop();
pthread_mutex_unlock(&this->ctx0.encQueueLock);
pthread_mutex_unlock(&this->ctx1.encQueueLock);
}
void HY_Encoder::GetReady () {
cv::Mat left;
cv::Mat right;
while (1)
{
this->GetFrames (left, right);
if (left.rows != 0 && right.rows !=0)
break;
}
}
void HY_Encoder::EncoderRelease() {
this->isRunning = false;
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
this->encoderThread.join();
}