kayccc,
I tried your GStreamer pipeline in my very similar OpenCV sample code (modified from bgfg_segm.cpp in the samples directory):
// Sample for foreground detection: opens the TX1 onboard camera through a
// GStreamer pipeline string handed to cv::VideoCapture (appsink delivers
// BGR frames to OpenCV). Returns -1 if the capture cannot be opened.
int main(int argc, const char** argv)
{
    help();
    //CommandLineParser parser(argc, argv, keys);
    bool useCamera = true;       // parser.get<bool>("camera");
    //string file = parser.get<string>("file_name");
    bool update_bg_model = true;

    // NOTE(fix): the original string wrapped the final caps in single quotes
    // ('video/x-raw, format=(string)BGR'). Those quotes are shell syntax for
    // gst-launch; inside a C++ string literal they are passed through verbatim
    // and break GStreamer's pipeline parsing. They are removed here.
    VideoCapture cap("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)I420, framerate=(fraction)24/1 ! nvvidconv flip-method=2 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink"); // open the camera
    //cap.open(file.c_str());   // alternative: open a video file instead

    if( !cap.isOpened() )
    {
        // If strace shows open("nvcamerasrc ! ...") = ENOENT here, OpenCV is
        // treating the whole pipeline as a file path — i.e. this OpenCV build
        // has no GStreamer backend; rebuild with -DWITH_GSTREAMER=ON.
        printf("can not open camera or video file\n");
        return -1;
    }
Then I ran strace on my binary to see which system calls were being made, and this one failed:
open("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720,format=(string)I420, framerate=(fraction)24/1 ! nvvidconv flip-method=2 ! video/x-raw, format=(string)BGRx ! videoconvert ! 'video/x-raw, format=(string)BGR' ! appsink", O_RDONLY|O_LARGEFILE) = -1 ENOENT (No such file or directory)
The ENOENT shows that the entire pipeline string was passed to open() as if it were a file path — which suggests this OpenCV build was compiled without GStreamer support and is falling back to its plain-file reader.
Does anybody have an idea of the right way to get video out of the camera on the TX1? I know it is possible, because the VisionWorks samples/demos can open the camera stream, as in this example (main_nvgstcamera_capture.cpp):
// VisionWorks (nvxio) capture sample: opens the TX1 camera through the
// "device://nvcamera<N>" FrameSource URI instead of a raw GStreamer string.
int main(int argc, char** argv)
{
// Application singleton that owns command-line option handling.
nvxio::Application &app = nvxio::Application::get();
// Default capture configuration: 1280x720 @ 30 fps; the -r/-f options
// registered below can override parts of it.
nvxio::FrameSource::Parameters config;
config.frameWidth = 1280;
config.frameHeight = 720;
config.fps = 30;
//
// Parse command line arguments
//
vx_uint32 cameraID = 0u; // -c option target: camera device ID (limited to 0 or 1 below)
std::string resolution = "1280x720", input;
std::ostringstream stream;
app.setDescription("This sample captures frames from NVIDIA GStreamer camera");
app.addOption('c', "camera", "Input camera device ID", nvxio::OptionHandler::unsignedInteger(&cameraID,
nvxio::ranges::atMost(1u)));
app.addOption('r', "resolution", "Input frame resolution", nvxio::OptionHandler::oneOf(&resolution,
{ "2592x1944", "2592x1458", "1280x720", "640x480" }));
app.addOption('f', "fps", "Frames per second", nvxio::OptionHandler::unsignedInteger(&config.fps,
nvxio::ranges::atLeast(10u) & nvxio::ranges::atMost(120u)));
app.init(argc, argv);
// Build the camera URI, e.g. "device://nvcamera0".
stream << "device://nvcamera" << cameraID;
input = stream.str();
// Translate the "WxH" resolution string into config's width/height fields.
parseResolution(resolution, config);
//
// Create OpenVX context
//
// NOTE(review): presumably an RAII wrapper that releases the vx_context on
// scope exit — confirm against the nvxio headers.
nvxio::ContextGuard context;
//
// Messages generated by the OpenVX framework will be processed by nvxio::stdoutLogCallback
//
vxRegisterLogCallback(context, &nvxio::stdoutLogCallback, vx_false_e);
//
// Create a Frame Source
//
// The FrameSource implementation builds the capture pipeline internally from
// the device:// URI — this is the path that works on the TX1 in this sample.
std::unique_ptr<nvxio::FrameSource> source(nvxio::createDefaultFrameSource(context, input));
When will the next version of Linux for Tegra (L4T) be released with a Video4Linux (V4L2) driver for the camera?