Converting Mat to vx_image and back

Hi Everyone,

I am trying to convert a cv::Mat image to a vx_image, but I keep getting a strange error.

Here is my code:

vector<Mat> toVisionWorks( Mat cv_left, Mat cv_right )
{

    cvtColor(cv_left, cv_left, cv::COLOR_RGB2GRAY);
    cvtColor(cv_right,cv_right, cv::COLOR_RGB2GRAY);

    Mat cv_left_dst(cv_left.size(), cv_left.type());
    Mat cv_right_dst(cv_right.size(), cv_right.type());

    vx_image vx_src_left = nvx_cv::createVXImageFromCVMat(context, cv_left);
    vx_image vx_dst_left = nvx_cv::createVXImageFromCVMat(context, cv_left_dst);

    vx_image vx_src_right = nvx_cv::createVXImageFromCVMat(context, cv_right);
    vx_image vx_dst_right = nvx_cv::createVXImageFromCVMat(context, cv_right_dst);

    vxuBox3x3(context, vx_src_left, vx_dst_left);
    vxuBox3x3(context, vx_src_right, vx_dst_right);

    vxReleaseImage(&vx_src_left);
    vxReleaseImage(&vx_dst_left);
    vxReleaseImage(&vx_src_right);
    vxReleaseImage(&vx_dst_right);

    cout << "Failed 2";

    //vx_image vx_left = createRGBImageFromRGBMat(context, cv_left);
    //vx_image vx_right = createRGBImageFromRGBMat(context, cv_right);


    cout << "Failed 1";

    vector<Mat> mat_images;
    mat_images.push_back(cv_left_dst);
    mat_images.push_back(cv_right_dst);

    return mat_images;
}

I am following the tutorial code in the documentation but I keep getting this error:
OpenCV Error: Assertion failed (vxGetStatus((vx_reference)img) == VX_SUCCESS) in createVXImageFromCVMat, file /usr/include/NVX/nvx_opencv_interop.hpp, line 296
terminate called after throwing an instance of 'cv::Exception'
what(): /usr/include/NVX/nvx_opencv_interop.hpp:296: error: (-215) vxGetStatus((vx_reference)img) == VX_SUCCESS in function createVXImageFromCVMat

What is the reason for this error? I was able to circumvent it by copying some code that uses createImageFromHandle, but I got the same type of error when using VXImageToCVMatMapper, so I would prefer to fix the error in my original code, which I understand better.

Thank you for your help!

Hi,

I can compile your code without error.
Could you double-check your makefile and environment?

My environment was set up with JetPack 2.3.1.

topic_988388.cpp

#include "NVXIO/Application.hpp"
#include "NVX/nvx_opencv_interop.hpp"
#include <opencv2/imgproc/imgproc.hpp>

std::vector<cv::Mat> toVisionWorks( cv::Mat cv_left, cv::Mat cv_right )
{
        nvxio::ContextGuard context;
        // please add your own handling here

        cv::cvtColor(cv_left, cv_left, cv::COLOR_RGB2GRAY);
        cv::cvtColor(cv_right,cv_right, cv::COLOR_RGB2GRAY);

        cv::Mat cv_left_dst(cv_left.size(), cv_left.type());
        cv::Mat cv_right_dst(cv_right.size(), cv_right.type());

        vx_image vx_src_left = nvx_cv::createVXImageFromCVMat(context, cv_left);
        vx_image vx_dst_left = nvx_cv::createVXImageFromCVMat(context, cv_left_dst);
        vx_image vx_src_right = nvx_cv::createVXImageFromCVMat(context, cv_right);
        vx_image vx_dst_right = nvx_cv::createVXImageFromCVMat(context, cv_right_dst);

        vxuBox3x3(context, vx_src_left, vx_dst_left);
        vxuBox3x3(context, vx_src_right, vx_dst_right);

        vxReleaseImage(&vx_src_left);
        vxReleaseImage(&vx_dst_left);
        vxReleaseImage(&vx_src_right);
        vxReleaseImage(&vx_dst_right);

        std::cout << "Failed 2";

        //vx_image vx_left = createRGBImageFromRGBMat(context, cv_left);
        //vx_image vx_right = createRGBImageFromRGBMat(context, cv_right);


        std::cout << "Failed 1";

        std::vector<cv::Mat> mat_images;
        mat_images.push_back(cv_left_dst);
        mat_images.push_back(cv_right_dst);

        return mat_images;
}

int main(int argc, char* argv[])
{
    try
    {
        nvxio::Application &app = nvxio::Application::get();

        int imageW = 1920;
        int imageH = 1080;

        cv::Mat cv_left( imageW, imageH, CV_32FC1);
        cv::Mat cv_right(imageW, imageH, CV_32FC1);
        toVisionWorks(cv_left, cv_right);
    }
    catch (const std::exception& e)
    {
        std::cerr << "Error: " << e.what() << std::endl;
        return nvxio::Application::APP_EXIT_CODE_ERROR;
    }

    return nvxio::Application::APP_EXIT_CODE_SUCCESS;
}

Command:

g++  -I/usr/local/cuda-8.0/include -I/usr/local/cuda-8.0/include -DUSE_NPP=1 -I/usr/include/opencv -DUSE_OPENCV=1  -DCUDA_API_PER_THREAD_DEFAULT_STREAM -DUSE_GUI=1 -DUSE_GLFW=1 -DUSE_GLES=1 -DUSE_GSTREAMER=1 -DUSE_NVGSTCAMERA=1 -DUSE_GSTREAMER_OMX=1  -O3 -DNDEBUG -std=c++0x -o topic_988388.o -c topic_988388.cpp

Hi, thank you for your reply.

I have added your command definitions to my CMakeLists.txt file and I am still getting this error. Also, this isn’t a compile error but a runtime error.

Here is my CMakeLists.txt:

cmake_minimum_required(VERSION 3.1)
project( MechaVision )

add_definitions(-DUSE_OPENCV=1 -DCUDA_API_PER_THREAD_DEFAULT_STREAM -DUSE_GUI=1 -DUSE_GLFW=1 -DUSE_GLES=1 -DUSE_GSTREAMER=1 -DUSE_NVGSTCAMERA=1 -DUSE_GSTREAMER_OMX=1 -O3 -DNDEBUG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

find_package( OpenCV REQUIRED )
find_package(VisionWorks REQUIRED)
find_package(VisionWorks-NVXIO)

include_directories("~/Workspace/MechaVision")
include_directories("${OpenCV_INCLUDE_DIRS}")
include_directories("${VisionWorks_INCLUDE_DIRS}")
include_directories("${VisionWorks-NVXIO_INCLUDE_DIRS}")
include_directories("/usr/local/cuda-8.0/include")
include_directories("/usr/local/cuda/include")

file(GLOB SOURCES
*.h
*.cpp
)

add_executable( MechaVision ${SOURCES})

target_link_libraries(MechaVision
${OpenCV_LIBS}
${VisionWorks_LIBRARIES}
${VisionWorks-NVXIO_LIBRARIES}
)

I am using Jetpack 2.3.0 also.

Maybe I should just use the createImageFromHandle method?

Thank you for your time!

EDIT:

The issue was that I was not creating an nvxio::ContextGuard. Once every reference checked out with NVXIO_CHECK_REFERENCE, everything worked.
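For anyone who hits the same assertion, the working pattern looked roughly like the sketch below (illustrative only, not my exact project code; it assumes a valid OpenVX context, e.g. from an nvxio::ContextGuard owned by the caller, and a non-empty grayscale cv::Mat):

#include "NVXIO/Utility.hpp"            // NVXIO_CHECK_REFERENCE, NVXIO_SAFE_CALL
#include "NVX/nvx_opencv_interop.hpp"   // nvx_cv::createVXImageFromCVMat

// Sketch: wrap a grayscale cv::Mat in a vx_image, run a 3x3 box filter,
// and return the result (the vx_images share memory with the cv::Mats).
cv::Mat box3x3WithVisionWorks(vx_context context, cv::Mat gray)
{
    cv::Mat dst(gray.size(), gray.type());

    vx_image vx_src = nvx_cv::createVXImageFromCVMat(context, gray);
    NVXIO_CHECK_REFERENCE(vx_src);      // fails early if the context or Mat is invalid
    vx_image vx_dst = nvx_cv::createVXImageFromCVMat(context, dst);
    NVXIO_CHECK_REFERENCE(vx_dst);

    NVXIO_SAFE_CALL(vxuBox3x3(context, vx_src, vx_dst));

    vxReleaseImage(&vx_src);            // dst still owns the filtered pixels
    vxReleaseImage(&vx_dst);
    return dst;
}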

Hi

This error usually indicates an invalid cv::Mat image, for example one with an invalid width/height.
I merged your code into the 'VisionWorks-1.5-Samples/samples/opencv_npp_interop/main_opencv_npp_interop.cpp' sample.
The sample compiles and runs normally; it is included below for your reference.

/*
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#  * Neither the name of NVIDIA CORPORATION nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <iostream>
#include "NVXIO/Application.hpp"

    #include "NVX/nvx_opencv_interop.hpp"
    #include <opencv2/imgproc/imgproc.hpp>

#if !defined USE_OPENCV || !defined USE_NPP

int main(int, char**)
{
#ifndef USE_OPENCV
    std::cout << "NVXIO and samples were built without OpenCV support." << std::endl;
    std::cout << "Install OpenCV for Tegra and rebuild the sample." << std::endl;
#endif
#ifndef USE_NPP
    std::cout << "The sample was built without CUDA NPP support." << std::endl;
    std::cout << "Install CUDA NPP library and rebuild the sample." << std::endl;
#endif

    return nvxio::Application::APP_EXIT_CODE_ERROR;
}

#else

#include <string>
#include <iomanip>
#include <memory>

#include <NVX/nvx.h>
#include <NVX/nvx_timer.hpp>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

#include "alpha_comp_node.hpp"

#include "NVXIO/Render.hpp"
#include "NVXIO/SyncTimer.hpp"
#include "NVXIO/Utility.hpp"

struct EventData
{
    EventData(): shouldStop(false), pause(false) {}
    bool shouldStop;
    bool pause;
};

static void eventCallback(void* eventData, vx_char key, vx_uint32, vx_uint32)
{
    EventData* data = static_cast<EventData*>(eventData);

    if (key == 27)
    {
        data->shouldStop = true;
    }
    else if (key == 32)
    {
        data->pause = !data->pause;
    }
}

static void displayState(nvxio::Render *renderer, const cv::Size & size, double proc_ms, double total_ms)
{
    std::ostringstream txt;

    txt << std::fixed << std::setprecision(1);

    txt << "Source size: " << size.width << 'x' << size.height << std::endl;
    txt << "Algorithm: " << proc_ms << " ms / " << 1000.0 / proc_ms << " FPS" << std::endl;
    txt << "Display: " << total_ms  << " ms / " << 1000.0 / total_ms << " FPS" << std::endl;

    txt << std::setprecision(6);
    txt.unsetf(std::ios_base::floatfield);
    txt << "LIMITED TO " << nvxio::Application::get().getFPSLimit() << " FPS FOR DISPLAY" << std::endl;
    txt << "Space - pause/resume" << std::endl;
    txt << "Esc - close the demo";

    nvxio::Render::TextBoxStyle style = {{255, 255, 255, 255}, {0, 0, 0, 127}, {10, 10}};
    renderer->putTextViewport(txt.str(), style);
}

static void VX_CALLBACK myLogCallback(vx_context /*context*/, vx_reference /*ref*/, vx_status /*status*/, const vx_char string[])
{
    std::cout << "VisionWorks LOG : " << string << std::endl;
}

//
// main - Application entry point
//

int main(int argc, char* argv[])
{
    try
    {
        nvxio::Application &app = nvxio::Application::get();

        //
        // Parse command line arguments
        //

        std::string fileName1 = app.findSampleFilePath("lena.jpg");
        std::string fileName2 = app.findSampleFilePath("baboon.jpg");

        app.setDescription("This sample accepts as input two images and performs alpha blending of them");
        app.addOption(0, "img1", "First image", nvxio::OptionHandler::string(&fileName1));
        app.addOption(0, "img2", "Second image", nvxio::OptionHandler::string(&fileName2));
        app.init(argc, argv);

        //
        // Load input images
        //

        if (fileName1 == fileName2)
        {
            std::cerr << "Error: Please, use different files for img1 and img2" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_INVALID_VALUE;
        }

        cv::Mat cv_src1 = cv::imread(fileName1, cv::IMREAD_GRAYSCALE);
        cv::Mat cv_src2 = cv::imread(fileName2, cv::IMREAD_GRAYSCALE);

        if (cv_src1.empty())
        {
            std::cerr << "Error: Can't load input image " << fileName1 << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RESOURCE;
        }

        if (cv_src2.empty())
        {
            std::cerr << "Error: Can't load input image " << fileName2 << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RESOURCE;
        }

        if (cv_src1.size() != cv_src2.size())
        {
            std::cerr << "Error: Input images must have the same size." << std::endl;
            return nvxio::Application::APP_EXIT_CODE_INVALID_DIMENSIONS;
        }

        //
        // Create OpenVX context
        //

        nvxio::ContextGuard context;
        vxRegisterLogCallback(context, &myLogCallback, vx_false_e);
        vxDirective(context, VX_DIRECTIVE_ENABLE_PERFORMANCE);

        std::unique_ptr<nvxio::Render> renderer(nvxio::createDefaultRender(context, "OpenCV NPP Interop Sample",
                                                                           3 * cv_src1.cols, cv_src1.rows));

        if (!renderer) {
            std::cerr << "Error: Can't create a renderer." << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RENDER;
        }

        EventData eventData;
        renderer->setOnKeyboardEventCallback(eventCallback, &eventData);

        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!
        //
        // your code starts from here
        //
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!
        cv::Mat cv_left = cv_src1;
        cv::Mat cv_right = cv_src2;

        // please add your own handling here

        cv::cvtColor(cv_left, cv_left, cv::COLOR_RGB2GRAY);
        cv::cvtColor(cv_right,cv_right, cv::COLOR_RGB2GRAY);

        cv::Mat cv_left_dst(cv_left.size(), cv_left.type());
        cv::Mat cv_right_dst(cv_right.size(), cv_right.type());

        vx_image vx_src_left = nvx_cv::createVXImageFromCVMat(context, cv_left);
        vx_image vx_dst_left = nvx_cv::createVXImageFromCVMat(context, cv_left_dst);
        vx_image vx_src_right = nvx_cv::createVXImageFromCVMat(context, cv_right);
        vx_image vx_dst_right = nvx_cv::createVXImageFromCVMat(context, cv_right_dst);

        vxuBox3x3(context, vx_src_left, vx_dst_left);
        vxuBox3x3(context, vx_src_right, vx_dst_right);

        vxReleaseImage(&vx_src_left);
        vxReleaseImage(&vx_dst_left);
        vxReleaseImage(&vx_src_right);
        vxReleaseImage(&vx_dst_right);

        std::cout << "Failed 2";

        //vx_image vx_left = createRGBImageFromRGBMat(context, cv_left);
        //vx_image vx_right = createRGBImageFromRGBMat(context, cv_right);

        std::cout << "Failed 1";

        std::vector<cv::Mat> mat_images;
        mat_images.push_back(cv_left_dst);
        mat_images.push_back(cv_right_dst);

        //
        // Release all objects
        //

        renderer->close();
    }
    catch (const std::exception& e)
    {
        std::cerr << "Error: " << e.what() << std::endl;
        return nvxio::Application::APP_EXIT_CODE_ERROR;
    }

    return nvxio::Application::APP_EXIT_CODE_SUCCESS;
}

#endif // USE_OPENCV

Thanks

Hi AastaLLL,

Thank you for your help. I was able to get the conversion working correctly, and your example taught me an important lesson about using the context guard.

I have run into another issue while adapting my code to the stereo vision example, but I did not want to create another thread. I will show my code first and explain the issue below it.

/* Setup implementation type, params, and renderer */
StereoMatching::ImplementationType implementationType = StereoMatching::HIGH_LEVEL_API;
StereoMatching::StereoMatchingParams params;
std::unique_ptr<nvxio::Render> renderer(nvxio::createDefaultRender(context,
    "Stereo Matching Demo", width, height));

if (!renderer)
{
    std::cerr << "Error: Can't create a renderer" << std::endl;
    return nvxio::Application::APP_EXIT_CODE_NO_RENDER;
}

/* Setup disparity and color disparity outputs */
vx_image disparity = vxCreateImage(context, width, height, VX_DF_IMAGE_U8);
vx_image color_output = vxCreateImage(context, width, height, VX_DF_IMAGE_RGB);
NVXIO_CHECK_REFERENCE(disparity);
NVXIO_CHECK_REFERENCE(color_output);

std::unique_ptr<StereoMatching> stereo(
    StereoMatching::createStereoMatching(
        context, params,
        implementationType,
        vx_left, vx_right, disparity));

//ColorDisparityGraph color_disp_graph(context, disparity, color_output, params.max_disparity);
//bool color_disp_update = true;

for (;;)
{
    cap1 >> cv_left;
    cap2 >> cv_right;

    //cvtColor(cv_left, cv_left, cv::COLOR_RGB2GRAY);
    //cvtColor(cv_right, cv_right, cv::COLOR_RGB2GRAY);

    vector<Mat> undistortedFrames = utilityClass.undistortFrames(cv_left, cv_right);
    cv_left = undistortedFrames[0];
    cv_right = undistortedFrames[1];

    minMaxLoc(cv_left, &minVal, &maxVal); // find minimum and maximum intensities
    cv_left.convertTo(cv_left, CV_8U, 255.0/(maxVal - minVal), -minVal * 255.0/(maxVal - minVal));
    minMaxLoc(cv_right, &minVal, &maxVal); // find minimum and maximum intensities
    cv_right.convertTo(cv_right, CV_8U, 255.0/(maxVal - minVal), -minVal * 255.0/(maxVal - minVal));

    vx_left = nvx_cv::createVXImageFromCVMat(context, cv_left);
    NVXIO_CHECK_REFERENCE(vx_left);
    vx_right = nvx_cv::createVXImageFromCVMat(context, cv_right);

    stereo->run();
    stereo->printPerfs();
    renderer->putImage(disparity);

    imshow("frame1", cv_left);
    imshow("frame2", cv_right);
    //imshow("undistort frame1", mat_images[0]);
    //imshow("undistort frame2", mat_images[1]);
    if (!renderer->flush())
    {
        cout << "Finished processing" << endl;
        fflush(stdout);
        break;
    }
    if (waitKey(30) >= 0) break;
}

So, I am able to convert my OpenCV webcam frames into vx_image and call stereo->run(); however, vx_left and vx_right stay the same, so the disparity map is just the first frame that was ever seen. I thought the stereo class could access the image after it has been changed? For example, in the demo stereo vision main.cpp, the processing loop just fetches the source and never calls anything like stereo.setImage(left). How can I make stereo->run() use the newly updated images? Also, I know the stereo vision class is running, because stereo->printPerfs() is printing timings.

One more quick question: do you recommend putting all the OpenCV conversion/undistortion code and the stereo run into custom nodes? Then I could just process the graph in the for loop.

Thank you for your help!

Hi,

createVXImageFromCVMat() lets the vx_image share the same memory as the cv::Mat.
The problem is that the cv_left/cv_right references change on every iteration, but the stereo component is never updated.

Instead of changing the cv_left/cv_right references, try updating the underlying fixed buffers directly.

For example:

// when creating the graph
vx_left = nvx_cv::createVXImageFromCVMat(context, cv_left_fix);
vx_right = nvx_cv::createVXImageFromCVMat(context, cv_right_fix);
.....
// in the for loop
cv_left.copyTo(cv_left_fix);
cv_right.copyTo(cv_right_fix);
stereo->run();
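Putting it together, the loop would look roughly like this (just a sketch based on the snippets in this thread; cv_left_fix/cv_right_fix are pre-allocated with the same size and type as the processed frames):

// Allocate fixed buffers once and wrap them before building the stereo graph.
cv::Mat cv_left_fix(height, width, CV_8UC1);
cv::Mat cv_right_fix(height, width, CV_8UC1);

vx_image vx_left  = nvx_cv::createVXImageFromCVMat(context, cv_left_fix);
vx_image vx_right = nvx_cv::createVXImageFromCVMat(context, cv_right_fix);
NVXIO_CHECK_REFERENCE(vx_left);
NVXIO_CHECK_REFERENCE(vx_right);

std::unique_ptr<StereoMatching> stereo(
    StereoMatching::createStereoMatching(
        context, params, implementationType,
        vx_left, vx_right, disparity));

for (;;)
{
    cap1 >> cv_left;
    cap2 >> cv_right;
    // ... undistort and convert to CV_8UC1 as before ...

    // Copy the new frames into the fixed buffers; the wrapped vx_images
    // see the new pixels without being re-created.
    cv_left.copyTo(cv_left_fix);
    cv_right.copyTo(cv_right_fix);

    stereo->run();
    renderer->putImage(disparity);
    if (!renderer->flush())
        break;
}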

Hi,
I'm working with cv::Mat and vx_image too, but I'm using vxCreateImageFromHandle with memory allocated through the CUDA API.

In particular, I'm developing on a Jetson TX1, where the CPU and GPU share physical memory, so I'm trying to exploit the UVA/zero-copy feature to avoid redundant memory copies, but without success. In fact, even if I allocate pinned memory (a UVA constraint) for the images used inside a vx_graph, VisionWorks still allocates its own memory and copies the data into its "internal" buffers.

My idea is to allocate buffers in GPU memory for both the inputs and the outputs of a vx_graph, in order to avoid redundant copies.
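Roughly, my import looks like the sketch below (simplified, not my exact code; I'm assuming the VisionWorks extension enum NVX_MEMORY_TYPE_CUDA from NVX/nvx.h for passing a device pointer, and a single-plane U8 image):

#include <NVX/nvx.h>
#include <cuda_runtime.h>

// Simplified sketch: wrap a zero-copy (pinned, mapped) buffer in a vx_image.
// NVX_MEMORY_TYPE_CUDA tells VisionWorks the pointer is a device pointer;
// VX_MEMORY_TYPE_HOST would be used for a plain host pointer instead.
vx_image wrapZeroCopyU8(vx_context context, vx_uint32 width, vx_uint32 height)
{
    void* host_ptr = NULL;
    cudaHostAlloc(&host_ptr, (size_t)width * height, cudaHostAllocMapped);

    void* dev_ptr = NULL;
    cudaHostGetDevicePointer(&dev_ptr, host_ptr, 0);   // same physical memory on the TX1

    vx_imagepatch_addressing_t addr;
    addr.dim_x    = width;
    addr.dim_y    = height;
    addr.stride_x = sizeof(vx_uint8);
    addr.stride_y = (vx_int32)width;
    addr.scale_x  = VX_SCALE_UNITY;
    addr.scale_y  = VX_SCALE_UNITY;
    addr.step_x   = 1;
    addr.step_y   = 1;

    void* ptrs[] = { dev_ptr };
    return vxCreateImageFromHandle(context, VX_DF_IMAGE_U8, &addr, ptrs, NVX_MEMORY_TYPE_CUDA);
}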

What am I doing wrong?

Thank you for helping me!

AastaLLL,

Thank you for the reply. I have tried your recommended solution, but the image still does not update. Here is the new code. Also, the stereo vision object was initialized with the cv_left_fix and cv_right_fix frames.

for (;;)
{
    cap1 >> cv_left;
    cap2 >> cv_right;

    //cvtColor(cv_left, cv_left, cv::COLOR_RGB2GRAY);
    //cvtColor(cv_right, cv_right, cv::COLOR_RGB2GRAY);

    vector<Mat> undistortedFrames = utilityClass.undistortFrames(cv_left, cv_right);
    cv_left = undistortedFrames[0];
    cv_right = undistortedFrames[1];

    minMaxLoc(cv_left, &minVal, &maxVal); // find minimum and maximum intensities
    cv_left.convertTo(cv_left, CV_8U, 255.0/(maxVal - minVal), -minVal * 255.0/(maxVal - minVal));
    minMaxLoc(cv_right, &minVal, &maxVal); // find minimum and maximum intensities
    cv_right.convertTo(cv_right, CV_8U, 255.0/(maxVal - minVal), -minVal * 255.0/(maxVal - minVal));

    cv_left.copyTo(cv_left_fix);
    cv_right.copyTo(cv_right_fix);

    vx_left = nvx_cv::createVXImageFromCVMat(context, cv_left_fix);
    NVXIO_CHECK_REFERENCE(vx_left);
    vx_right = nvx_cv::createVXImageFromCVMat(context, cv_right_fix);

    //std::unique_ptr<StereoMatching> stereo(
    //    StereoMatching::createStereoMatching(
    //        context, params,
    //        implementationType,
    //        vx_left, vx_right, disparity));

    stereo->run();
    stereo->printPerfs();
    renderer->putImage(disparity);

    imshow("frame1", cv_left);
    imshow("frame2", cv_right);
    //imshow("undistort frame1", mat_images[0]);
    //imshow("undistort frame2", mat_images[1]);
    if (!renderer->flush())
    {
        cout << "Finished processing" << endl;
        fflush(stdout);
        break;
    }
    if (waitKey(30) >= 0) break;
}

Maybe I need to create a setter function to set the new frame each time?

Thanks!

EDIT:
So, I was able to get it all working. I had to create a new stereo object on each iteration of the for loop and initialize it with the new frames. Also, I believe there is a memory leak in LOW_LEVEL_API_PYRAMIDAL, because when I was using that implementation the program would get killed after about 30 seconds, but when I use HIGH_LEVEL_API the program does not get killed.
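For reference, the loop body that ended up working is roughly this (a sketch reconstructed from the snippets above, not my full code; vx_left/vx_right start out as NULL before the loop):

for (;;)
{
    cap1 >> cv_left;
    cap2 >> cv_right;
    // ... undistort and convert to CV_8UC1 as before ...

    // Release last frame's images before wrapping the new frames.
    if (vx_left)  vxReleaseImage(&vx_left);
    if (vx_right) vxReleaseImage(&vx_right);

    vx_left  = nvx_cv::createVXImageFromCVMat(context, cv_left);
    vx_right = nvx_cv::createVXImageFromCVMat(context, cv_right);
    NVXIO_CHECK_REFERENCE(vx_left);
    NVXIO_CHECK_REFERENCE(vx_right);

    // Re-create the stereo object against the new images
    // (unique_ptr::reset releases the previous instance).
    stereo.reset(StereoMatching::createStereoMatching(
        context, params, implementationType,
        vx_left, vx_right, disparity));

    stereo->run();
    stereo->printPerfs();
    renderer->putImage(disparity);

    if (!renderer->flush())
        break;
    if (waitKey(30) >= 0)
        break;
}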

Hi both,

Could you check this topic?
https://devtalk.nvidia.com/default/topic/906396/

More detail about vxAccessImagePatch can be found here:
https://www.khronos.org/registry/OpenVX/specs/1.0/html/df/d09/group__group__image.html
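For example, updating the pixels of an existing vx_image in place might look roughly like this (an illustrative sketch following the OpenVX 1.0 vxAccessImagePatch/vxCommitImagePatch API linked above; it assumes a VX_DF_IMAGE_U8 image and a CV_8UC1 cv::Mat of the same size):

#include <VX/vx.h>
#include <opencv2/core/core.hpp>
#include <cstring>

// Sketch: copy a CV_8UC1 cv::Mat into an existing VX_DF_IMAGE_U8 vx_image
// without re-creating the image, so graph/stereo inputs stay valid.
vx_status updateImageFromMat(vx_image image, const cv::Mat& src)
{
    vx_rectangle_t rect = { 0, 0, (vx_uint32)src.cols, (vx_uint32)src.rows };
    vx_imagepatch_addressing_t addr;
    void* ptr = NULL;

    vx_status status = vxAccessImagePatch(image, &rect, 0, &addr, &ptr, VX_WRITE_ONLY);
    if (status != VX_SUCCESS)
        return status;

    for (vx_uint32 y = 0; y < addr.dim_y; y++)
    {
        // Rows may be padded, so resolve each row through the addressing structure.
        vx_uint8* dst_row = (vx_uint8*)vxFormatImagePatchAddress2d(ptr, 0, y, &addr);
        std::memcpy(dst_row, src.ptr<vx_uint8>(y), addr.dim_x);
    }

    return vxCommitImagePatch(image, &rect, 0, &addr, ptr);
}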

Hi Swooshftw, I have the same issue with the program being killed after 15 seconds once I put createImageFromCV() in a while loop. How do you use HIGH_LEVEL_API instead of the other one?

For me, when using VisionWorks, OpenCV 4+ was needed.
Thanks.