How to use OpenCV for Unity to achieve Image Stitching

Discussion in 'General Discussion' started by zengguangkk, Mar 28, 2022.

  1. zengguangkk

    zengguangkk

    Joined:
    Mar 2, 2022
    Posts:
    2
    I don't know how to implement image stitching with OpenCV for Unity, or how to display only a partial region of the captured image once the stitching is done. Also, my feature-extraction display and the match visualization come out mirrored.

    C++ code:

    // Homography that maps the right image's points into the left image's
    // frame (findHomography takes source points first, destination second)
    Mat homo = findHomography(rightImagePoints, leftImagePoints, RANSAC);

    // Compute where the right image's corners land under the homography
    CalcCorners(homo, imageR);

    // Warp the right image into the left image's coordinate frame
    Mat imageTransform1;
    warpPerspective(imageR, imageTransform1, homo,
                    Size(MAX(corners.right_top.x, corners.right_bottom.x), imageL.rows));

    // The output canvas must be at least as large as the left image
    int dst_width = MAX(imageTransform1.cols, imageL.cols);
    int dst_height = MAX(imageTransform1.rows, imageL.rows);
    Mat outimage(dst_height, dst_width, CV_8UC1);
    outimage.setTo(0);

    // Paste the warped right image first, then overlay the left image on top
    imageTransform1.copyTo(outimage(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
    imageL.copyTo(outimage(Rect(0, 0, imageL.cols, imageL.rows)));

    ------------------------------------------------------------------------------------------------------------------------------------

    This is my code (C#, OpenCV for Unity):

    // Test setup: replace the right image with a rotated copy of the left
    // (angle is fixed at 0 here, so the two images start out identical)
    float angle = 0f, scale = 1.0f;
    Point center = new Point(RgrayMat.cols() * 0.5f, RgrayMat.rows() * 0.5f);
    Mat affine_matrix = Imgproc.getRotationMatrix2D(center, angle, scale);
    Imgproc.warpAffine(LgrayMat, RgrayMat, affine_matrix, RgrayMat.size());

    // Detect ORB keypoints and compute descriptors for both images
    ORB orb = ORB.create();
    MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
    Mat descriptors1 = new Mat();
    orb.detectAndCompute(LgrayMat, new Mat(), keypoints1, descriptors1);
    MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
    Mat descriptors2 = new Mat();
    orb.detectAndCompute(RgrayMat, new Mat(), keypoints2, descriptors2);

    // Match descriptors and keep only matches that pass Lowe's ratio test
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT);
    MatOfDMatch matches = new MatOfDMatch();
    List<MatOfDMatch> knnMatches = new List<MatOfDMatch>();
    matcher.knnMatch(descriptors1, descriptors2, knnMatches, 2);
    List<DMatch> goodMatches = new List<DMatch>();
    for (int i = 0; i < knnMatches.Count; ++i)
    {
        // Fix: read the i-th match pair (the original called toArray() on the list itself)
        DMatch[] pair = knnMatches[i].toArray();
        if (pair.Length >= 2 && pair[0].distance < 0.7f * pair[1].distance)
        {
            goodMatches.Add(pair[0]);
        }
    }
    // Collect the matched point coordinates on each image
    // (fix: index goodMatches by i; goodMatches.queryIdx does not compile)
    List<Point> objList = new List<Point>();
    List<Point> sceneList = new List<Point>();
    List<KeyPoint> keypoints_objectList = keypoints1.toList();
    List<KeyPoint> keypoints_sceneList = keypoints2.toList();
    for (int i = 0; i < goodMatches.Count; i++)
    {
        objList.Add(keypoints_objectList[goodMatches[i].queryIdx].pt);   // point in LgrayMat
        sceneList.Add(keypoints_sceneList[goodMatches[i].trainIdx].pt);  // point in RgrayMat
    }
    if (objList.Count == 0)
        return;
    MatOfPoint2f obj = new MatOfPoint2f();
    MatOfPoint2f scene = new MatOfPoint2f();
    obj.fromList(objList);
    scene.fromList(sceneList);

    // Homography that maps points in the right image into the left image's
    // frame (fix: source points come first, so scene -> obj, not obj -> scene)
    Mat H = Calib3d.findHomography(scene, obj, Calib3d.RANSAC);

    // Visualize the surviving matches side by side
    // (drawMatches allocates the output Mat itself, no need to pre-size it)
    Mat Mixcamera1 = new Mat();
    matches.fromArray(goodMatches.ToArray());
    Features2d.drawMatches(LgrayMat, keypoints1, RgrayMat, keypoints2, matches, Mixcamera1);
    Texture2D texture = new Texture2D(Mixcamera1.cols(), Mixcamera1.rows(), TextureFormat.RGBA32, false);
    Utils.matToTexture2D(Mixcamera1, texture);
    Mixcamera.texture = texture;
    // Compute where the right image's corners land after warping
    // (fix: pass the right image, whose corners are being transformed)
    int Rvalue = SortCorners(H, RgrayMat);
    Debug.Log("Rvalue=" + Rvalue);
    if (Rvalue == 0)
        return;

    Debug.Log("right_top=" + cornersXY.right_top[0].x);
    Debug.Log("right_bottom=" + cornersXY.right_bottom[0].x);

    // Warp the right image into the left image's coordinate frame; the
    // canvas width reaches out to the right-most warped corner
    Mat imageTransform1 = new Mat();
    Imgproc.warpPerspective(RgrayMat, imageTransform1, H,
        new Size(Mathf.Max((float)cornersXY.right_top[0].x, (float)cornersXY.right_bottom[0].x), LgrayMat.rows()));

    // The output canvas must be at least as large as the left image
    int dst_width = Mathf.Max(imageTransform1.cols(), LgrayMat.cols());
    int dst_height = Mathf.Max(imageTransform1.rows(), LgrayMat.rows());
    Debug.Log("dst_width=" + dst_width);
    Debug.Log("dst_height=" + dst_height);
    Mat outimage = new Mat(dst_height, dst_width, CvType.CV_8UC1, new Scalar(0));

    // Fix: copy into sub-regions of the canvas. A plain copyTo(outimage)
    // reallocates outimage to the source's size instead of pasting into it.
    imageTransform1.copyTo(outimage.submat(new OpenCVForUnity.CoreModule.Rect(0, 0, imageTransform1.cols(), imageTransform1.rows())));
    LgrayMat.copyTo(outimage.submat(new OpenCVForUnity.CoreModule.Rect(0, 0, LgrayMat.cols(), LgrayMat.rows())));

    Texture2D resulttexture = new Texture2D(outimage.cols(), outimage.rows(), TextureFormat.RGBA32, false);
    Utils.matToTexture2D(outimage, resulttexture);
    MresultImage.texture = resulttexture;
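
    For reference, SortCorners and cornersXY are my own helpers and are not shown above; here is a minimal sketch of the corner math they perform, using Core.perspectiveTransform, plus one way to display only a partial area of the stitched result. WarpCorners, ShowRegion, and partialImage are illustrative names only, not OpenCV for Unity APIs:

    // Sketch: transform the four corners of src by H; this is what
    // CalcCorners/SortCorners compute. Order: top-left, top-right,
    // bottom-right, bottom-left.
    Point[] WarpCorners(Mat H, Mat src)
    {
        MatOfPoint2f srcCorners = new MatOfPoint2f(
            new Point(0, 0),
            new Point(src.cols(), 0),
            new Point(src.cols(), src.rows()),
            new Point(0, src.rows()));
        MatOfPoint2f dstCorners = new MatOfPoint2f();
        Core.perspectiveTransform(srcCorners, dstCorners, H);
        return dstCorners.toArray();
    }

    // Sketch: display only a sub-region of the stitched image.
    void ShowRegion(Mat stitched, int x, int y, int w, int h)
    {
        // clone() so the region is continuous in memory before conversion
        Mat region = stitched.submat(new OpenCVForUnity.CoreModule.Rect(x, y, w, h)).clone();
        Texture2D tex = new Texture2D(region.cols(), region.rows(), TextureFormat.RGBA32, false);
        Utils.matToTexture2D(region, tex);
        partialImage.texture = tex; // partialImage: a hypothetical RawImage
    }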
     
  2. MBrown42

    MBrown42

    Joined:
    Jan 23, 2018
    Posts:
    86
    Hi, first off, your pasted code is a little tough to read; there's a code-snippet widget you can use in posts, which I only discovered myself after too many years. OpenCV is pretty powerful, but may I ask why you are using it as opposed to a plain Unity Texture2D? I think I'm doing exactly what you're describing for a new game: I use a seamless texture that is broken into little pieces and displayed in the right place as the player moves, to maintain continuity. I loop across the width and height in small tiles, work out each tile's region of the big image from its index and tile size, and then use GetPixel to grab the color and/or grayscale intensity at each pixel of the big image.
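
    Something like this, as a rough sketch, assuming a readable source Texture2D called bigTex and square tiles (all names made up):

    // Sketch: cut a big texture into tileSize x tileSize pieces.
    // GetPixels grabs a whole rectangular block at once, which is much
    // faster than calling GetPixel per pixel; the texture must have
    // Read/Write enabled in its import settings.
    List<Texture2D> SliceTexture(Texture2D bigTex, int tileSize)
    {
        var tiles = new List<Texture2D>();
        for (int y = 0; y + tileSize <= bigTex.height; y += tileSize)
        {
            for (int x = 0; x + tileSize <= bigTex.width; x += tileSize)
            {
                Color[] block = bigTex.GetPixels(x, y, tileSize, tileSize);
                Texture2D tile = new Texture2D(tileSize, tileSize, TextureFormat.RGBA32, false);
                tile.SetPixels(block);
                tile.Apply();
                tiles.Add(tile);
            }
        }
        return tiles;
    }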

    If you need actual image processing and filtering, like mirroring or whatnot, some of it you can probably do in raw code between the main big texture and a second one you create in memory. Or maybe something like this would help out, or others like it.
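
    For mirroring, a rough sketch of that raw-code approach between two textures (assumes src is readable):

    // Sketch: horizontal mirror into a second texture created in memory.
    Texture2D MirrorHorizontal(Texture2D src)
    {
        Texture2D dst = new Texture2D(src.width, src.height, TextureFormat.RGBA32, false);
        for (int y = 0; y < src.height; y++)
        {
            for (int x = 0; x < src.width; x++)
            {
                // Read from the opposite column so the copy comes out flipped left-right
                dst.SetPixel(x, y, src.GetPixel(src.width - 1 - x, y));
            }
        }
        dst.Apply();
        return dst;
    }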

    As a last resort, if you cannot do any of it in Unity, search for "marshalling" between C/C++ land and Unity. I've only done it once, a few years back with Unity 5; it may be easier now, but it wasn't trivial.
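
    The basic shape of that marshalling is a DllImport into a native plugin; a minimal sketch, where NativeStitcher and StitchImages are made-up names:

    using System.Runtime.InteropServices;
    using UnityEngine;

    public class NativeBridge : MonoBehaviour
    {
        // Sketch: P/Invoke into a hypothetical native plugin. The compiled
        // library must sit under Assets/Plugins and export this exact C symbol.
        [DllImport("NativeStitcher")]
        private static extern int StitchImages(byte[] left, byte[] right,
                                               int width, int height, byte[] outBuffer);
    }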

    Hope that helps!
     
  3. zengguangkk

    zengguangkk

    Joined:
    Mar 2, 2022
    Posts:
    2
    Thanks for your reply. The problem has been solved. I use Texture2D because I need to display images from a web camera. The mirroring was probably caused by the logical order of the code; it disappeared after I reordered some of the feature-extraction code.