[RELEASED] OpenCV for Unity

Discussion in 'Assets and Asset Store' started by EnoxSoftware, Oct 30, 2014.

  1. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    Imgcodecs.IMREAD_GRAYSCALE -> Imgcodecs.IMREAD_COLOR
    new Point(sceneCornersData[0] + img1Mat.cols(), sceneCornersData[1]) -> new Point(sceneCornersData[0], sceneCornersData[1])
    Code (CSharp):
    1. using System.Collections;
    2. using System.Collections.Generic;
    3. using System.Linq;
    4. using OpenCVForUnity.Calib3dModule;
    5. using OpenCVForUnity.CoreModule;
    6. using OpenCVForUnity.Features2dModule;
    7. using OpenCVForUnity.ImgcodecsModule;
    8. using OpenCVForUnity.ImgprocModule;
    9. using OpenCVForUnity.UnityUtils;
    10. using UnityEngine;
    11.  
    12. public class MatchPicAlt : MonoBehaviour
    13.  
    14. {
    15.  
    16.     public string nameSrc = "box.png";
    17.     public string nameDst = "boxinscene.png";
    18.  
    19.     // Start is called before the first frame update
    20.     void Start()
    21.     {
    22.  
    23.         //if true, The error log of the Native side OpenCV will be displayed on the Unity Editor Console.
    24.         Utils.setDebugMode(true);
    25.  
    26.         Debug.Log("Start");
    27.      
    28.         Mat img1Mat = Imgcodecs.imread(Utils.getFilePath(nameSrc), Imgcodecs.IMREAD_COLOR);
    29.         Mat img2Mat = Imgcodecs.imread(Utils.getFilePath(nameDst), Imgcodecs.IMREAD_COLOR);
    30.  
    31.         var akaze = AKAZE.create();
    32.  
    33.         MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
    34.         Mat descriptors1 = new Mat();
    35.         Mat mask = new Mat();
    36.  
    37.         akaze.detectAndCompute(img1Mat, mask, keypoints1, descriptors1);
    38.  
    39.         MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
    40.         Mat descriptors2 = new Mat();
    41.  
    42.         akaze.detectAndCompute(img2Mat, mask, keypoints2, descriptors2);
    43.  
    44.         var flann = FlannBasedMatcher.create();// DescriptorMatcher.create (DescriptorMatcher.FLANNBASED);
    45.  
    46.         flann.read(Utils.getFilePath("conf.yml"));
    47.  
    48.  
    49.  
    50.         var knnMatches = new List<MatOfDMatch>();
    51.         flann.knnMatch(descriptors1, descriptors2, knnMatches, 2);
    52.  
    53.         List<DMatch> good_matches = new List<DMatch>();
    54.         MatOfDMatch goodMatches = new MatOfDMatch();
    55.  
    56.         foreach (var match in knnMatches)
    57.         {
    58.             var arrMatch = match.toArray();
    59.             {
    60.                 if (arrMatch[0].distance < 0.7f * arrMatch[1].distance)
    61.                 {
    62.                     good_matches.Add(arrMatch[0]);
    63.                 }
    64.             }
    65.         }
    66.  
    67.         goodMatches.fromList(good_matches);
    68.  
    69.         Debug.Log("goodMatches Count " + good_matches.Count);
    70.  
    71.         Mat result = new Mat(new Size(img1Mat.cols() + img2Mat.cols(), img1Mat.rows()), CvType.CV_32FC2);
    72.  
    73.         List<Point> imgPoints1List = new List<Point>();
    74.         List<Point> imgPoints2List = new List<Point>();
    75.  
    76.         List<KeyPoint> keypoints1List = keypoints1.toList();
    77.         List<KeyPoint> keypoints2List = keypoints2.toList();
    78.  
    79.         for (int i = 0; i < good_matches.Count; i++)
    80.         {
    81.             imgPoints1List.Add(keypoints1List[good_matches[i].queryIdx].pt);
    82.             imgPoints2List.Add(keypoints2List[good_matches[i].trainIdx].pt);
    83.         }
    84.  
    85.         MatOfPoint2f obj = new MatOfPoint2f();
    86.         obj.fromList(imgPoints1List);
    87.         MatOfPoint2f scene = new MatOfPoint2f();
    88.         scene.fromList(imgPoints2List);
    89.  
    90.         Mat H = Calib3d.findHomography(obj, scene, Calib3d.RANSAC, 1);
    91.  
    92.         Mat objCorners = new Mat(4, 1, CvType.CV_32FC2), sceneCorners = new Mat();
    93.         float[] objCornersData = new float[(int) (objCorners.total() * objCorners.channels())];
    94.         objCorners.get(0, 0, objCornersData);
    95.         objCornersData[0] = 0;
    96.         objCornersData[1] = 0;
    97.         objCornersData[2] = img1Mat.cols();
    98.         objCornersData[3] = 0;
    99.         objCornersData[4] = img1Mat.cols();
    100.         objCornersData[5] = img1Mat.rows();
    101.         objCornersData[6] = 0;
    102.         objCornersData[7] = img1Mat.rows();
    103.         objCorners.put(0, 0, objCornersData);
    104.         Core.perspectiveTransform(objCorners, sceneCorners, H);
    105.         float[] sceneCornersData = new float[(int) (sceneCorners.total() * sceneCorners.channels())];
    106.         sceneCorners.get(0, 0, sceneCornersData);
    107.  
    108.         Imgproc.line(img2Mat, new Point(sceneCornersData[0], sceneCornersData[1]),
    109.         new Point(sceneCornersData[2], sceneCornersData[3]), new Scalar(0, 255, 0), 4);
    110.         Imgproc.line(img2Mat, new Point(sceneCornersData[2], sceneCornersData[3]),
    111.                 new Point(sceneCornersData[4], sceneCornersData[5]), new Scalar(0, 255, 0), 4);
    112.         Imgproc.line(img2Mat, new Point(sceneCornersData[4], sceneCornersData[5]),
    113.                 new Point(sceneCornersData[6], sceneCornersData[7]), new Scalar(0, 255, 0), 4);
    114.         Imgproc.line(img2Mat, new Point(sceneCornersData[6], sceneCornersData[7]),
    115.                 new Point(sceneCornersData[0], sceneCornersData[1]), new Scalar(0, 255, 0), 4);
    116.  
    117.  
    118.         Features2d.drawMatches(img1Mat, keypoints1, img2Mat, keypoints2, goodMatches, result);
    119.         Texture2D texture = new Texture2D(result.cols(), result.rows(), TextureFormat.RGBA32, false);
    120.  
    121.         Utils.matToTexture2D(result, texture);
    122.  
    123.  
    124.         gameObject.GetComponent<Renderer>().material.mainTexture = texture;
    125.  
    126.  
    127.         Utils.setDebugMode(false);  // turn the debug mode back off
    128.  
    129.     }
    130.     // Update is called once per frame
    131.     void Update()
    132.     {
    133.  
    134.     }
    135. }
     
    elvis-satta likes this.
  2. H_v_H

    H_v_H

    Joined:
    May 21, 2018
    Posts:
    4
    Hello, I'm having difficulties loading my customized Darknet YOLOv4 (not YOLOv4-tiny) configuration into the YOLO webcam object detection sample scene for testing (*.weights, *.cfg, and *.names files).
    The scene keeps crashing.
     
  3. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    I found the sample code for darknet's Yolov4 and OpenCV here, so I tried it out.
    https://gist.github.com/UnaNancyOwen/d9b7e3653b27b43b8712c37217e4f422

    Tested model files :
    yolov4x-mish.weights
    https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v4_pre/yolov4x-mish.weights
    yolov4x-mish.cfg
    https://github.com/AlexeyAB/darknet/blob/master/cfg/yolov4x-mish.cfg

    yolov4.PNG yolov4_result.PNG

    It looks like the result comes out without any problems.
    It is often impossible to tell whether a given model can be loaded by OpenCV without actually trying it.
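    For reference, loading a Darknet cfg/weights pair in OpenCVForUnity comes down to Dnn.readNetFromDarknet. A minimal sketch (the paths are just wherever you placed the files):

    Code (CSharp):
    // Requires OpenCVForUnity.DnnModule and OpenCVForUnity.UnityUtils.
    string cfg = Utils.getFilePath("yolov4x-mish.cfg");
    string weights = Utils.getFilePath("yolov4x-mish.weights");
    Net net = Dnn.readNetFromDarknet(cfg, weights);
    if (net.empty())
        Debug.Log("The model could not be loaded. Check the file paths.");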
     
    Last edited: Dec 6, 2022
  4. H_v_H

    H_v_H

    Joined:
    May 21, 2018
    Posts:
    4
    Deleting the package and installing it again solved the problem.
     
  5. elvis-satta

    elvis-satta

    Joined:
    Nov 13, 2013
    Posts:
    19
    Thanks @EnoxSoftware for the answer and guidance!

    By the way, I tried to adapt the scene to work with the webcam but failed: when the reference image is not framed by the camera I get two errors, and, strangely, when the image target is framed the picture flips vertically.

    What can I do to make it work with the webcam?

    Code (CSharp):
    1. using System.Collections.Generic;
    2. using OpenCVForUnity.Calib3dModule;
    3. using OpenCVForUnity.CoreModule;
    4. using OpenCVForUnity.Features2dModule;
    5. using OpenCVForUnity.ImgprocModule;
    6. using OpenCVForUnity.UnityUtils;
    7. using OpenCVForUnity.UnityUtils.Helper;
    8. using UnityEngine;
    9.  
    10. namespace OpenCVForUnityExample
    11. {
    12.  
    13.     [RequireComponent(typeof(WebCamTextureToMatHelper))]
    14.     public class Feature2DWebCam : MonoBehaviour
    15.     {
    16.         public Texture2D srcTex;
    17.         public int Score;
    18.         private int safeLength;
    19.         FpsMonitor fpsMonitor;
    20.         WebCamTextureToMatHelper webCamTextureToMatHelper;
    21.         Texture2D texture;
    22.         Mat grayMat;
    23.  
    24.         void Start()
    25.         {
    26.             fpsMonitor = GetComponent<FpsMonitor>();
    27.  
    28.             webCamTextureToMatHelper = gameObject.GetComponent<WebCamTextureToMatHelper>();
    29.  
    30. #if UNITY_ANDROID && !UNITY_EDITOR
    31.             // Avoids the front camera low light issue that occurs in only some Android devices (e.g. Google Pixel, Pixel2).
    32.             webCamTextureToMatHelper.avoidAndroidFrontCameraLowLightIssue = true;
    33. #endif
    34.             webCamTextureToMatHelper.Initialize();
    35.         }
    36.  
    37.         /// <summary>
    38.         /// Raises the web cam texture to mat helper initialized event.
    39.         /// </summary>
    40.         public void OnWebCamTextureToMatHelperInitialized()
    41.         {
    42.             Debug.Log("OnWebCamTextureToMatHelperInitialized");
    43.  
    44.             Mat webCamTextureMat = webCamTextureToMatHelper.GetMat();
    45.  
    46.             texture = new Texture2D(webCamTextureMat.cols(), webCamTextureMat.rows(), TextureFormat.RGBA32, false);
    47.             Utils.matToTexture2D(webCamTextureMat, texture, webCamTextureToMatHelper.GetBufferColors());
    48.  
    49.             gameObject.GetComponent<Renderer>().material.mainTexture = texture;
    50.  
    51.             gameObject.transform.localScale = new Vector3(webCamTextureMat.cols(), webCamTextureMat.rows(), 1);
    52.  
    53.             Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);
    54.  
    55.             if (fpsMonitor != null)
    56.             {
    57.                 fpsMonitor.Add("width", webCamTextureMat.width().ToString());
    58.                 fpsMonitor.Add("height", webCamTextureMat.height().ToString());
    59.                 fpsMonitor.Add("orientation", Screen.orientation.ToString());
    60.             }
    61.  
    62.  
    63.             float width = webCamTextureMat.width();
    64.             float height = webCamTextureMat.height();
    65.  
    66.             float widthScale = (float)Screen.width / width;
    67.             float heightScale = (float)Screen.height / height;
    68.             if (widthScale < heightScale)
    69.             {
    70.                 Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2;
    71.             }
    72.             else
    73.             {
    74.                 Camera.main.orthographicSize = height / 2;
    75.             }
    76.  
    77.             grayMat = new Mat(webCamTextureMat.rows(), webCamTextureMat.cols(), CvType.CV_8UC3);
    78.         }
    79.  
    80.         /// <summary>
    81.         /// Raises the web cam texture to mat helper disposed event.
    82.         /// </summary>
    83.         public void OnWebCamTextureToMatHelperDisposed()
    84.         {
    85.             Debug.Log("OnWebCamTextureToMatHelperDisposed");
    86.             if (grayMat != null)
    87.                 grayMat.Dispose();
    88.  
    89.             if (texture != null)
    90.             {
    91.                 Texture2D.Destroy(texture);
    92.                 texture = null;
    93.             }
    94.         }
    95.  
    96.         /// <summary>
    97.         /// Raises the web cam texture to mat helper error occurred event.
    98.         /// </summary>
    99.         /// <param name="errorCode">Error code.</param>
    100.         public void OnWebCamTextureToMatHelperErrorOccurred(WebCamTextureToMatHelper.ErrorCode errorCode)
    101.         {
    102.             Debug.Log("OnWebCamTextureToMatHelperErrorOccurred " + errorCode);
    103.         }
    104.  
    105.         void Update()
    106.         {
    107.             Utils.setDebugMode(true);
    108.             if (webCamTextureToMatHelper.IsPlaying() && webCamTextureToMatHelper.DidUpdateThisFrame())
    109.             {
    110.  
    111.                 Mat rgbaMat = webCamTextureToMatHelper.GetMat();
    112.                 Imgproc.cvtColor(rgbaMat, grayMat, Imgproc.COLOR_RGBA2GRAY);
    113.                 Utils.matToTexture2D(rgbaMat, texture, webCamTextureToMatHelper.GetBufferColors());
    114.  
    115.  
    116.                 Mat img1Mat = new Mat(srcTex.height, srcTex.width, CvType.CV_8UC3);
    117.                 Utils.texture2DToMat(srcTex, img1Mat);
    118.  
    119.                 var akaze = AKAZE.create();
    120.  
    121.                 MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
    122.                 Mat descriptors1 = new Mat();
    123.                 Mat mask = new Mat();
    124.  
    125.                 akaze.detectAndCompute(img1Mat, mask, keypoints1, descriptors1);
    126.  
    127.                 MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
    128.                 Mat descriptors2 = new Mat();
    129.  
    130.                 akaze.detectAndCompute(rgbaMat, mask, keypoints2, descriptors2);
    131.  
    132.                 var flann = FlannBasedMatcher.create();// DescriptorMatcher.create (DescriptorMatcher.FLANNBASED);
    133.  
    134.                 flann.read(Utils.getFilePath("conf.yml"));
    135.  
    136.                 var knnMatches = new List<MatOfDMatch>();
    137.  
    138.                 flann.knnMatch(descriptors1, descriptors2, knnMatches, 2);
    139.  
    140.                 List<DMatch> good_matches = new List<DMatch>();
    141.                 MatOfDMatch goodMatches = new MatOfDMatch();
    142.  
    143.                 foreach (var match in knnMatches)
    144.                 {
    145.                     var arrMatch = match.toArray();
    146.                     {
    147.                         // Debug.Log(arrMatch.Length);
    148.                         safeLength = arrMatch.Length;
    149.  
    150.                         if (arrMatch.Length > 1)
    151.                         {
    152.  
    153.                             if (arrMatch[0].distance < 0.7f * arrMatch[1].distance)
    154.                             {
    155.                                 good_matches.Add(arrMatch[0]);
    156.  
    157.                             }
    158.  
    159.                         }
    160.  
    161.                     }
    162.                 }
    163.                 Debug.Log(safeLength + " SAFELENGTH");
    164.  
    165.                 goodMatches.fromList(good_matches);
    166.  
    167.                 if (good_matches.Count > 4 && safeLength > 1)
    168.  
    169.                 {
    170.  
    171.                     Debug.Log("goodMatches Count " + good_matches.Count);
    172.                     Score = good_matches.Count;
    173.  
    174.                     //    Mat result = new Mat(new Size(img1Mat.cols(), img1Mat.rows()), CvType.CV_8UC1);
    175.  
    176.                     List<Point> imgPoints1List = new List<Point>();
    177.                     List<Point> imgPoints2List = new List<Point>();
    178.  
    179.                     List<KeyPoint> keypoints1List = keypoints1.toList();
    180.                     List<KeyPoint> keypoints2List = keypoints2.toList();
    181.  
    182.                     for (int i = 0; i < good_matches.Count; i++)
    183.                     {
    184.                         imgPoints1List.Add(keypoints1List[good_matches[i].queryIdx].pt);
    185.                         imgPoints2List.Add(keypoints2List[good_matches[i].trainIdx].pt);
    186.                     }
    187.  
    188.                     MatOfPoint2f obj = new MatOfPoint2f();
    189.                     obj.fromList(imgPoints1List);
    190.                     MatOfPoint2f scene = new MatOfPoint2f();
    191.                     scene.fromList(imgPoints2List);
    192.  
    193.                     Mat H = Calib3d.findHomography(obj, scene, Calib3d.RANSAC, 1);
    194.  
    195.                     Mat objCorners = new Mat(4, 1, CvType.CV_32FC2), sceneCorners = new Mat();
    196.                     float[] objCornersData = new float[(int)(objCorners.total() * objCorners.channels())];
    197.                     objCorners.get(0, 0, objCornersData);
    198.                     objCornersData[0] = 0;
    199.                     objCornersData[1] = 0;
    200.                     objCornersData[2] = img1Mat.cols();
    201.                     objCornersData[3] = 0;
    202.                     objCornersData[4] = img1Mat.cols();
    203.                     objCornersData[5] = img1Mat.rows();
    204.                     objCornersData[6] = 0;
    205.                     objCornersData[7] = img1Mat.rows();
    206.                     objCorners.put(0, 0, objCornersData);
    207.                     Core.perspectiveTransform(objCorners, sceneCorners, H);
    208.                     float[] sceneCornersData = new float[(int)(sceneCorners.total() * sceneCorners.channels())];
    209.                     sceneCorners.get(0, 0, sceneCornersData);
    210.  
    211.                     Imgproc.line(rgbaMat, new Point(sceneCornersData[0], sceneCornersData[1]),
    212.                     new Point(sceneCornersData[2], sceneCornersData[3]), new Scalar(0, 255, 0), 4);
    213.                     Imgproc.line(rgbaMat, new Point(sceneCornersData[2], sceneCornersData[3]),
    214.                             new Point(sceneCornersData[4], sceneCornersData[5]), new Scalar(0, 255, 0), 4);
    215.                     Imgproc.line(rgbaMat, new Point(sceneCornersData[4], sceneCornersData[5]),
    216.                             new Point(sceneCornersData[6], sceneCornersData[7]), new Scalar(0, 255, 0), 4);
    217.                     Imgproc.line(rgbaMat, new Point(sceneCornersData[6], sceneCornersData[7]),
    218.                             new Point(sceneCornersData[0], sceneCornersData[1]), new Scalar(0, 255, 0), 4);
    219.  
    220.  
    221.                     //       Features2d.drawMatches(img1Mat, keypoints1, rgbaMat, keypoints2, goodMatches, result);
    222.  
    223.                     Utils.matToTexture2D(rgbaMat, texture, webCamTextureToMatHelper.GetBufferColors());
    224.  
    225.                     gameObject.GetComponent<Renderer>().material.mainTexture = texture;
    226.  
    227.                 }
    228.             }
    229.         }
    230.         void OnDestroy()
    231.         {
    232.             webCamTextureToMatHelper.Dispose();
    233.         }
    234.  
    235.         /// <summary>
    236.         /// Raises the back button click event.
    237.         /// </summary>
    238.         public void OnBackButtonClick()
    239.         {
    240.             //  SceneManager.LoadScene ("OpenCVForUnityExample");
    241.         }
    242.  
    243.         /// <summary>
    244.         /// Raises the play button click event.
    245.         /// </summary>
    246.         public void OnPlayButtonClick()
    247.         {
    248.             webCamTextureToMatHelper.Play();
    249.         }
    250.  
    251.         /// <summary>
    252.         /// Raises the pause button click event.
    253.         /// </summary>
    254.         public void OnPauseButtonClick()
    255.         {
    256.             webCamTextureToMatHelper.Pause();
    257.         }
    258.  
    259.         /// <summary>
    260.         /// Raises the stop button click event.
    261.         /// </summary>
    262.         public void OnStopButtonClick()
    263.         {
    264.             webCamTextureToMatHelper.Stop();
    265.         }
    266.  
    267.         /// <summary>
    268.         /// Raises the change camera button click event.
    269.         /// </summary>
    270.         public void OnChangeCameraButtonClick()
    271.         {
    272.             webCamTextureToMatHelper.requestedIsFrontFacing = !webCamTextureToMatHelper.requestedIsFrontFacing;
    273.         }
    274.  
    275.  
    276.     }
    277.  
    278. }
     
  6. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    Please try the attached Feature2DWebCam.unitypackage.
     

    Attached Files:

    elvis-satta likes this.
  7. bandangtaun

    bandangtaun

    Joined:
    Dec 17, 2022
    Posts:
    1
    Hello, I want to draw a circle on the image to determine the sphericity value. How can I calculate and draw a circle like in this picture?
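    Something along these lines is what I have tried to piece together, but I am not sure it is the right approach (a sketch assuming the object's contour has already been extracted with Imgproc.findContours; contour and img are placeholders):

    Code (CSharp):
    // Requires OpenCVForUnity.CoreModule and OpenCVForUnity.ImgprocModule.
    // Fit the smallest enclosing circle around the contour.
    MatOfPoint2f contour2f = new MatOfPoint2f(contour.toArray());
    Point center = new Point();
    float[] radius = new float[1];
    Imgproc.minEnclosingCircle(contour2f, center, radius);

    // A simple sphericity-like ratio: contour area vs. enclosing-circle area (1.0 = perfect circle).
    double area = Imgproc.contourArea(contour);
    double sphericity = area / (Mathf.PI * radius[0] * radius[0]);

    // Draw the fitted circle on the image.
    Imgproc.circle(img, center, (int)radius[0], new Scalar(0, 255, 0), 2);
    Debug.Log("sphericity: " + sphericity);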

     
  8. bsganga99

    bsganga99

    Joined:
    Aug 25, 2022
    Posts:
    1
    I installed the OpenCV package today, but the tensorflow folder is missing from the dnn folder. Any idea about this?
     
  9. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
  10. FSIL

    FSIL

    Joined:
    Nov 19, 2020
    Posts:
    6
    Hi,
    I am using your WebGL demo provided on the Asset Store.
    I am using the "Lightweight PoseEstimation WebCam Example" option.
    The WebGL demo works, but it runs at 2-3 fps.
    I also installed the Android application, ran the same option, and it also runs at 2-3 fps.
    Please reply if I am doing something wrong or if there is another way to run it.
    I am looking for body tracking in WebGL as shown in the "Lightweight PoseEstimation WebCam Example".

    Thank you
     
  11. lee_haw

    lee_haw

    Joined:
    Aug 3, 2022
    Posts:
    1
    Hello

    I am trying to access a UDP video stream in Unity via a simple VideoCapture. I cannot open the stream in Unity, as the open method always returns false. The same code works in Python with OpenCV, and ffplay on the terminal shows the stream as well. As I understand it, FFMPEG is needed for decoding and reassembling, so I tried to set the CAP_FFMPEG flag, but still nothing happens. Even with setDebugMode set to true and a try-catch block, no exception is thrown from OpenCV.
    Any ideas or related issues? Thanks
     
  12. youatt1984

    youatt1984

    Joined:
    May 27, 2022
    Posts:
    1
    Hey - I bought this asset specifically to leverage ArUco Marker Detection in Unity and it looks like you pulled support in 2.5.1. (At least I don't see it in the ContribModules folder in the repository anymore...) Is there a reason for that? It looks like it's still supported by OpenCV 4.7.0 and is included in the 5.0.0-pre. Can you pull it back in? Thanks for your help.
     
  13. Pospischil

    Pospischil

    Joined:
    Sep 11, 2018
    Posts:
    5
    I have tried to create a pipeline for tracking different image markers with OpenCVForUnity and ARFoundation.

    The recognition of the different markers actually works. The corresponding marker in the camera image is recognised correctly and the 3D object that is supposed to be displayed on it is shown.

    However, I have problems with the positioning. I use the ComputePose function for this as in the MarkerLess AR Example (https://github.com/EnoxSoftware/MarkerLessARExample).
    Code (CSharp):
    1.  /// <summary>
    2.         /// Computes the pose.
    3.         /// </summary>
    4.         /// <param name="pattern">Pattern.</param>
    5.         /// <param name="camMatrix">Cam matrix.</param>
    6.         /// <param name="distCoeff">Dist coeff.</param>
    7.         public void computePose(Pattern pattern, Mat camMatrix, MatOfDouble distCoeff)
    8.         {
    9.             Mat Rvec = new Mat();
    10.             Mat Tvec = new Mat();
    11.             Mat raux = new Mat();
    12.             Mat taux = new Mat();
    13.  
    14.             Calib3d.solvePnP(pattern.points3d, points2d, camMatrix, distCoeff, raux, taux);
    15.             raux.convertTo(Rvec, CvType.CV_32F);
    16.             taux.convertTo(Tvec, CvType.CV_32F);
    17.  
    18.             Mat rotMat = new Mat(3, 3, CvType.CV_64FC1);
    19.             Calib3d.Rodrigues(Rvec, rotMat);
    20.  
    21.             pose3d.SetRow(0, new Vector4((float)rotMat.get(0, 0)[0], (float)rotMat.get(0, 1)[0], (float)rotMat.get(0, 2)[0], (float)Tvec.get(0, 0)[0]));
    22.             pose3d.SetRow(1, new Vector4((float)rotMat.get(1, 0)[0], (float)rotMat.get(1, 1)[0], (float)rotMat.get(1, 2)[0], (float)Tvec.get(1, 0)[0]));
    23.             pose3d.SetRow(2, new Vector4((float)rotMat.get(2, 0)[0], (float)rotMat.get(2, 1)[0], (float)rotMat.get(2, 2)[0], (float)Tvec.get(2, 0)[0]));
    24.             pose3d.SetRow(3, new Vector4(0, 0, 0, 1));
    25.  
    26.             Rvec.Dispose();
    27.             Tvec.Dispose();
    28.             raux.Dispose();
    29.             taux.Dispose();
    30.             rotMat.Dispose();
    31.         }
    The problem is that the 3D object is displayed rather unstably on the marker and does not remain still. Even if I use more feature points for detection, this does not seem to have a significant effect.

    I also tried to use the Lerp filter from ARFoundationWithOpenCVForUnityExample (https://github.com/EnoxSoftware/ARF...rUcoExample/ARFoundationCameraArUcoExample.cs). This makes the object more stable on the marker, but it also becomes extremely sluggish and no longer seems to stick firmly to the marker; instead it swings slightly behind it.


    Does anyone have an idea or an approach for what could be adjusted to make the positioning more stable, without losing the impression that the object is glued to the marker?
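    For context, the kind of per-frame smoothing I have been experimenting with looks roughly like this (a sketch, not the example's code; rawPose is the Matrix4x4 built from computePose, arObject is the transform that should stick to the marker, and the factors are guesses):

    Code (CSharp):
    // Convert the pose matrix into a target position and rotation.
    Vector3 targetPos = rawPose.GetColumn(3);
    Quaternion targetRot = Quaternion.LookRotation(rawPose.GetColumn(2), rawPose.GetColumn(1));

    // Exponential smoothing: lower factors are more stable but laggier, higher factors are more responsive but jittery.
    float positionLerp = 0.5f;
    float rotationLerp = 0.5f;
    arObject.localPosition = Vector3.Lerp(arObject.localPosition, targetPos, positionLerp);
    arObject.localRotation = Quaternion.Slerp(arObject.localRotation, targetRot, rotationLerp);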
     
  14. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    The ffmpeg backend is only available on Windows platforms. See ReadMe.pdf for usage instructions.
    https://forum.unity.com/threads/released-opencv-for-unity.277080/page-55#post-7427405
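    On Windows, the backend is requested when the capture is opened, roughly as below (a sketch; the URL is a placeholder for your stream):

    Code (CSharp):
    // Requires OpenCVForUnity.VideoioModule.
    VideoCapture capture = new VideoCapture("udp://0.0.0.0:1234", Videoio.CAP_FFMPEG);
    if (!capture.isOpened())
        Debug.Log("The stream could not be opened.");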
     
  15. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    In OpenCV 4.7.0, the ArUco module was moved to the Objdetect module: https://github.com/opencv/opencv_contrib/pull/3394
    With this change, ARUcoExample in OpenCVForUnity has been moved to the objdetect folder.
    https://github.com/EnoxSoftware/Ope...odules/objdetect/ArUcoExample/ArUcoExample.cs
    https://enoxsoftware.github.io/Open..._open_c_v_for_unity_1_1_objdetect_module.html
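    In other words, marker detection now goes through the Objdetect module classes. A minimal sketch (not the example itself; imgMat is a placeholder input image):

    Code (CSharp):
    // Requires OpenCVForUnity.CoreModule, OpenCVForUnity.ObjdetectModule and System.Collections.Generic.
    Dictionary dictionary = Objdetect.getPredefinedDictionary(Objdetect.DICT_4X4_50);
    ArucoDetector detector = new ArucoDetector(dictionary, new DetectorParameters());
    List<Mat> corners = new List<Mat>();
    Mat ids = new Mat();
    detector.detectMarkers(imgMat, corners, ids);
    Debug.Log("markers found: " + ids.total());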
     
    youatt1984 likes this.
  16. WayneVenter

    WayneVenter

    Joined:
    May 8, 2019
    Posts:
    56
    @EnoxSoftware would it be possible for you to create two more examples? First, object detection using NanoDet, based on the link below: the latest YOLO sample is broken when you use YOLOv5 and export to ONNX, because onnx > 1.13 and PyTorch > 1.13.1 export in the ONNX v8 format, which no longer works with OpenCV object detection in the new 2.5.1 release. If you use an older ONNX export from PyTorch 1.11 in ONNX v7 format with onnx 1.12 it works, but no one seems to support the old weights file format anymore. For mobile we need new, fast models like NanoDet, so please add a DNN example for this if you can.
    Details here: https://github.com/opencv/opencv_zoo/tree/master/models/object_detection_nanodet

    Second, a YOLOv5 segmentation example; there is a good example here: https://learnopencv.com/yolov5-instance-segmentation/
    Again, I can't get this working with OpenCV for Unity.
    Things seem to work in Python 3.9, but once you try to bring that into Unity with OpenCV it just doesn't work.

    Can you guys assist? It would be greatly appreciated.
     
    Last edited: Jan 13, 2023
  17. WayneVenter

    WayneVenter

    Joined:
    May 8, 2019
    Posts:
    56
    @EnoxSoftware or alternatively, please fix YoloObjectDetection to work with an ONNX file, which is more relevant than the weights file, and add support for the ONNX v8 format and onnx > 1.13.1 when using PyTorch to export .pt files to ONNX.

    A perfect example is here; if you follow this guide and export ONNX, it should be usable in OpenCV for Unity and the YoloObjectDetection example, but no go.
    https://github.com/ultralytics/yolov5/issues/251

    If you can fix the OpenCV for Unity DNN to work with YOLOv5, please:
    https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python/
     
    Last edited: Jan 13, 2023
  18. WayneVenter

    WayneVenter

    Joined:
    May 8, 2019
    Posts:
    56
    I dived further down the rabbit hole and this is where I get stuck: doing all of this using yolov5s.onnx works in Python but not in Unity.
    The outLayerType == "Identity"; that is as far as I could work out, so the current YOLO example .cs file does not recognize the layer type. Could anyone help me map this working Python OpenCV/ONNX code to a C# example for Unity?

    The code snippet I need help with is this:

    Code (Python):
    1. def post_process(input_image, outputs):
    2.     # Lists to hold respective values while unwrapping.
    3.     class_ids = []
    4.     confidences = []
    5.     boxes = []
    6.  
    7.     # Rows.
    8.     rows = outputs[0].shape[1]
    9.  
    10.     image_height, image_width = input_image.shape[:2]
    11.  
    12.     # Resizing factor.
    13.     x_factor = image_width / INPUT_WIDTH
    14.     y_factor = image_height / INPUT_HEIGHT
    15.  
    16.     # Iterate through 25200 detections.
    17.     for r in range(rows):
    18.         row = outputs[0][0][r]
    19.         confidence = row[4]
    20.  
    21.         # Discard bad detections and continue.
    22.         if confidence >= CONFIDENCE_THRESHOLD:
    23.             classes_scores = row[5:]
    24.  
    25.             # Get the index of max class score.
    26.             class_id = np.argmax(classes_scores)
    27.  
    28.             #  Continue if the class score is above threshold.
    29.             if (classes_scores[class_id] > SCORE_THRESHOLD):
    30.                 confidences.append(confidence)
    31.                 class_ids.append(class_id)
    32.  
    33.                 cx, cy, w, h = row[0], row[1], row[2], row[3]
    34.  
    35.                 left = int((cx - w / 2) * x_factor)
    36.                 top = int((cy - h / 2) * y_factor)
    37.                 width = int(w * x_factor)
    38.                 height = int(h * y_factor)
    39.  
    40.                 box = np.array([left, top, width, height])
    41.                 boxes.append(box)
    42.  
    Python project can be pulled here: https://github.com/ultralytics/yolov5 and then here is the working Python code.

    Code (Python):
    1. import cv2
    2. import numpy as np
    3.  
    4. # Constants.
    5. INPUT_WIDTH = 640
    6. INPUT_HEIGHT = 640
    7. SCORE_THRESHOLD = 0.5
    8. NMS_THRESHOLD = 0.45
    9. CONFIDENCE_THRESHOLD = 0.45
    10.  
    11. # Text parameters.
    12. FONT_FACE = cv2.FONT_HERSHEY_SIMPLEX
    13. FONT_SCALE = 0.7
    14. THICKNESS = 1
    15.  
    16. # Colors
    17. BLACK = (0, 0, 0)
    18. BLUE = (255, 178, 50)
    19. YELLOW = (0, 255, 255)
    20. RED = (0, 0, 255)
    21.  
    22.  
    23. def draw_label(input_image, label, left, top):
    24.     """Draw text onto image at location."""
    25.  
    26.     # Get text size.
    27.     text_size = cv2.getTextSize(label, FONT_FACE, FONT_SCALE, THICKNESS)
    28.     dim, baseline = text_size[0], text_size[1]
    29.     # Use text size to create a BLACK rectangle.
    30.     cv2.rectangle(input_image, (left, top), (left + dim[0], top + dim[1] + baseline), BLACK, cv2.FILLED);
    31.     # Display text inside the rectangle.
    32.     cv2.putText(input_image, label, (left, top + dim[1]), FONT_FACE, FONT_SCALE, YELLOW, THICKNESS, cv2.LINE_AA)
    33.  
    34.  
    35. def pre_process(input_image, net):
    36.     # Create a 4D blob from a frame.
    37.     blob = cv2.dnn.blobFromImage(input_image, 1 / 255, (INPUT_WIDTH, INPUT_HEIGHT), [0, 0, 0], 1, crop=False)
    38.  
    39.     # Sets the input to the network.
    40.     net.setInput(blob)
    41.  
    42.     # Runs the forward pass to get output of the output layers.
    43.     output_layers = net.getUnconnectedOutLayersNames()
    44.     outputs = net.forward(output_layers)
    45.     # print(outputs[0].shape)
    46.  
    47.     return outputs
    48.  
    49.  
    50. def post_process(input_image, outputs):
    51.     # Lists to hold respective values while unwrapping.
    52.     class_ids = []
    53.     confidences = []
    54.     boxes = []
    55.  
    56.     # Rows.
    57.     rows = outputs[0].shape[1]
    58.  
    59.     image_height, image_width = input_image.shape[:2]
    60.  
    61.     # Resizing factor.
    62.     x_factor = image_width / INPUT_WIDTH
    63.     y_factor = image_height / INPUT_HEIGHT
    64.  
    65.     # Iterate through 25200 detections.
    66.     for r in range(rows):
    67.         row = outputs[0][0][r]
    68.         confidence = row[4]
    69.  
    70.         # Discard bad detections and continue.
    71.         if confidence >= CONFIDENCE_THRESHOLD:
    72.             classes_scores = row[5:]
    73.  
    74.             # Get the index of max class score.
    75.             class_id = np.argmax(classes_scores)
    76.  
    77.             #  Continue if the class score is above threshold.
    78.             if (classes_scores[class_id] > SCORE_THRESHOLD):
    79.                 confidences.append(confidence)
    80.                 class_ids.append(class_id)
    81.  
    82.                 cx, cy, w, h = row[0], row[1], row[2], row[3]
    83.  
    84.                 left = int((cx - w / 2) * x_factor)
    85.                 top = int((cy - h / 2) * y_factor)
    86.                 width = int(w * x_factor)
    87.                 height = int(h * y_factor)
    88.  
    89.                 box = np.array([left, top, width, height])
    90.                 boxes.append(box)
    91.  
    92.     # Perform non maximum suppression to eliminate redundant overlapping boxes with
    93.     # lower confidences.
    94.     indices = cv2.dnn.NMSBoxes(boxes, confidences, CONFIDENCE_THRESHOLD, NMS_THRESHOLD)
    95.     for i in indices:
    96.         box = boxes[i]
    97.         left = box[0]
    98.         top = box[1]
    99.         width = box[2]
    100.         height = box[3]
    101.         cv2.rectangle(input_image, (left, top), (left + width, top + height), BLUE, 3 * THICKNESS)
    102.         label = "{}:{:.2f}".format(classes[class_ids[i]], confidences[i])
    103.         draw_label(input_image, label, left, top)
    104.  
    105.     return input_image
    106.  
    107.  
    108. if __name__ == '__main__':
    109.     # Load class names.
    110.     classesFile = "coco.names"
    111.     classes = None
    112.     with open(classesFile, 'rt') as f:
    113.         classes = f.read().rstrip('\n').split('\n')
    114.  
    115.     # Load image.
    116.     frame = cv2.imread('sample.jpg')
    117.  
    118.     # Give the weight files to the model and load the network using them.
    119.     modelWeights = "yolov5s.onnx"
    120.     net = cv2.dnn.readNet(modelWeights)
    121.  
    122.     #if not net.empty():
    123.     #    print('Net loaded successfully\n')
    124.     #    print('Net contains:')
    125.     #    for t in net.getLayerTypes():
    126.     #        print('\t%d layers of type %s' % (net.getLayersCount(t), t))
    127.  
    128.     # Process image.
    129.     detections = pre_process(frame, net)
    130.     img = post_process(frame.copy(), detections)
    131.  
    132.     # Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
    133.     t, _ = net.getPerfProfile()
    134.     label = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())
    135.     print(label)
    136.     cv2.putText(img, label, (20, 40), FONT_FACE, FONT_SCALE, RED, THICKNESS, cv2.LINE_AA)
    137.  
    138.     cv2.imshow('Output', img)
    139.     cv2.waitKey(0)
     
    Last edited: Jan 15, 2023
  19. WayneVenter

    WayneVenter

    Joined:
    May 8, 2019
    Posts:
    56
    So I decided to move on to MobileNetSSD to see if that can work; also no luck. I get the following error:

    Mat::n_1reshape__JII() : OpenCV(4.7.0-dev) C:\Users\satoo\Desktop\opencv\modules\core\src\matrix.cpp:1240: error: (-209:Sizes of input arguments do not match) Requested and source matrices have different count of elements in function 'cv::Mat::reshape'

    CvException: An error occurred on the C++ side, causing the class initialization to fail.Enclose the point where the error occurs in Utils.setDebugMode() method, and the error on the C++ side will be displayed on the console.

    I used this to convert https://github.com/dusty-nv/pytorch-ssd
    And this https://github.com/dusty-nv/jetson-inference/blob/dev/docs/pytorch-ssd.md

    It's like no models other than the sample ones in the project work. Can you do a proper example for object detection using an .onnx file, either MobileNetSSD or YOLO?
     
  20. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557

    As you said, the YOLOv5 model will not work without modifying the code in the post-processing part.
    We have created a new "YOLOv5ObjectDetectionExample"; please add it to your project after setup and try it.

    YOLOv5ObjectDetectonExample1.PNG YOLOv5ObjectDetectonExample2.PNG
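    Very roughly, the post-processing change involved looks like the sketch below. It follows the same logic as the Python post_process posted above; it is not the actual example code, and dets, the thresholds, and the image dimensions are placeholders (dets is assumed to be the network output reshaped to [25200, 5 + classes] as CV_32F).

    Code (CSharp):
    // Requires OpenCVForUnity.CoreModule, OpenCVForUnity.DnnModule and System.Collections.Generic.
    List<Rect2d> boxes = new List<Rect2d>();
    List<float> confidences = new List<float>();
    List<int> classIds = new List<int>();

    float xFactor = imageWidth / 640f;   // 640x640 network input assumed
    float yFactor = imageHeight / 640f;

    float[] row = new float[dets.cols()];
    for (int r = 0; r < dets.rows(); r++)
    {
        dets.get(r, 0, row);
        float objectness = row[4];
        if (objectness < confThreshold) continue;

        // Pick the best class score among the remaining columns.
        int classId = 0;
        float maxScore = 0f;
        for (int c = 5; c < row.Length; c++)
            if (row[c] > maxScore) { maxScore = row[c]; classId = c - 5; }
        if (maxScore <= scoreThreshold) continue;

        // Convert the center/size encoding to a top-left box in original image coordinates.
        float cx = row[0], cy = row[1], w = row[2], h = row[3];
        boxes.Add(new Rect2d((cx - w / 2f) * xFactor, (cy - h / 2f) * yFactor, w * xFactor, h * yFactor));
        confidences.Add(objectness);
        classIds.Add(classId);
    }

    // Non-maximum suppression, the counterpart of cv2.dnn.NMSBoxes in the Python version.
    MatOfRect2d boxesMat = new MatOfRect2d(boxes.ToArray());
    MatOfFloat confsMat = new MatOfFloat(confidences.ToArray());
    MatOfInt indices = new MatOfInt();
    Dnn.NMSBoxes(boxesMat, confsMat, confThreshold, nmsThreshold, indices);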
     

    Attached Files:

  21. WayneVenter

    WayneVenter

    Joined:
    May 8, 2019
    Posts:
    56
    @EnoxSoftware you guys are absolute rock stars! Thank you. I created a 3D model in Blender, animated it to rotate, exported the results, fed them into Roboflow, labeled, exported to PyTorch, trained a custom model using YOLOv5, exported to .onnx format, loaded it into Unity and voilà ... custom object detection using YOLOv5 and an ONNX file with your plugin!
    upload_2023-1-17_14-44-13.png upload_2023-1-17_14-44-40.png
     
    EnoxSoftware likes this.
  22. WayneVenter

    WayneVenter

    Joined:
    May 8, 2019
    Posts:
    56
    Here is a gif of how it's working
     

    Attached Files:

    EnoxSoftware likes this.
  23. fea777

    fea777

    Joined:
    Jun 29, 2015
    Posts:
    14
    I am trying to do text detection for other languages such as Korean.
    Do you have any suggestions?
     
  24. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    OpenCVForUnity includes a TextOCRExample using the Dnn module, but the model files are only available for English and Chinese.
    https://github.com/EnoxSoftware/Ope...Unity/Examples/MainModules/dnn/TextOCRExample
    https://github.com/opencv/opencv_zoo/tree/master/models/text_recognition_crnn
    If you want to recognize Korean text, you need to train your model files for Korean.
    • text_recognition_CRNN_EN_2021sep.onnx can detect digits (0~9) and letters (return lowercase letters a~z) (view charset_36_EN.txt for details).
    • text_recognition_CRNN_CH_2021sep.onnx can detect digits (0~9), upper/lower-case letters (a~z and A~Z), and some special characters (view charset_94_CH.txt for details).
    • text_recognition_CRNN_CN_2021nov.onnx can detect digits (0~9), upper/lower-case letters (a~z and A~Z), some Chinese characters and some special characters (view charset_3944_CN.txt for details).
    • For details on training this model series, please visit https://github.com/zihaomu/deep-text-recognition-benchmark.
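    For reference, these CRNN models are wired up through the dnn module's TextRecognitionModel class roughly as in the sketch below. This is not the exact TextOCRExample code; the paths and input parameters are taken from the opencv_zoo description and should be double-checked, and croppedGray is a placeholder for an already-cropped text region.

    Code (CSharp):
    // Requires OpenCVForUnity.CoreModule, OpenCVForUnity.DnnModule, OpenCVForUnity.UnityUtils and System.Collections.Generic.
    TextRecognitionModel model = new TextRecognitionModel(Utils.getFilePath("text_recognition_CRNN_CH_2021sep.onnx"));
    model.setDecodeType("CTC-greedy");

    // The vocabulary must match the model (charset_94_CH.txt goes with the CH model).
    List<string> vocabulary = new List<string>(System.IO.File.ReadAllLines(Utils.getFilePath("charset_94_CH.txt")));
    model.setVocabulary(vocabulary);

    // Preprocessing parameters as described in opencv_zoo (scale, input size, mean).
    model.setInputParams(1.0 / 127.5, new Size(100, 32), new Scalar(127.5));

    string recognizedText = model.recognize(croppedGray);
    Debug.Log(recognizedText);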
     
  25. karmatha

    karmatha

    Joined:
    Aug 25, 2012
    Posts:
    50
    Hi, I'm looking at the GrabCutExample but I notice the cut-out area is actually black. How can I get this to be transparent? I started to look into doing a bitwise AND operation but I couldn't figure it out, honestly. Am I overlooking something?

    upload_2023-1-24_18-7-24.png
     
  26. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    To make the background portion of the image transparent, the color format of the final image must be RGBA.
    Try replacing lines 50-51 of the code with the following code.

    Code (CSharp):
    1.  
    2.             Mat foreground = new Mat(image.size(), CvType.CV_8UC4, new Scalar(0, 0, 0, 0));
    3.             Mat image_rgba = new Mat();
    4.             Imgproc.cvtColor(image, image_rgba, Imgproc.COLOR_RGB2RGBA);
    5.             image_rgba.copyTo(foreground, mask);
    6.  
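    If the cut-out still appears black after that, it may just be the display side: the foreground Mat has to end up in an alpha-capable Texture2D. A minimal sketch:

    Code (CSharp):
    Texture2D texture = new Texture2D(foreground.cols(), foreground.rows(), TextureFormat.RGBA32, false);
    Utils.matToTexture2D(foreground, texture);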
     
    karmatha likes this.
  27. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    WayneVenter and ibompuis like this.
  28. karmatha

    karmatha

    Joined:
    Aug 25, 2012
    Posts:
    50
    That did the trick, thanks so much!
     
  29. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
  30. WayneVenter

    WayneVenter

    Joined:
    May 8, 2019
    Posts:
    56
    @EnoxSoftware wow, thank you. I was working on segmentation the last couple of weeks, you guys are great! Will definitely be using this in my projects to highlight the detected objects. When I work out the most optimal training workflow I'll share it with you; maybe you can add it as a readme.md or PDF in a future release. I found a lot of examples of how to do detection/segmentation using an existing/pre-trained model, but not many on how to create, label, and train your own models and then deploy them.
     
  31. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    You can start here:
    use Roboflow to create your dataset and label it, then train with https://hub.ultralytics.com/
    The first video explains very well everything you need to create and train a custom data model.
    Both can be used for free to start.

    I used YOLOv5 (and v4 before) on a custom Unity Linux server, called via a REST API, and it all works very well! Thanks @EnoxSoftware and Unity :)))
     
  32. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    @EnoxSoftware I created an .onnx model with Ultralytics and tried to use it with the last example you provided, but no result appears and there is no error message. Can you check my model to see if something is wrong?
    I tried YOLOv5n and YOLOv5x model exports in Ultralytics.
    The model works fine on Ultralytics.
    https://we.tl/t-mFMo2dPdoU
     
  33. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    Model details:

    ONNX v7
    pytorch 1.11.0
    ai.onnx v12
    32
    {0: 'ange', 1: 'bouda', 2: 'colonial', 3: 'grece', 4: 'tree', 5: 'vase', 6: 'venise'}
    INPUTS
    -
    name: images
    type: float32[1,3,640,640]
    OUTPUTS
    -
    name: output
    type: float32[1,25200,12]
     
  34. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    A new version of the example has been released that supports a variable number of class elements.
    https://github.com/EnoxSoftware/YOLOv5WithOpenCVForUnityExample
    In our tests your custom model seems to work fine.
     
    ibompuis likes this.
  35. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
  36. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
  37. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    Another question :rolleyes:
    Sometimes I have a good prediction, like 0.92, but the class label does not appear on the image box, just the prediction number.
    The object named "ange" with class id 0 is detected with 0.90, but the classID logged is 18, for example...
     
  38. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    A new version of the YOLOv5 example is now available, which fixes a bug that causes incorrect class inference results when using a custom model.
    https://github.com/EnoxSoftware/YOLOv5WithOpenCVForUnityExample
    Your checks helped us to find the fatal bug. Thank you very much!
     
    ibompuis likes this.
  39. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    My pleasure :D
    The first model works fine, but my last model with only one class does not work with the latest version: no detection, no error, etc. Did you check my last WeTransfer?
     
  40. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    Here are the inference results for the one-class model I ran in my environment.
    YOLOv5_1calass_model_settings.JPG YOLOv5_1calass_model.JPG
    The output is the same as the inference by Python in the original YOLOv5, so I think there is no mistake.

    Inference result by YOLOv5/detect.py:
    person.jpg
     
    Last edited: Feb 9, 2023
  41. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    Strange behaviour...

    As you can see here, on Ultralytics I get a result too, but if I try the image in Unity there is no result.

    01.jpg

    the image for the test:

    IMG_4046_conv (9).jpg
     
  42. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    Like I said, there is no result in Unity with the referenced, trained image.

    02.jpg
     
  43. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    The inference results run on UnityEditor in my environment seem to be correct.
    YOLOv5_1calass_model_2.JPG

    Hmmm, is it possible that the script is not the latest version?

    Could you please let me know your Unity environment?
    OS:
    Unity Editor ver:
    OpenCVForUnity ver:
    YOLOv5WithOpenCVForUnityExample ver:
     
  44. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    Yeah, I don't understand it. I'm trying to figure out why this does not work, and why only for the bottle model...
    The code is based strictly on your example; I just added some behaviour for the REST image request.

    OS: macOS
    Unity Editor ver: 2021.3.5f1
    OpenCVForUnity ver: latest version on the Asset Store
    YOLOv5WithOpenCVForUnityExample ver: 1.0.2

    Please find the modified script I use below.
     

    Attached Files:

    Last edited: Feb 9, 2023
  45. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    I started a fresh project with only your assets (OpenCV and the YOLO 1.0.2 package) and the same problem...

    OS: Monterey 12.1
    Model name: MacBook Pro
    Model identifier: MacBookPro16,1
    Processor name: 8-core Intel Core i9
    Processor speed: 2.3 GHz
    Number of processors: 1
    Total number of cores: 8
    Level 2 cache (per core): 256 KB
    Level 3 cache: 16 MB
    Hyper-Threading Technology: Enabled
    Memory: 16 GB
    System firmware version: 1715.60.5.0.0 (iBridge: 19.16.10647.0.0,0)
    OS loader version: 540.60.2~89

    01.jpg
     
  46. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    And now the same new project with the other model is working. Crazy thing :confused::confused::confused:

    02.jpg
     
  47. EnoxSoftware

    EnoxSoftware

    Joined:
    Oct 29, 2014
    Posts:
    1,557
    Here is the result of creating a new project on my Mac and testing it.
    スクリーンショット 2023-02-10 3.47.55.png

    my environment:
    Model name: MacBook Pro
    Model ID: MacBookPro12,1
    Processor Name: Dual Core Intel Core i5
    Processor speed: 2.7GHz
    Number of processors: 1
    Total number of cores: 2
    Level 2 cache (per core): 256 KB
    Tertiary cache: 3 MB
    Hyper-Threading Technology: Enabled
    Memory: 8GB
    System Firmware Version: 430.140.3.0.0
    OS loader version: 540.120.3~19
    SMC version (system): 2.28f7
     
  48. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    I really don't understand...
    Did you see anything wrong in this: [RELEASED] OpenCV for Unity
     
    Last edited: Feb 9, 2023
  49. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    After some tests it appears that:

    Code (CSharp):
    1. Mat img = Imgcodecs.imread(Application.persistentDataPath+tempFileName);
    is not working, but this is working:

    Code (CSharp):
    1. string testImg = Utils.getFilePath("_OD/" + "test.jpg");
    2.         Mat img = Imgcodecs.imread(testImg);
    When I debug "Application.persistentDataPath+tempFileName", the file is present in the directory.

    Why does this work with all the images I use for the other model and not here?
    All my images are .jpg.
    It's just crazy :confused::confused::confused:


    Code (CSharp):
    1. Utils.setDebugMode(true);
    2.  
    3.         objectDetector = new YOLOv5ObjectDetector(request, result, model_filepath, classes_filepath, new Size(inpWidth, inpHeight), confThreshold, nmsThreshold, topK);
    4.  
    5.         //Mat img = Imgcodecs.imread(Application.persistentDataPath+tempFileName);
    6.         Debug.Log(Application.persistentDataPath+tempFileName);
    7.        
    8.         // TEST
    9.         string testImg = Utils.getFilePath("_OD/" + "test.jpg");
    10.         Mat img = Imgcodecs.imread(testImg);
    11.        
    12.         TickMeter tm = new TickMeter();
    13.         tm.start();
    14.  
    15.         Mat results = objectDetector.infer(img);
     
  50. ibompuis

    ibompuis

    Joined:
    Sep 13, 2012
    Posts:
    100
    @EnoxSoftware

    I found the problem:

    if I physically put the .jpg file in persistentDataPath and use

    Code (CSharp):
    1. Mat img = Imgcodecs.imread(Application.persistentDataPath+"/test.jpg");
    detection works fine.

    but when I use:

    Code (CSharp):
    1. byte[] bytes = imgTexture.EncodeToJPG();
    2. File.WriteBytes(tempFileName, bytes);
    The file is created in persistentDataPath but detection does not work.

    But again, this only happens with the jpgs from the second model... all images from the first model work fine :confused::confused::confused:
    The only difference between the images is size: the first model's jpg images are 4032 × 3024 and the second model's images are 640 × 853
    :(
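    For reference, the minimal write-then-read round trip I am comparing against looks like this (a sketch with my own names; whether EncodeToJPG itself changes the image enough to matter for the one-class model, I can't say):

    Code (CSharp):
    // Write the texture to persistentDataPath and read it straight back with OpenCV.
    // imgTexture must be a readable Texture2D for EncodeToJPG to work.
    byte[] bytes = imgTexture.EncodeToJPG();
    string fullPath = System.IO.Path.Combine(Application.persistentDataPath, "temp.jpg");
    System.IO.File.WriteAllBytes(fullPath, bytes);

    Mat img = Imgcodecs.imread(fullPath);
    Debug.Log("loaded: " + !img.empty() + " size: " + img.size());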