From d0e8bf65dd351fadadcc23f01dbf00d9f988e12f Mon Sep 17 00:00:00 2001 From: rainyl Date: Sun, 19 Jan 2025 18:59:33 +0800 Subject: [PATCH] finish synchrous functions --- packages/dartcv/lib/calib3d.dart | 1 + packages/dartcv/lib/src/calib3d/calib3d.dart | 1707 ++++++++++-- .../dartcv/lib/src/calib3d/calib3d_async.dart | 889 ++++-- packages/dartcv/lib/src/calib3d/fisheye.dart | 463 +++- .../dartcv/lib/src/calib3d/usac_params.dart | 132 + packages/dartcv/lib/src/core/point.dart | 35 + packages/dartcv/lib/src/g/calib3d.g.dart | 2449 +++++++++++++++-- packages/dartcv/lib/src/g/calib3d.yaml | 116 + packages/dartcv/lib/src/g/constants.g.dart | 84 - packages/dartcv/lib/src/g/core.g.dart | 15 + packages/dartcv/lib/src/g/core.yaml | 2 + packages/dartcv/lib/src/g/types.g.dart | 52 + packages/dartcv/lib/src/g/types.yaml | 10 + packages/dartcv/lib/src/g/videoio.g.dart | 35 + packages/dartcv/lib/src/g/videoio.yaml | 4 + packages/dartcv/lib/src/videoio/videoio.dart | 51 + .../calib3d_async_test.dart} | 0 .../test/calib3d/calib3d_fisheye_test.dart | 252 ++ .../dartcv/test/calib3d/calib3d_test.dart | 599 ++++ 19 files changed, 6179 insertions(+), 717 deletions(-) create mode 100644 packages/dartcv/lib/src/calib3d/usac_params.dart rename packages/dartcv/test/{calib3d_test.dart => calib3d/calib3d_async_test.dart} (100%) create mode 100644 packages/dartcv/test/calib3d/calib3d_fisheye_test.dart create mode 100644 packages/dartcv/test/calib3d/calib3d_test.dart diff --git a/packages/dartcv/lib/calib3d.dart b/packages/dartcv/lib/calib3d.dart index a741b7bf..9f653974 100644 --- a/packages/dartcv/lib/calib3d.dart +++ b/packages/dartcv/lib/calib3d.dart @@ -7,3 +7,4 @@ library dartcv.calib3d; export 'src/calib3d/calib3d.dart'; export 'src/calib3d/calib3d_async.dart'; export 'src/calib3d/fisheye.dart'; +export 'src/calib3d/usac_params.dart'; diff --git a/packages/dartcv/lib/src/calib3d/calib3d.dart b/packages/dartcv/lib/src/calib3d/calib3d.dart index ede79450..cbe44aec 100644 --- 
a/packages/dartcv/lib/src/calib3d/calib3d.dart +++ b/packages/dartcv/lib/src/calib3d/calib3d.dart @@ -10,81 +10,22 @@ import 'package:ffi/ffi.dart'; import '../core/base.dart'; import '../core/contours.dart'; +import '../core/cv_vec.dart'; import '../core/mat.dart'; import '../core/point.dart'; import '../core/rect.dart'; +import '../core/scalar.dart'; import '../core/size.dart'; import '../core/termcriteria.dart'; import '../g/constants.g.dart'; import '../g/types.g.dart' as cvg; import '../native_lib.dart' show ccalib3d; +import 'usac_params.dart'; -/// InitUndistortRectifyMap computes the joint undistortion and rectification transformation and represents the result in the form of maps for remap -/// -/// For further details, please see: -/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7dfb72c9cf9780a347fbe3d1c47e5d5a -(Mat map1, Mat map2) initUndistortRectifyMap( - InputArray cameraMatrix, - InputArray distCoeffs, - InputArray R, - InputArray newCameraMatrix, - (int, int) size, - int m1type, { - OutputArray? map1, - OutputArray? map2, -}) { - map1 ??= Mat.empty(); - map2 ??= Mat.empty(); - cvRun( - () => ccalib3d.cv_initUndistortRectifyMap( - cameraMatrix.ref, - distCoeffs.ref, - R.ref, - newCameraMatrix.ref, - size.cvd.ref, - m1type, - map1!.ref, - map2!.ref, - ffi.nullptr, - ), - ); - return (map1, map2); -} - -/// GetOptimalNewCameraMatrixWithParams computes and returns the optimal new camera matrix based on the free scaling parameter. +/// CalibrateCamera finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern. 
/// /// For further details, please see: -/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7a6c4e032c97f03ba747966e6ad862b1 -(Mat rval, Rect validPixROI) getOptimalNewCameraMatrix( - InputArray cameraMatrix, - InputArray distCoeffs, - (int, int) imageSize, - double alpha, { - (int, int) newImgSize = (0, 0), - bool centerPrincipalPoint = false, -}) { - final validPixROI = calloc(); - final rval = Mat.empty(); - cvRun( - () => ccalib3d.cv_getOptimalNewCameraMatrix( - cameraMatrix.ref, - distCoeffs.ref, - imageSize.cvd.ref, - alpha, - newImgSize.cvd.ref, - validPixROI, - centerPrincipalPoint, - rval.ptr, - ffi.nullptr, - ), - ); - return (rval, Rect.fromPointer(validPixROI)); -} - -// CalibrateCamera finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern. -// -// For further details, please see: -// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d +/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d (double rmsErr, Mat cameraMatrix, Mat distCoeffs, Mat rvecs, Mat tvecs) calibrateCamera( Contours3f objectPoints, Contours2f imagePoints, @@ -120,150 +61,148 @@ import '../native_lib.dart' show ccalib3d; return (rmsErr, cameraMatrix, distCoeffs, rvecs, tvecs); } -// Transforms an image to compensate for lens distortion. -// The function transforms an image to compensate radial and tangential lens distortion. -// The function is simply a combination of initUndistortRectifyMap (with unity R ) and remap (with bilinear interpolation). See the former function for details of the transformation being performed. -// Those pixels in the destination image, for which there is no correspondent pixels in the source image, are filled with zeros (black color). -// A particular subset of the source image that will be visible in the corrected image can be regulated by newCameraMatrix. 
You can use getOptimalNewCameraMatrix to compute the appropriate newCameraMatrix depending on your requirements. -// The camera matrix and the distortion parameters can be determined using calibrateCamera. If the resolution of images is different from the resolution used at the calibration stage, fx,fy,cx and cy need to be scaled accordingly, while the distortion coefficients remain the same. -Mat undistort( - InputArray src, - InputArray cameraMatrix, - InputArray distCoeffs, { - OutputArray? dst, - InputArray? newCameraMatrix, -}) { +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#gacd8162cfd39138d0bc29e4b53d080673 +bool checkChessboard(Mat img, Size size) => ccalib3d.cv_checkChessboard(img.ref, size.ref); + +/// For points in an image of a stereo pair, computes the corresponding epilines in the other image. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga19e3401c94c44b47c229be6e51d158b7 +Mat computeCorrespondEpilines(InputArray points, int whichImage, InputArray F, {OutputArray? lines}) { + lines ??= Mat.empty(); + cvRun(() => ccalib3d.cv_computeCorrespondEpilines(points.ref, whichImage, F.ref, lines!.ref, ffi.nullptr)); + return lines; +} + +Mat convertPointsFromHomogeneous(InputArray src, {OutputArray? dst}) { dst ??= Mat.empty(); - newCameraMatrix ??= Mat.empty(); - cvRun( - () => ccalib3d.cv_undistort( - src.ref, - dst!.ref, - cameraMatrix.ref, - distCoeffs.ref, - newCameraMatrix!.ref, - ffi.nullptr, - ), - ); + cvRun(() => ccalib3d.cv_convertPointsFromHomogeneous(src.ref, dst!.ref, ffi.nullptr)); return dst; } -// UndistortPoints transforms points to compensate for lens distortion -// -// For further details, please see: -// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga55c716492470bfe86b0ee9bf3a1f0f7e -Mat undistortPoints( - InputArray src, - InputArray cameraMatrix, - InputArray distCoeffs, { - OutputArray? dst, - InputArray? R, - InputArray? 
P, - (int type, int count, double eps) criteria = (TERM_COUNT + TERM_EPS, 30, 1e-4), -}) { - R ??= Mat.empty(); - P ??= Mat.empty(); +Mat convertPointsHomogeneous(InputArray src, {OutputArray? dst}) { dst ??= Mat.empty(); - final tc = criteria.cvd; + cvRun(() => ccalib3d.cv_convertPointsHomogeneous(src.ref, dst!.ref, ffi.nullptr)); + return dst; +} + +// void cv::convertPointsToHomogeneous (InputArray src, OutputArray dst); +Mat convertPointsToHomogeneous(InputArray src, {OutputArray? dst}) { + dst ??= Mat.empty(); + cvRun(() => ccalib3d.cv_convertPointsToHomogeneous(src.ref, dst!.ref, ffi.nullptr)); + return dst; +} + +// void cv::correctMatches (InputArray F, InputArray points1, InputArray points2, OutputArray newPoints1, OutputArray newPoints2); +(Mat newPoints1, Mat newPoints2) correctMatches( + Mat F, + InputArray points1, + InputArray points2, { + OutputArray? newPoints1, + OutputArray? newPoints2, +}) { + newPoints1 ??= Mat.empty(); + newPoints2 ??= Mat.empty(); cvRun( - () => ccalib3d.cv_undistortPoints( - src.ref, - dst!.ref, - cameraMatrix.ref, - distCoeffs.ref, - R!.ref, - P!.ref, - tc.ref, + () => ccalib3d.cv_correctMatches( + F.ref, + points1.ref, + points2.ref, + newPoints1!.ref, + newPoints2!.ref, ffi.nullptr, ), ); - return dst; + return (newPoints1, newPoints2); } -// FindChessboardCorners finds the positions of internal corners of the chessboard. -// -// For further details, please see: -// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a -(bool success, VecPoint2f corners) findChessboardCorners( - InputArray image, - (int, int) patternSize, { - VecPoint2f? corners, - int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE, +// void cv::decomposeEssentialMat (InputArray E, OutputArray R1, OutputArray R2, OutputArray t); +(Mat r1, Mat r2, Mat t) decomposeEssentialMat( + Mat E, { + OutputArray? R1, + OutputArray? R2, + OutputArray? 
t, }) { - corners ??= VecPoint2f(); - final r = calloc(); + R1 ??= Mat.empty(); + R2 ??= Mat.empty(); + t ??= Mat.empty(); cvRun( - () => ccalib3d.cv_findChessboardCorners( - image.ref, - patternSize.cvd.ref, - corners!.ptr, - flags, - r, + () => ccalib3d.cv_decomposeEssentialMat( + E.ref, + R1!.ref, + R2!.ref, + t!.ref, ffi.nullptr, ), ); - final rval = r.value; - calloc.free(r); - return (rval, corners); + return (R1, R2, t); } -// Finds the positions of internal corners of the chessboard using a sector based approach. -// https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#gadc5bcb05cb21cf1e50963df26986d7c9 -(bool, VecPoint2f corners) findChessboardCornersSB( - InputArray image, - (int, int) patternSize, { - int flags = 0, - VecPoint2f? corners, +// int cv::decomposeHomographyMat (InputArray H, InputArray K, OutputArrayOfArrays rotations, OutputArrayOfArrays translations, OutputArrayOfArrays normals) +(int rval, VecMat rotations, VecMat translations, VecMat normals) decomposeHomographyMat( + Mat H, + Mat K, { + VecMat? rotations, + VecMat? translations, + VecMat? normals, }) { - final corners = VecPoint2f(); - final p = calloc(); + rotations ??= VecMat(); + translations ??= VecMat(); + normals ??= VecMat(); + final prval = calloc(); cvRun( - () => ccalib3d.cv_findChessboardCornersSB( - image.ref, - patternSize.toSize().ref, - corners.ptr, - flags, - p, + () => ccalib3d.cv_decomposeHomographyMat( + H.ref, + K.ref, + rotations!.ref, + translations!.ref, + normals!.ref, + prval, ffi.nullptr, ), ); - final rval = p.value; - calloc.free(p); - return (rval, corners); + final rval = prval.value; + calloc.free(prval); + return (rval, rotations, translations, normals); } -// Finds the positions of internal corners of the chessboard using a sector based approach. 
-// https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#gadc5bcb05cb21cf1e50963df26986d7c9 -(bool, VecPoint2f corners, Mat meta) findChessboardCornersSBWithMeta( - InputArray image, - (int, int) patternSize, - int flags, { - VecPoint2f? corners, - OutputArray? meta, +// void cv::decomposeProjectionMatrix (InputArray projMatrix, OutputArray cameraMatrix, OutputArray rotMatrix, OutputArray transVect, OutputArray rotMatrixX=noArray(), OutputArray rotMatrixY=noArray(), OutputArray rotMatrixZ=noArray(), OutputArray eulerAngles=noArray()) +(Mat cameraMatrix, Mat rotMatrix, Mat transVect) decomposeProjectionMatrix( + Mat projMatrix, { + OutputArray? cameraMatrix, + OutputArray? rotMatrix, + OutputArray? transVect, + OutputArray? rotMatrixX, + OutputArray? rotMatrixY, + OutputArray? rotMatrixZ, + OutputArray? eulerAngles, }) { - corners ??= VecPoint2f(); - meta ??= Mat.empty(); - final b = calloc(); + cameraMatrix ??= Mat.empty(); + rotMatrix ??= Mat.empty(); + transVect ??= Mat.empty(); + rotMatrixX ??= Mat.empty(); + rotMatrixY ??= Mat.empty(); + rotMatrixZ ??= Mat.empty(); + eulerAngles ??= Mat.empty(); cvRun( - () => ccalib3d.cv_findChessboardCornersSB_1( - image.ref, - patternSize.cvd.ref, - corners!.ptr, - flags, - meta!.ref, - b, + () => ccalib3d.cv_decomposeProjectionMatrix( + projMatrix.ref, + cameraMatrix!.ref, + rotMatrix!.ref, + transVect!.ref, + rotMatrixX!.ref, + rotMatrixY!.ref, + rotMatrixZ!.ref, + eulerAngles!.ref, ffi.nullptr, ), ); - final rval = b.value; - calloc.free(b); - return (rval, corners, meta); + return (cameraMatrix, rotMatrix, transVect); } -// DrawChessboardCorners renders the detected chessboard corners. -// -// For further details, please see: -// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga6a10b0bb120c4907e5eabbcd22319022 +/// DrawChessboardCorners renders the detected chessboard corners. 
+/// +/// For further details, please see: +/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga6a10b0bb120c4907e5eabbcd22319022 Mat drawChessboardCorners( InputOutputArray image, (int, int) patternSize, @@ -282,11 +221,38 @@ Mat drawChessboardCorners( return image; } -// EstimateAffinePartial2D computes an optimal limited affine transformation -// with 4 degrees of freedom between two 2D point sets. -// -// For further details, please see: -// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#gad767faff73e9cbd8b9d92b955b50062d +/// Draw axes of the world/object coordinate system from pose estimation. +/// +/// For further details, please see: +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#gab3ab7bb2bdfe7d5d9745bb92d13f9564 +void drawFrameAxes( + Mat image, + Mat cameraMatrix, + Mat distCoeffs, + Mat rvec, + Mat tvec, + double length, { + int thickness = 3, +}) { + return cvRun( + () => ccalib3d.cv_drawFrameAxes( + image.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvec.ref, + tvec.ref, + length, + thickness, + ffi.nullptr, + ), + ); +} + +/// EstimateAffinePartial2D computes an optimal limited affine transformation +/// with 4 degrees of freedom between two 2D point sets. +/// +/// For further details, please see: +/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#gad767faff73e9cbd8b9d92b955b50062d (Mat, Mat inliers) estimateAffinePartial2D( VecPoint2f from, VecPoint2f to, { @@ -316,10 +282,10 @@ Mat drawChessboardCorners( return (rval, inliers); } -// EstimateAffine2D Computes an optimal affine transformation between two 2D point sets. -// -// For further details, please see: -// https://docs.opencv.org/4.0.0/d9/d0c/group__calib3d.html#ga27865b1d26bac9ce91efaee83e94d4dd +/// EstimateAffine2D Computes an optimal affine transformation between two 2D point sets. 
+/// +/// For further details, please see: +/// https://docs.opencv.org/4.0.0/d9/d0c/group__calib3d.html#ga27865b1d26bac9ce91efaee83e94d4dd (Mat, Mat inliers) estimateAffine2D( VecPoint2f from, VecPoint2f to, { @@ -349,33 +315,1360 @@ Mat drawChessboardCorners( return (rval, inliers); } -/// FindHomography finds an optimal homography matrix using 4 or more point pairs (as opposed to GetPerspectiveTransform, which uses exactly 4) +/// Computes an optimal affine transformation between two 3D point sets. /// -/// For further details, please see: -/// https:///docs.opencv.org/master/d9/d0c/group__calib3d.html#ga4abc2ece9fab9398f2e560d53c8c9780 -Mat findHomography( - InputArray srcPoints, - InputArray dstPoints, { - int method = 0, - double ransacReprojThreshold = 3, - OutputArray? mask, - int maxIters = 2000, - double confidence = 0.995, +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#gac12d1f05b3bb951288e7250713ce98f0 +(int rval, Mat, Mat inliers) estimateAffine3D( + Mat src, + Mat dst, { + Mat? out, + Mat? inliers, + double ransacThreshold = 3, + double confidence = 0.99, }) { - mask ??= Mat.empty(); - final mat = Mat.empty(); + out ??= Mat.empty(); + inliers ??= Mat.empty(); + final p = calloc(); cvRun( - () => ccalib3d.cv_findHomography( - srcPoints.ref, - dstPoints.ref, - method, - ransacReprojThreshold, - mask!.ref, - maxIters, + () => ccalib3d.cv_estimateAffine3D_1( + src.ref, + dst.ref, + out!.ref, + inliers!.ref, + ransacThreshold, confidence, - mat.ptr, + p, ffi.nullptr, ), ); - return mat; + final rval = p.value; + calloc.free(p); + return (rval, out, inliers); +} + +// Scalar cv::estimateChessboardSharpness (InputArray image, Size patternSize, InputArray corners, float rise_distance=0.8F, bool vertical=false, OutputArray sharpness=noArray()) +Scalar estimateChessboardSharpness( + InputArray image, + (int, int) patternSize, + InputArray corners, { + double riseDistance = 0.8, + bool vertical = false, + OutputArray? 
sharpness, +}) { + sharpness ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_estimateChessboardSharpness( + image.ref, + patternSize.cvd.ref, + corners.ref, + riseDistance, + vertical, + sharpness!.ref, + prval, + ffi.nullptr, + ), + ); + return Scalar.fromPointer(prval); } + +// int cv::estimateTranslation3D (InputArray src, InputArray dst, OutputArray out, OutputArray inliers, double ransacThreshold=3, double confidence=0.99) +(int rval, Mat out, Mat inliers) estimateTranslation3D( + InputArray src, + InputArray dst, { + OutputArray? out, + OutputArray? inliers, + double ransacThreshold = 3, + double confidence = 0.99, +}) { + out ??= Mat.empty(); + inliers ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_estimateTranslation3D( + src.ref, + dst.ref, + out!.ref, + inliers!.ref, + ransacThreshold, + confidence, + prval, + ffi.nullptr, + ), + ); + return (prval.value, out, inliers); +} + +// void cv::filterHomographyDecompByVisibleRefpoints (InputArrayOfArrays rotations, InputArrayOfArrays normals, InputArray beforePoints, InputArray afterPoints, OutputArray possibleSolutions, InputArray pointsMask=noArray()) +Mat filterHomographyDecompByVisibleRefpoints( + VecMat rotations, + VecMat normals, + InputArray beforePoints, + InputArray afterPoints, { + OutputArray? possibleSolutions, + InputArray? pointsMask, +}) { + possibleSolutions ??= Mat.empty(); + pointsMask ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_filterHomographyDecompByVisibleRefpoints( + rotations.ref, + normals.ref, + beforePoints.ref, + afterPoints.ref, + possibleSolutions!.ref, + pointsMask!.ref, + ffi.nullptr, + ), + ); + return possibleSolutions; +} + +// void cv::filterSpeckles (InputOutputArray img, double newVal, int maxSpeckleSize, double maxDiff, InputOutputArray buf=noArray()) +void filterSpeckles( + InputOutputArray img, + double newVal, + int maxSpeckleSize, + double maxDiff, { + OutputArray? 
buf, +}) { + buf ??= Mat.empty(); + return cvRun( + () => ccalib3d.cv_filterSpeckles( + img.ref, + newVal, + maxSpeckleSize, + maxDiff, + buf!.ref, + ffi.nullptr, + ), + ); +} + +// bool cv::find4QuadCornerSubpix (InputArray img, InputOutputArray corners, Size region_size) +bool find4QuadCornerSubpix( + InputArray img, + InputOutputArray corners, + (int, int) regionSize, +) { + final prval = calloc(); + cvRun( + () => ccalib3d.cv_find4QuadCornerSubpix( + img.ref, + corners.ref, + regionSize.cvd.ref, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return rval; +} + +/// FindChessboardCorners finds the positions of internal corners of the chessboard. +/// +/// For further details, please see: +/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a +(bool success, VecPoint2f corners) findChessboardCorners( + InputArray image, + (int, int) patternSize, { + VecPoint2f? corners, + int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE, +}) { + corners ??= VecPoint2f(); + final r = calloc(); + cvRun( + () => ccalib3d.cv_findChessboardCorners( + image.ref, + patternSize.cvd.ref, + corners!.ptr, + flags, + r, + ffi.nullptr, + ), + ); + final rval = r.value; + calloc.free(r); + return (rval, corners); +} + +/// Finds the positions of internal corners of the chessboard using a sector based approach. +/// +/// https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#gadc5bcb05cb21cf1e50963df26986d7c9 +(bool, VecPoint2f corners) findChessboardCornersSB( + InputArray image, + (int, int) patternSize, { + int flags = 0, + VecPoint2f? 
corners, +}) { + final corners = VecPoint2f(); + final p = calloc(); + cvRun( + () => ccalib3d.cv_findChessboardCornersSB( + image.ref, + patternSize.toSize().ref, + corners.ptr, + flags, + p, + ffi.nullptr, + ), + ); + final rval = p.value; + calloc.free(p); + return (rval, corners); +} + +/// Finds the positions of internal corners of the chessboard using a sector based approach. +/// +/// https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#gadc5bcb05cb21cf1e50963df26986d7c9 +(bool, VecPoint2f corners, Mat meta) findChessboardCornersSBWithMeta( + InputArray image, + (int, int) patternSize, + int flags, { + VecPoint2f? corners, + OutputArray? meta, +}) { + corners ??= VecPoint2f(); + meta ??= Mat.empty(); + final b = calloc(); + cvRun( + () => ccalib3d.cv_findChessboardCornersSB_1( + image.ref, + patternSize.cvd.ref, + corners!.ptr, + flags, + meta!.ref, + b, + ffi.nullptr, + ), + ); + final rval = b.value; + calloc.free(b); + return (rval, corners, meta); +} + +// bool cv::findCirclesGrid (InputArray image, Size patternSize, OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, const Ptr< FeatureDetector > &blobDetector=SimpleBlobDetector::create()) +(bool, Mat) findCirclesGrid( + InputArray image, + Size patternSize, { + int flags = CALIB_CB_SYMMETRIC_GRID, + OutputArray? centers, +}) { + centers ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_findCirclesGrid( + image.ref, + patternSize.ref, + centers!.ref, + flags, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, centers); +} + +// Mat cv::findEssentialMat (InputArray points1, InputArray points2, double focal=1.0, Point2d pp=Point2d(0, 0), int method=RANSAC, double prob=0.999, double threshold=1.0, int maxIters=1000, OutputArray mask=noArray()) +(Mat rval, Mat mask) findEssentialMat( + InputArray points1, + InputArray points2, { + double focal = 1.0, + Point2d? 
pp, + int method = RANSAC, + double prob = 0.999, + double threshold = 1.0, + int maxIters = 1000, + OutputArray? mask, +}) { + mask ??= Mat.empty(); + final prval = calloc(); + pp ??= Point2d(0, 0); + cvRun( + () => ccalib3d.cv_findEssentialMat( + points1.ref, + points2.ref, + focal, + pp!.ref, + method, + prob, + threshold, + maxIters, + mask!.ref, + prval, + ffi.nullptr, + ), + ); + return (Mat.fromPointer(prval), mask); +} + +// Mat cv::findEssentialMat (InputArray points1, InputArray points2, InputArray cameraMatrix, int method=RANSAC, double prob=0.999, double threshold=1.0, int maxIters=1000, OutputArray mask=noArray()) +Mat findEssentialMatCameraMatrix( + InputArray points1, + InputArray points2, + InputArray cameraMatrix, { + int method = RANSAC, + double prob = 0.999, + double threshold = 1.0, + int maxIters = 1000, + OutputArray? mask, +}) { + mask ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_findEssentialMat_1( + points1.ref, + points2.ref, + cameraMatrix.ref, + method, + prob, + threshold, + maxIters, + mask!.ref, + prval, + ffi.nullptr, + ), + ); + return Mat.fromPointer(prval); +} + +// Mat cv::findFundamentalMat (InputArray points1, InputArray points2, int method=FM_RANSAC, double ransacReprojThreshold=3., double confidence=0.99, OutputArray mask=noArray()) +Mat findFundamentalMat( + InputArray points1, + InputArray points2, { + int method = FM_RANSAC, + double ransacReprojThreshold = 3, + double confidence = 0.99, + int maxIters = 1000, + OutputArray? 
mask, +}) { + mask ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_findFundamentalMat( + points1.ref, + points2.ref, + method, + ransacReprojThreshold, + confidence, + maxIters, + mask!.ref, + prval, + ffi.nullptr, + ), + ); + return Mat.fromPointer(prval); +} + +// Mat cv::findFundamentalMat (InputArray points1, InputArray points2, OutputArray mask, const UsacParams ¶ms) +Mat findFundamentalMatByUsacParams( + InputArray points1, + InputArray points2, + UsacParams params, { + OutputArray? mask, +}) { + mask ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_findFundamentalMat_1( + points1.ref, + points2.ref, + mask!.ref, + params.ref, + prval, + ffi.nullptr, + ), + ); + return Mat.fromPointer(prval); +} + +/// FindHomography finds an optimal homography matrix using 4 or more point pairs (as opposed to GetPerspectiveTransform, which uses exactly 4) +/// +/// For further details, please see: +/// https:///docs.opencv.org/master/d9/d0c/group__calib3d.html#ga4abc2ece9fab9398f2e560d53c8c9780 +Mat findHomography( + InputArray srcPoints, + InputArray dstPoints, { + int method = 0, + double ransacReprojThreshold = 3, + OutputArray? mask, + int maxIters = 2000, + double confidence = 0.995, +}) { + mask ??= Mat.empty(); + final mat = Mat.empty(); + cvRun( + () => ccalib3d.cv_findHomography( + srcPoints.ref, + dstPoints.ref, + method, + ransacReprojThreshold, + mask!.ref, + maxIters, + confidence, + mat.ptr, + ffi.nullptr, + ), + ); + return mat; +} + +// Mat cv::findHomography (InputArray srcPoints, InputArray dstPoints, OutputArray mask, const UsacParams ¶ms) +Mat findHomographyUsac( + InputArray srcPoints, + InputArray dstPoints, + UsacParams params, { + OutputArray? 
mask, +}) { + mask ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_findHomography_1( + srcPoints.ref, + dstPoints.ref, + mask!.ref, + params.ref, + prval, + ffi.nullptr, + ), + ); + return Mat.fromPointer(prval); +} + +/// Returns the default new camera matrix. +/// +/// The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when centerPrinicipalPoint=false ), +/// or the modified one (when centerPrincipalPoint=true). +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga744529385e88ef7bc841cbe04b35bfbf +Mat getDefaultNewCameraMatrix(InputArray cameraMatrix, {Size? imgsize, bool centerPrincipalPoint = false}) { + final prval = calloc(); + imgsize ??= Size(0, 0); + cvRun( + () => ccalib3d.cv_getDefaultNewCameraMatrix( + cameraMatrix.ref, + imgsize!.ref, + centerPrincipalPoint, + prval, + ffi.nullptr, + ), + ); + return Mat.fromPointer(prval); +} + +/// GetOptimalNewCameraMatrixWithParams computes and returns the optimal new camera matrix based on the free scaling parameter. 
+/// +/// For further details, please see: +/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7a6c4e032c97f03ba747966e6ad862b1 +(Mat rval, Rect validPixROI) getOptimalNewCameraMatrix( + InputArray cameraMatrix, + InputArray distCoeffs, + (int, int) imageSize, + double alpha, { + (int, int) newImgSize = (0, 0), + bool centerPrincipalPoint = false, +}) { + final validPixROI = calloc(); + final rval = Mat.empty(); + cvRun( + () => ccalib3d.cv_getOptimalNewCameraMatrix( + cameraMatrix.ref, + distCoeffs.ref, + imageSize.cvd.ref, + alpha, + newImgSize.cvd.ref, + validPixROI, + centerPrincipalPoint, + rval.ptr, + ffi.nullptr, + ), + ); + return (rval, Rect.fromPointer(validPixROI)); +} + +/// InitUndistortRectifyMap computes the joint undistortion and rectification transformation and represents the result in the form of maps for remap +/// +/// For further details, please see: +/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7dfb72c9cf9780a347fbe3d1c47e5d5a +(Mat map1, Mat map2) initUndistortRectifyMap( + InputArray cameraMatrix, + InputArray distCoeffs, + InputArray R, + InputArray newCameraMatrix, + (int, int) size, + int m1type, { + OutputArray? map1, + OutputArray? map2, +}) { + map1 ??= Mat.empty(); + map2 ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_initUndistortRectifyMap( + cameraMatrix.ref, + distCoeffs.ref, + R.ref, + newCameraMatrix.ref, + size.cvd.ref, + m1type, + map1!.ref, + map2!.ref, + ffi.nullptr, + ), + ); + return (map1, map2); +} + +/// initializes maps for remap for wide-angle +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga9185f4fbe1ad74af2c56a392393cf9f4 +(double rval, Mat map1, Mat map2) initWideAngleProjMap( + InputArray cameraMatrix, + InputArray distCoeffs, + Size imageSize, + int destImageWidth, + int m1type, { + OutputArray? map1, + OutputArray? 
map2, + int projType = PROJ_SPHERICAL_EQRECT, + double alpha = 0, +}) { + map1 ??= Mat.empty(); + map2 ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_initWideAngleProjMap( + cameraMatrix.ref, + distCoeffs.ref, + imageSize.ref, + destImageWidth, + m1type, + map1!.ref, + map2!.ref, + projType, + alpha, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, map1, map2); +} + +/// Computes partial derivatives of the matrix product for each multiplied matrix. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga905541c1275852eabff7dbdfbc10d160 +(Mat dABdA, Mat dABdB) matMulDeriv(InputArray A, InputArray B, {OutputArray? dABdA, OutputArray? dABdB}) { + dABdA ??= Mat.empty(); + dABdB ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_matMulDeriv( + A.ref, + B.ref, + dABdA!.ref, + dABdB!.ref, + ffi.nullptr, + ), + ); + return (dABdA, dABdB); +} + +/// Projects 3D points to an image plane. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c +(Mat imagePoints, Mat jacobian) projectPoints( + InputArray objectPoints, + InputArray rvec, + InputArray tvec, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? imagePoints, + OutputArray? 
jacobian, + double aspectRatio = 0, +}) { + imagePoints ??= Mat.empty(); + jacobian ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_projectPoints( + objectPoints.ref, + rvec.ref, + tvec.ref, + cameraMatrix.ref, + distCoeffs.ref, + imagePoints!.ref, + jacobian!.ref, + aspectRatio, + ffi.nullptr, + ), + ); + return (imagePoints, jacobian); +} + +// int cv::recoverPose (InputArray E, InputArray points1, InputArray points2, InputArray cameraMatrix, OutputArray R, OutputArray t, double distanceThresh, InputOutputArray mask=noArray(), OutputArray triangulatedPoints=noArray()) +(int rval, Mat R, Mat t, Mat triangulatedPoints) recoverPoseWithCameraMatrix( + InputArray E, + InputArray points1, + InputArray points2, + InputArray cameraMatrix, { + OutputArray? R, + OutputArray? t, + double distanceThresh = 1, + InputOutputArray? mask, + OutputArray? triangulatedPoints, +}) { + R ??= Mat.empty(); + t ??= Mat.empty(); + mask ??= Mat.empty(); + triangulatedPoints ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_recoverPose( + E.ref, + points1.ref, + points2.ref, + cameraMatrix.ref, + R!.ref, + t!.ref, + distanceThresh, + mask!.ref, + triangulatedPoints!.ref, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, R, t, triangulatedPoints); +} + +// int cv::recoverPose (InputArray E, InputArray points1, InputArray points2, OutputArray R, OutputArray t, double focal=1.0, Point2d pp=Point2d(0, 0), InputOutputArray mask=noArray()) +(int rval, Mat R, Mat t) recoverPose( + InputArray E, + InputArray points1, + InputArray points2, { + OutputArray? R, + OutputArray? t, + double focal = 1, + Point2d? pp, + InputOutputArray? 
mask, +}) { + R ??= Mat.empty(); + t ??= Mat.empty(); + mask ??= Mat.empty(); + pp ??= Point2d(0, 0); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_recoverPose_1( + E.ref, + points1.ref, + points2.ref, + R!.ref, + t!.ref, + focal, + pp!.ref, + mask!.ref, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, R, t); +} + +// void cv::reprojectImageTo3D (InputArray disparity, OutputArray _3dImage, InputArray Q, bool handleMissingValues=false, int ddepth=-1) +Mat reprojectImageTo3D( + InputArray disparity, + InputArray Q, { + OutputArray? out3dImage, + bool handleMissingValues = false, + int ddepth = -1, +}) { + out3dImage ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_reprojectImageTo3D( + disparity.ref, + out3dImage!.ref, + Q.ref, + handleMissingValues, + ddepth, + ffi.nullptr, + ), + ); + return out3dImage; +} + +// void cv::Rodrigues (InputArray src, OutputArray dst, OutputArray jacobian=noArray()) +Mat Rodrigues( + InputArray src, { + OutputArray? dst, + OutputArray? jacobian, +}) { + dst ??= Mat.empty(); + jacobian ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_Rodrigues( + src.ref, + dst!.ref, + jacobian!.ref, + ffi.nullptr, + ), + ); + return dst; +} + +// Vec3d cv::RQDecomp3x3 (InputArray src, OutputArray mtxR, OutputArray mtxQ, OutputArray Qx=noArray(), OutputArray Qy=noArray(), OutputArray Qz=noArray()) +(Vec3d rval, Mat mtxR, Mat mtxQ) RQDecomp3x3( + InputArray src, { + OutputArray? mtxR, + OutputArray? mtxQ, + OutputArray? Qx, + OutputArray? Qy, + OutputArray? 
Qz, +}) { + mtxR ??= Mat.empty(); + mtxQ ??= Mat.empty(); + Qx ??= Mat.empty(); + Qy ??= Mat.empty(); + Qz ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_RQDecomp3x3( + src.ref, + mtxR!.ref, + mtxQ!.ref, + Qx!.ref, + Qy!.ref, + Qz!.ref, + prval, + ffi.nullptr, + ), + ); + return (Vec3d.fromPointer(prval), mtxR, mtxQ); +} + +double sampsonDistance(InputArray pt1, InputArray pt2, InputArray F) => + ccalib3d.cv_sampsonDistance(pt1.ref, pt2.ref, F.ref); + +/// Finds an object pose from 3 3D-2D point correspondences. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#gae5af86788e99948d40b39a03f6acf623 +(int rval, VecMat rvecs, VecMat tvecs) solveP3P( + InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, + int flags, { + VecMat? rvecs, + VecMat? tvecs, +}) { + rvecs ??= VecMat(); + tvecs ??= VecMat(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_solveP3P( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvecs!.ptr, + tvecs!.ptr, + flags, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, rvecs, tvecs); +} + +/// Finds an object pose from 3D-2D point correspondences. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga549c2075fac14829ff4a58bc931c033d +(bool rval, Mat rvec, Mat tvec) solvePnP( + InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? rvec, + OutputArray? 
tvec, + bool useExtrinsicGuess = false, + int flags = SOLVEPNP_ITERATIVE, +}) { + rvec ??= Mat.empty(); + tvec ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_solvePnP( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvec!.ref, + tvec!.ref, + useExtrinsicGuess, + flags, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, rvec, tvec); +} + +/// Finds an object pose from 3D-2D point correspondences. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga624af8a6641b9bdb487f63f694e8bb90 +(int rval, VecMat rvecs, VecMat tvecs, Mat rvec, Mat tvec, Mat reprojectionError) solvePnPGeneric( + InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, { + VecMat? rvecs, + VecMat? tvecs, + bool useExtrinsicGuess = false, + int flags = SOLVEPNP_ITERATIVE, + InputArray? rvec, + InputArray? tvec, + OutputArray? reprojectionError, +}) { + rvecs ??= VecMat(); + tvecs ??= VecMat(); + rvec ??= Mat.empty(); + tvec ??= Mat.empty(); + reprojectionError ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_solvePnPGeneric( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvecs!.ptr, + tvecs!.ptr, + useExtrinsicGuess, + flags, + rvec!.ref, + tvec!.ref, + reprojectionError!.ref, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, rvecs, tvecs, rvec, tvec, reprojectionError); +} + +/// Finds an object pose from 3D-2D point correspondences using the RANSAC scheme. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga50620f0e26e02caa2e9adc07b5fbf24e +(bool rval, Mat rvec, Mat tvec, Mat inliers) solvePnPRansac( + InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? rvec, + OutputArray? 
tvec, + bool useExtrinsicGuess = false, + int iterationsCount = 100, + double reprojectionError = 8.0, + double confidence = 0.99, + OutputArray? inliers, + int flags = SOLVEPNP_ITERATIVE, +}) { + rvec ??= Mat.empty(); + tvec ??= Mat.empty(); + inliers ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_solvePnPRansac( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvec!.ref, + tvec!.ref, + useExtrinsicGuess, + iterationsCount, + reprojectionError, + confidence, + inliers!.ref, + flags, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, rvec, tvec, inliers); +} + +/// Finds an object pose from 3D-2D point correspondences using the RANSAC scheme. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#gab14667ec49eda61b4a3f14eb9704373b +(bool rval, Mat rvec, Mat tvec, Mat inliers) solvePnPRansacCameraMatrix( + InputArray objectPoints, + InputArray imagePoints, + InputOutputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? rvec, + OutputArray? tvec, + OutputArray? inliers, + UsacParams? params, +}) { + rvec ??= Mat.empty(); + tvec ??= Mat.empty(); + inliers ??= Mat.empty(); + params ??= UsacParams(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_solvePnPRansac_1( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvec!.ref, + tvec!.ref, + inliers!.ref, + params!.ref, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, rvec, tvec, inliers); +} + +/// Refine a pose (the translation and the rotation that transform a 3D point expressed in the +/// object coordinate frame to the camera coordinate frame) from a 3D-2D point correspondences and +/// starting from an initial solution. 
+/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga650ba4d286a96d992f82c3e6dfa525fa +void solvePnPRefineLM( + InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, + InputOutputArray rvec, + InputOutputArray tvec, { + TermCriteria? criteria, +}) { + // in opencv, this is TermCriteria(TermCriteria::EPS+TermCriteria::COUNT, 20, FLT_EPSILON) + // FLT_EPSILON depends on the platform, here we use 1e-7 to simplify this. + // which may get different results than opencv c++. + criteria ??= TermCriteria(TERM_EPS + TERM_COUNT, 20, 1e-7); + return cvRun( + () => ccalib3d.cv_solvePnPRefineLM( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvec.ref, + tvec.ref, + criteria!.ref, + ffi.nullptr, + ), + ); +} + +/// Refine a pose (the translation and the rotation that transform a 3D point expressed in the +/// object coordinate frame to the camera coordinate frame) from a 3D-2D point correspondences and +/// starting from an initial solution. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga17491c0282e4af874f6206a9166774a5 +void solvePnPRefineVVS( + InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, + InputOutputArray rvec, + InputOutputArray tvec, { + TermCriteria?
criteria, + double VVSlambda = 1.0, +}) { + criteria ??= TermCriteria(TERM_EPS + TERM_COUNT, 20, 1e-7); + return cvRun( + () => ccalib3d.cv_solvePnPRefineVVS( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvec.ref, + tvec.ref, + criteria!.ref, + VVSlambda, + ffi.nullptr, + ), + ); +} + +// double cv::stereoCalibrate (InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, Size imageSize, InputOutputArray R, InputOutputArray T, OutputArray E, OutputArray F, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray perViewErrors, int flags=CALIB_FIX_INTRINSIC, TermCriteria criteria=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6)); +( + double rval, + Mat cameraMatrix1, + Mat distCoeffs1, + Mat cameraMatrix2, + Mat distCoeffs2, + Mat R, + Mat T, + Mat E, + Mat F, + VecMat rvecs, + VecMat tvecs, + Mat perViewErrors +) stereoCalibrate( + VecMat objectPoints, + VecMat imagePoints1, + VecMat imagePoints2, + InputOutputArray cameraMatrix1, + InputOutputArray distCoeffs1, + InputOutputArray cameraMatrix2, + InputOutputArray distCoeffs2, + Size imageSize, + InputOutputArray R, + InputOutputArray T, { + OutputArray? E, + OutputArray? F, + VecMat? rvecs, + VecMat? tvecs, + OutputArray? perViewErrors, + int flags = CALIB_FIX_INTRINSIC, + TermCriteria? 
criteria, +}) { + E ??= Mat.empty(); + F ??= Mat.empty(); + rvecs ??= VecMat(); + tvecs ??= VecMat(); + perViewErrors ??= Mat.empty(); + criteria ??= TermCriteria(TERM_EPS + TERM_COUNT, 30, 1e-6); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_stereoCalibrate( + objectPoints.ref, + imagePoints1.ref, + imagePoints2.ref, + cameraMatrix1.ref, + distCoeffs1.ref, + cameraMatrix2.ref, + distCoeffs2.ref, + imageSize.ref, + R.ref, + T.ref, + E!.ref, + F!.ref, + rvecs!.ref, + tvecs!.ref, + perViewErrors!.ref, + flags, + criteria!.ref, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return ( + rval, + cameraMatrix1, + distCoeffs1, + cameraMatrix2, + distCoeffs2, + R, + T, + E, + F, + rvecs, + tvecs, + perViewErrors + ); +} + +// void cv::stereoRectify (InputArray cameraMatrix1, InputArray distCoeffs1, InputArray cameraMatrix2, InputArray distCoeffs2, Size imageSize, InputArray R, InputArray T, OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags=CALIB_ZERO_DISPARITY, double alpha=-1, Size newImageSize=Size(), Rect *validPixROI1=0, Rect *validPixROI2=0); +(Mat R1, Mat R2, Mat P1, Mat P2, Mat Q) stereoRectify( + InputArray cameraMatrix1, + InputArray distCoeffs1, + InputArray cameraMatrix2, + InputArray distCoeffs2, + Size imageSize, + InputArray R, + InputArray T, { + OutputArray? R1, + OutputArray? R2, + OutputArray? P1, + OutputArray? P2, + OutputArray? Q, + int flags = CALIB_ZERO_DISPARITY, + double alpha = -1, + Size? newImageSize, + Rect? validPixROI1, + Rect? validPixROI2, +}) { + R1 ??= Mat.empty(); + R2 ??= Mat.empty(); + P1 ??= Mat.empty(); + P2 ??= Mat.empty(); + Q ??= Mat.empty(); + newImageSize ??= Size(0, 0); + final pValidPixROI1 = validPixROI1 == null ? ffi.nullptr : calloc(); + final pValidPixROI2 = validPixROI2 == null ? 
ffi.nullptr : calloc(); + cvRun( + () => ccalib3d.cv_stereoRectify( + cameraMatrix1.ref, + distCoeffs1.ref, + cameraMatrix2.ref, + distCoeffs2.ref, + imageSize.ref, + R.ref, + T.ref, + R1!.ref, + R2!.ref, + P1!.ref, + P2!.ref, + Q!.ref, + flags, + alpha, + newImageSize!.ref, + pValidPixROI1, + pValidPixROI2, + ffi.nullptr, + ), + ); + if (validPixROI1 != null && pValidPixROI1 != ffi.nullptr) { + validPixROI1.x = pValidPixROI1.ref.x; + validPixROI1.y = pValidPixROI1.ref.y; + validPixROI1.width = pValidPixROI1.ref.width; + validPixROI1.height = pValidPixROI1.ref.height; + calloc.free(pValidPixROI1); + } + if (validPixROI2 != null && pValidPixROI2 != ffi.nullptr) { + validPixROI2.x = pValidPixROI2.ref.x; + validPixROI2.y = pValidPixROI2.ref.y; + validPixROI2.width = pValidPixROI2.ref.width; + validPixROI2.height = pValidPixROI2.ref.height; + calloc.free(pValidPixROI2); + } + return (R1, R2, P1, P2, Q); +} + +// bool cv::stereoRectifyUncalibrated (InputArray points1, InputArray points2, InputArray F, Size imgSize, OutputArray H1, OutputArray H2, double threshold=5); +(bool rval, Mat H1, Mat H2) stereoRectifyUncalibrated( + InputArray points1, + InputArray points2, + InputArray F, + Size imgSize, { + OutputArray? H1, + OutputArray? H2, + double threshold = 5, +}) { + H1 ??= Mat.empty(); + H2 ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_stereoRectifyUncalibrated( + points1.ref, + points2.ref, + F.ref, + imgSize.ref, + H1!.ref, + H2!.ref, + threshold, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, H1, H2); +} + +// void cv::triangulatePoints (InputArray projMatr1, InputArray projMatr2, InputArray projPoints1, InputArray projPoints2, OutputArray points4D); +Mat triangulatePoints( + InputArray projMatr1, + InputArray projMatr2, + InputArray projPoints1, + InputArray projPoints2, { + OutputArray? 
points4D, +}) { + points4D ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_triangulatePoints( + projMatr1.ref, + projMatr2.ref, + projPoints1.ref, + projPoints2.ref, + points4D!.ref, + ffi.nullptr, + ), + ); + return points4D; +} + +// Transforms an image to compensate for lens distortion. +// The function transforms an image to compensate radial and tangential lens distortion. +// The function is simply a combination of initUndistortRectifyMap (with unity R ) and remap (with bilinear interpolation). See the former function for details of the transformation being performed. +// Those pixels in the destination image, for which there is no correspondent pixels in the source image, are filled with zeros (black color). +// A particular subset of the source image that will be visible in the corrected image can be regulated by newCameraMatrix. You can use getOptimalNewCameraMatrix to compute the appropriate newCameraMatrix depending on your requirements. +// The camera matrix and the distortion parameters can be determined using calibrateCamera. If the resolution of images is different from the resolution used at the calibration stage, fx,fy,cx and cy need to be scaled accordingly, while the distortion coefficients remain the same. +Mat undistort( + InputArray src, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? dst, + InputArray? newCameraMatrix, +}) { + dst ??= Mat.empty(); + newCameraMatrix ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_undistort( + src.ref, + dst!.ref, + cameraMatrix.ref, + distCoeffs.ref, + newCameraMatrix!.ref, + ffi.nullptr, + ), + ); + return dst; +} + +/// Compute undistorted image points position. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga6327c952253fd43f729c4008c2a45c17 +Mat undistortImagePoints( + InputArray src, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? dst, + TermCriteria? 
criteria, +}) { + dst ??= Mat.empty(); + criteria ??= TermCriteria(TERM_MAX_ITER + TERM_EPS, 5, 0.01); + cvRun( + () => ccalib3d.cv_undistortImagePoints( + src.ref, + dst!.ref, + cameraMatrix.ref, + distCoeffs.ref, + criteria!.ref, + ffi.nullptr, + ), + ); + return dst; +} + +// UndistortPoints transforms points to compensate for lens distortion +// +// For further details, please see: +// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga55c716492470bfe86b0ee9bf3a1f0f7e +Mat undistortPoints( + InputArray src, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? dst, + InputArray? R, + InputArray? P, + (int type, int count, double eps) criteria = (TERM_COUNT + TERM_EPS, 30, 1e-4), +}) { + R ??= Mat.empty(); + P ??= Mat.empty(); + dst ??= Mat.empty(); + final tc = criteria.cvd; + cvRun( + () => ccalib3d.cv_undistortPoints( + src.ref, + dst!.ref, + cameraMatrix.ref, + distCoeffs.ref, + R!.ref, + P!.ref, + tc.ref, + ffi.nullptr, + ), + ); + return dst; +} + +/// validates disparity using the left-right check. The matrix "cost" should be computed by the +/// stereo correspondence algorithm +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga214b498b8d01d0417e0d08be64c54eb5 +void validateDisparity( + InputOutputArray disparity, + InputArray cost, + int minDisparity, + int numberOfDisparities, { + int disp12MaxDisp = 1, +}) { + return cvRun( + () => ccalib3d.cv_validateDisparity( + disparity.ref, + cost.ref, + minDisparity, + numberOfDisparities, + disp12MaxDisp, + ffi.nullptr, + ), + ); +} + +// constants +const int PROJ_SPHERICAL_ORTHO = 0; +const int PROJ_SPHERICAL_EQRECT = 1; + +/// Pose refinement using non-linear Levenberg-Marquardt minimization scheme @cite Madsen04 @cite Eade13 \n +/// Initial solution for non-planar "objectPoints" needs at least 6 points and uses the DLT algorithm. \n +/// Initial solution for planar "objectPoints" needs at least 4 points and uses pose from homography decomposition. 
+const int SOLVEPNP_ITERATIVE = 0; + +/// EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp +const int SOLVEPNP_EPNP = 1; + +/// Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete +const int SOLVEPNP_P3P = 2; + +/// **Broken implementation. Using this flag will fallback to EPnP.** \n +/// A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct +const int SOLVEPNP_DLS = 3; + +/// **Broken implementation. Using this flag will fallback to EPnP.** \n +/// Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive +const int SOLVEPNP_UPNP = 4; + +/// An Efficient Algebraic Solution to the Perspective-Three-Point Problem @cite Ke17 +const int SOLVEPNP_AP3P = 5; + +/// Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n +/// Object points must be coplanar. +const int SOLVEPNP_IPPE = 6; + +/// Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n +/// This is a special case suitable for marker pose estimation.\n +/// 4 coplanar object points must be defined in the following order: +/// - point 0: [-squareLength / 2, squareLength / 2, 0] +/// - point 1: [ squareLength / 2, squareLength / 2, 0] +/// - point 2: [ squareLength / 2, -squareLength / 2, 0] +/// - point 3: [-squareLength / 2, -squareLength / 2, 0] +const int SOLVEPNP_IPPE_SQUARE = 7; + +/// SQPnP: A Consistently Fast and Globally Optimal Solution to the Perspective-n-Point Problem @cite Terzakis2020SQPnP +const int SOLVEPNP_SQPNP = 8; + +// the algorithm for finding fundamental matrix +/// 7-point algorithm +const int FM_7POINT = 1; + +/// 8-point algorithm +const int FM_8POINT = 2; + +/// least-median algorithm. 7-point algorithm is used. +const int FM_LMEDS = 4; + +/// RANSAC algorithm. It needs at least 15 points. 7-point algorithm is used.
+const int FM_RANSAC = 8; diff --git a/packages/dartcv/lib/src/calib3d/calib3d_async.dart b/packages/dartcv/lib/src/calib3d/calib3d_async.dart index 6d2f3d78..595a89cf 100644 --- a/packages/dartcv/lib/src/calib3d/calib3d_async.dart +++ b/packages/dartcv/lib/src/calib3d/calib3d_async.dart @@ -18,68 +18,7 @@ import '../core/termcriteria.dart'; import '../g/constants.g.dart'; import '../g/types.g.dart' as cvg; import '../native_lib.dart' show ccalib3d; - -/// InitUndistortRectifyMap computes the joint undistortion and rectification transformation and represents the result in the form of maps for remap -/// -/// For further details, please see: -/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7dfb72c9cf9780a347fbe3d1c47e5d5a -Future<(Mat, Mat)> initUndistortRectifyMapAsync( - InputArray cameraMatrix, - InputArray distCoeffs, - InputArray R, - InputArray newCameraMatrix, - (int, int) size, - int m1type, { - OutputArray? map1, - OutputArray? map2, -}) async { - map1 ??= Mat.empty(); - map2 ??= Mat.empty(); - return cvRunAsync0<(Mat, Mat)>( - (callback) => ccalib3d.cv_initUndistortRectifyMap( - cameraMatrix.ref, - distCoeffs.ref, - R.ref, - newCameraMatrix.ref, - size.cvd.ref, - m1type, - map1!.ref, - map2!.ref, - callback, - ), - (c) => c.complete((map1!, map2!)), - ); -} - -/// GetOptimalNewCameraMatrixWithParams computes and returns the optimal new camera matrix based on the free scaling parameter. 
-/// -/// For further details, please see: -/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7a6c4e032c97f03ba747966e6ad862b1 -Future<(Mat rval, Rect validPixROI)> getOptimalNewCameraMatrixAsync( - InputArray cameraMatrix, - InputArray distCoeffs, - (int, int) imageSize, - double alpha, { - (int, int) newImgSize = (0, 0), - bool centerPrincipalPoint = false, -}) async { - final validPixROI = calloc(); - final rval = Mat.empty(); - return cvRunAsync0<(Mat, Rect)>( - (callback) => ccalib3d.cv_getOptimalNewCameraMatrix( - cameraMatrix.ref, - distCoeffs.ref, - imageSize.cvd.ref, - alpha, - newImgSize.cvd.ref, - validPixROI, - centerPrincipalPoint, - rval.ptr, - callback, - ), - (c) => c.complete((rval, Rect.fromPointer(validPixROI))), - ); -} +import 'calib3d.dart'; // CalibrateCamera finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern. // @@ -120,148 +59,6 @@ Future<(double rmsErr, Mat cameraMatrix, Mat distCoeffs, Mat rvecs, Mat tvecs)> }); } -// Transforms an image to compensate for lens distortion. -// The function transforms an image to compensate radial and tangential lens distortion. -// The function is simply a combination of initUndistortRectifyMap (with unity R ) and remap (with bilinear interpolation). See the former function for details of the transformation being performed. -// Those pixels in the destination image, for which there is no correspondent pixels in the source image, are filled with zeros (black color). -// A particular subset of the source image that will be visible in the corrected image can be regulated by newCameraMatrix. You can use getOptimalNewCameraMatrix to compute the appropriate newCameraMatrix depending on your requirements. -// The camera matrix and the distortion parameters can be determined using calibrateCamera. 
If the resolution of images is different from the resolution used at the calibration stage, fx,fy,cx and cy need to be scaled accordingly, while the distortion coefficients remain the same. -Future undistortAsync( - InputArray src, - InputArray cameraMatrix, - InputArray distCoeffs, { - OutputArray? dst, - InputArray? newCameraMatrix, -}) async { - dst ??= Mat.empty(); - newCameraMatrix ??= Mat.empty(); - return cvRunAsync0( - (callback) => ccalib3d.cv_undistort( - src.ref, - dst!.ref, - cameraMatrix.ref, - distCoeffs.ref, - newCameraMatrix!.ref, - callback, - ), - (c) => c.complete(dst!), - ); -} - -// UndistortPoints transforms points to compensate for lens distortion -// -// For further details, please see: -// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga55c716492470bfe86b0ee9bf3a1f0f7e -Future undistortPointsAsync( - InputArray src, - InputArray cameraMatrix, - InputArray distCoeffs, { - OutputArray? dst, - InputArray? R, - InputArray? P, - (int type, int count, double eps) criteria = (TERM_COUNT + TERM_EPS, 30, 1e-4), -}) async { - R ??= Mat.empty(); - P ??= Mat.empty(); - dst ??= Mat.empty(); - final tc = criteria.cvd; - return cvRunAsync0( - (callback) => ccalib3d.cv_undistortPoints( - src.ref, - dst!.ref, - cameraMatrix.ref, - distCoeffs.ref, - R!.ref, - P!.ref, - tc.ref, - callback, - ), - (c) => c.complete(dst!), - ); -} - -// FindChessboardCorners finds the positions of internal corners of the chessboard. -// -// For further details, please see: -// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a -Future<(bool success, VecPoint2f corners)> findChessboardCornersAsync( - InputArray image, - (int, int) patternSize, { - VecPoint2f? 
corners, - int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE, -}) async { - corners ??= VecPoint2f(); - final r = calloc(); - return cvRunAsync0( - (callback) => ccalib3d.cv_findChessboardCorners( - image.ref, - patternSize.cvd.ref, - corners!.ptr, - flags, - r, - callback, - ), (c) { - final rval = r.value; - calloc.free(r); - return c.complete((rval, corners!)); - }); -} - -// Finds the positions of internal corners of the chessboard using a sector based approach. -// https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#gadc5bcb05cb21cf1e50963df26986d7c9 -Future<(bool, VecPoint2f corners)> findChessboardCornersSBAsync( - InputArray image, - (int, int) patternSize, - int flags, { - VecPoint2f? corners, -}) async { - corners ??= VecPoint2f(); - final b = calloc(); - return cvRunAsync0( - (callback) => ccalib3d.cv_findChessboardCornersSB( - image.ref, - patternSize.cvd.ref, - corners!.ptr, - flags, - b, - callback, - ), (c) { - final rval = b.value; - calloc.free(b); - return c.complete((rval, corners!)); - }); -} - -// Finds the positions of internal corners of the chessboard using a sector based approach. -// https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#gadc5bcb05cb21cf1e50963df26986d7c9 -Future<(bool, VecPoint2f corners, Mat meta)> findChessboardCornersSBWithMetaAsync( - InputArray image, - (int, int) patternSize, - int flags, { - VecPoint2f? corners, - OutputArray? meta, -}) async { - corners ??= VecPoint2f(); - meta ??= Mat.empty(); - final b = calloc(); - return cvRunAsync0( - (callback) => ccalib3d.cv_findChessboardCornersSB_1( - image.ref, - patternSize.cvd.ref, - corners!.ptr, - flags, - meta!.ref, - b, - callback, - ), - (c) { - final rval = b.value; - calloc.free(b); - return c.complete((rval, corners!, meta!)); - }, - ); -} - // DrawChessboardCorners renders the detected chessboard corners. 
// // For further details, please see: @@ -284,6 +81,34 @@ Future drawChessboardCornersAsync( ); } +/// Draw axes of the world/object coordinate system from pose estimation. +/// +/// For further details, please see: +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#gab3ab7bb2bdfe7d5d9745bb92d13f9564 +Future drawFrameAxesAsync( + Mat image, + Mat cameraMatrix, + Mat distCoeffs, + Mat rvec, + Mat tvec, + double length, { + int thickness = 3, +}) async { + return cvRunAsync0( + (callback) => ccalib3d.cv_drawFrameAxes( + image.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvec.ref, + tvec.ref, + length, + thickness, + callback, + ), + (c) => c.complete(), + ); +} + // EstimateAffinePartial2D computes an optimal limited affine transformation // with 4 degrees of freedom between two 2D point sets. // @@ -351,6 +176,37 @@ Future<(Mat, Mat inliers)> estimateAffine2DAsync( ); } +/// Computes an optimal affine transformation between two 3D point sets. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#gac12d1f05b3bb951288e7250713ce98f0 +Future<(int rval, Mat, Mat inliers)> estimateAffine3DAsync( + Mat src, + Mat dst, { + Mat? out, + Mat? inliers, + double ransacThreshold = 3, + double confidence = 0.99, +}) async { + out ??= Mat.empty(); + inliers ??= Mat.empty(); + final p = calloc(); + return cvRunAsync0( + (callback) => ccalib3d.cv_estimateAffine3D_1( + src.ref, + dst.ref, + out!.ref, + inliers!.ref, + ransacThreshold, + confidence, + p, + callback, + ), (c) { + final rval = p.value; + calloc.free(p); + return c.complete((rval, out!, inliers!)); + }); +} + /// FindHomography finds an optimal homography matrix using 4 or more point pairs (as opposed to GetPerspectiveTransform, which uses exactly 4) /// /// For further details, please see: @@ -381,3 +237,628 @@ Future<(Mat, Mat)> findHomographyAsync( (c) => c.complete((mat, mask!)), ); } + +// FindChessboardCorners finds the positions of internal corners of the chessboard. 
+// +// For further details, please see: +// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a +Future<(bool success, VecPoint2f corners)> findChessboardCornersAsync( + InputArray image, + (int, int) patternSize, { + VecPoint2f? corners, + int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE, +}) async { + corners ??= VecPoint2f(); + final r = calloc(); + return cvRunAsync0( + (callback) => ccalib3d.cv_findChessboardCorners( + image.ref, + patternSize.cvd.ref, + corners!.ptr, + flags, + r, + callback, + ), (c) { + final rval = r.value; + calloc.free(r); + return c.complete((rval, corners!)); + }); +} + +// Finds the positions of internal corners of the chessboard using a sector based approach. +// https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#gadc5bcb05cb21cf1e50963df26986d7c9 +Future<(bool, VecPoint2f corners)> findChessboardCornersSBAsync( + InputArray image, + (int, int) patternSize, + int flags, { + VecPoint2f? corners, +}) async { + corners ??= VecPoint2f(); + final b = calloc(); + return cvRunAsync0( + (callback) => ccalib3d.cv_findChessboardCornersSB( + image.ref, + patternSize.cvd.ref, + corners!.ptr, + flags, + b, + callback, + ), (c) { + final rval = b.value; + calloc.free(b); + return c.complete((rval, corners!)); + }); +} + +// Finds the positions of internal corners of the chessboard using a sector based approach. +// https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#gadc5bcb05cb21cf1e50963df26986d7c9 +Future<(bool, VecPoint2f corners, Mat meta)> findChessboardCornersSBWithMetaAsync( + InputArray image, + (int, int) patternSize, + int flags, { + VecPoint2f? corners, + OutputArray? 
meta, +}) async { + corners ??= VecPoint2f(); + meta ??= Mat.empty(); + final b = calloc(); + return cvRunAsync0( + (callback) => ccalib3d.cv_findChessboardCornersSB_1( + image.ref, + patternSize.cvd.ref, + corners!.ptr, + flags, + meta!.ref, + b, + callback, + ), + (c) { + final rval = b.value; + calloc.free(b); + return c.complete((rval, corners!, meta!)); + }, + ); +} + +/// Returns the default new camera matrix. +/// +/// The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when centerPrinicipalPoint=false ), +/// or the modified one (when centerPrincipalPoint=true). +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga744529385e88ef7bc841cbe04b35bfbf +Future getDefaultNewCameraMatrixAsync( + InputArray cameraMatrix, { + Size? imgsize, + bool centerPrincipalPoint = false, +}) async { + final prval = calloc(); + imgsize ??= Size(0, 0); + return cvRunAsync0( + (callback) => ccalib3d.cv_getDefaultNewCameraMatrix( + cameraMatrix.ref, + imgsize!.ref, + centerPrincipalPoint, + prval, + callback, + ), + (c) => c.complete(Mat.fromPointer(prval)), + ); +} + +/// GetOptimalNewCameraMatrixWithParams computes and returns the optimal new camera matrix based on the free scaling parameter. 
+/// +/// For further details, please see: +/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7a6c4e032c97f03ba747966e6ad862b1 +Future<(Mat rval, Rect validPixROI)> getOptimalNewCameraMatrixAsync( + InputArray cameraMatrix, + InputArray distCoeffs, + (int, int) imageSize, + double alpha, { + (int, int) newImgSize = (0, 0), + bool centerPrincipalPoint = false, +}) async { + final validPixROI = calloc(); + final rval = Mat.empty(); + return cvRunAsync0<(Mat, Rect)>( + (callback) => ccalib3d.cv_getOptimalNewCameraMatrix( + cameraMatrix.ref, + distCoeffs.ref, + imageSize.cvd.ref, + alpha, + newImgSize.cvd.ref, + validPixROI, + centerPrincipalPoint, + rval.ptr, + callback, + ), + (c) => c.complete((rval, Rect.fromPointer(validPixROI))), + ); +} + +/// InitUndistortRectifyMap computes the joint undistortion and rectification transformation and represents the result in the form of maps for remap +/// +/// For further details, please see: +/// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7dfb72c9cf9780a347fbe3d1c47e5d5a +Future<(Mat, Mat)> initUndistortRectifyMapAsync( + InputArray cameraMatrix, + InputArray distCoeffs, + InputArray R, + InputArray newCameraMatrix, + (int, int) size, + int m1type, { + OutputArray? map1, + OutputArray? map2, +}) async { + map1 ??= Mat.empty(); + map2 ??= Mat.empty(); + return cvRunAsync0<(Mat, Mat)>( + (callback) => ccalib3d.cv_initUndistortRectifyMap( + cameraMatrix.ref, + distCoeffs.ref, + R.ref, + newCameraMatrix.ref, + size.cvd.ref, + m1type, + map1!.ref, + map2!.ref, + callback, + ), + (c) => c.complete((map1!, map2!)), + ); +} + +/// initializes maps for remap for wide-angle +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga9185f4fbe1ad74af2c56a392393cf9f4 +Future<(double rval, Mat map1, Mat map2)> initWideAngleProjMapAsync( + InputArray cameraMatrix, + InputArray distCoeffs, + Size imageSize, + int destImageWidth, + int m1type, { + OutputArray? map1, + OutputArray? 
map2, + int projType = PROJ_SPHERICAL_EQRECT, + double alpha = 0, +}) async { + map1 ??= Mat.empty(); + map2 ??= Mat.empty(); + final prval = calloc(); + return cvRunAsync0( + (callback) => ccalib3d.cv_initWideAngleProjMap( + cameraMatrix.ref, + distCoeffs.ref, + imageSize.ref, + destImageWidth, + m1type, + map1!.ref, + map2!.ref, + projType, + alpha, + prval, + callback, + ), (c) { + final rval = prval.value; + calloc.free(prval); + return c.complete((rval, map1!, map2!)); + }); +} + +/// Computes partial derivatives of the matrix product for each multiplied matrix. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga905541c1275852eabff7dbdfbc10d160 +Future<(Mat dABdA, Mat dABdB)> matMulDerivAsync( + InputArray A, + InputArray B, { + OutputArray? dABdA, + OutputArray? dABdB, +}) async { + dABdA ??= Mat.empty(); + dABdB ??= Mat.empty(); + return cvRunAsync0( + (callback) => ccalib3d.cv_matMulDeriv( + A.ref, + B.ref, + dABdA!.ref, + dABdB!.ref, + callback, + ), + (c) => c.complete((dABdA!, dABdB!)), + ); +} + +/// Projects 3D points to an image plane. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c +Future<(Mat imagePoints, Mat jacobian)> projectPointsAsync( + InputArray objectPoints, + InputArray rvec, + InputArray tvec, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? imagePoints, + OutputArray? jacobian, + double aspectRatio = 0, +}) async { + imagePoints ??= Mat.empty(); + jacobian ??= Mat.empty(); + return cvRunAsync0( + (callback) => ccalib3d.cv_projectPoints( + objectPoints.ref, + rvec.ref, + tvec.ref, + cameraMatrix.ref, + distCoeffs.ref, + imagePoints!.ref, + jacobian!.ref, + aspectRatio, + callback, + ), + (c) => c.complete((imagePoints!, jacobian!)), + ); +} + +/// Finds an object pose from 3 3D-2D point correspondences. 
+/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#gae5af86788e99948d40b39a03f6acf623 +Future<(int rval, VecMat rvecs, VecMat tvecs)> solveP3PAsync( + InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, + int flags, { + VecMat? rvecs, + VecMat? tvecs, +}) async { + rvecs ??= VecMat(); + tvecs ??= VecMat(); + final prval = calloc(); + return cvRunAsync0( + (callback) => ccalib3d.cv_solveP3P( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvecs!.ptr, + tvecs!.ptr, + flags, + prval, + callback, + ), (c) { + final rval = prval.value; + calloc.free(prval); + return c.complete((rval, rvecs!, tvecs!)); + }); +} + +/// Finds an object pose from 3D-2D point correspondences. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga549c2075fac14829ff4a58bc931c033d +Future<(bool rval, Mat rvec, Mat tvec)> solvePnPAsync( + InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? rvec, + OutputArray? tvec, + bool useExtrinsicGuess = false, + int flags = SOLVEPNP_ITERATIVE, +}) async { + rvec ??= Mat.empty(); + tvec ??= Mat.empty(); + final prval = calloc(); + return cvRunAsync0( + (callback) => ccalib3d.cv_solvePnP( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvec!.ref, + tvec!.ref, + useExtrinsicGuess, + flags, + prval, + callback, + ), (c) { + final rval = prval.value; + calloc.free(prval); + return c.complete((rval, rvec!, tvec!)); + }); +} + +// /// Finds an object pose from 3D-2D point correspondences. +// /// +// /// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga624af8a6641b9bdb487f63f694e8bb90 +// (int rval, VecMat rvecs, VecMat tvecs, Mat rvec, Mat tvec, Mat reprojectionError) solvePnPGeneric( +// InputArray objectPoints, +// InputArray imagePoints, +// InputArray cameraMatrix, +// InputArray distCoeffs, { +// VecMat? rvecs, +// VecMat? 
tvecs, +// bool useExtrinsicGuess = false, +// int flags = SOLVEPNP_ITERATIVE, +// InputArray? rvec, +// InputArray? tvec, +// OutputArray? reprojectionError, +// }) { +// rvecs ??= VecMat(); +// tvecs ??= VecMat(); +// rvec ??= Mat.empty(); +// tvec ??= Mat.empty(); +// reprojectionError ??= Mat.empty(); +// final prval = calloc(); +// cvRun( +// () => ccalib3d.cv_solvePnPGeneric( +// objectPoints.ref, +// imagePoints.ref, +// cameraMatrix.ref, +// distCoeffs.ref, +// rvecs!.ptr, +// tvecs!.ptr, +// useExtrinsicGuess, +// flags, +// rvec!.ref, +// tvec!.ref, +// reprojectionError!.ref, +// prval, +// callback, +// ), +// ); +// final rval = prval.value; +// calloc.free(prval); +// return (rval, rvecs, tvecs, rvec, tvec, reprojectionError); +// } + +// /// Finds an object pose from 3D-2D point correspondences using the RANSAC scheme. +// /// +// /// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga50620f0e26e02caa2e9adc07b5fbf24e +// (bool rval, Mat rvec, Mat tvec, Mat inliers) solvePnPRansac( +// InputArray objectPoints, +// InputArray imagePoints, +// InputArray cameraMatrix, +// InputArray distCoeffs, { +// OutputArray? rvec, +// OutputArray? tvec, +// bool useExtrinsicGuess = false, +// int iterationsCount = 100, +// double reprojectionError = 8.0, +// double confidence = 0.99, +// OutputArray? inliers, +// int flags = SOLVEPNP_ITERATIVE, +// }) { +// rvec ??= Mat.empty(); +// tvec ??= Mat.empty(); +// inliers ??= Mat.empty(); +// final prval = calloc(); +// cvRun( +// () => ccalib3d.cv_solvePnPRansac( +// objectPoints.ref, +// imagePoints.ref, +// cameraMatrix.ref, +// distCoeffs.ref, +// rvec!.ref, +// tvec!.ref, +// useExtrinsicGuess, +// iterationsCount, +// reprojectionError, +// confidence, +// inliers!.ref, +// flags, +// prval, +// callback, +// ), +// ); +// final rval = prval.value; +// calloc.free(prval); +// return (rval, rvec, tvec, inliers); +// } + +// /// Finds an object pose from 3D-2D point correspondences using the RANSAC scheme. 
+// /// +// /// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#gab14667ec49eda61b4a3f14eb9704373b +// (bool rval, Mat rvec, Mat tvec, Mat inliers) solvePnPRansacCameraMatrixAsync( +// InputArray objectPoints, +// InputArray imagePoints, +// InputOutputArray cameraMatrix, +// InputArray distCoeffs, { +// OutputArray? rvec, +// OutputArray? tvec, +// OutputArray? inliers, +// UsacParams? params, +// }) { +// rvec ??= Mat.empty(); +// tvec ??= Mat.empty(); +// inliers ??= Mat.empty(); +// params ??= UsacParams(); +// final prval = calloc(); +// cvRun( +// () => ccalib3d.cv_solvePnPRansac_1( +// objectPoints.ref, +// imagePoints.ref, +// cameraMatrix.ref, +// distCoeffs.ref, +// rvec!.ref, +// tvec!.ref, +// inliers!.ref, +// params!.ref, +// prval, +// callback, +// ), +// ); +// final rval = prval.value; +// calloc.free(prval); +// return (rval, rvec, tvec, inliers); +// } + +// /// Refine a pose (the translation and the rotation that transform a 3D point expressed in the +// /// object coordinate frame to the camera coordinate frame) from a 3D-2D point correspondences and +// /// starting from an initial solution. +// /// +// /// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga650ba4d286a96d992f82c3e6dfa525fa +// void solvePnPRefineLM( +// InputArray objectPoints, +// InputArray imagePoints, +// InputArray cameraMatrix, +// InputArray distCoeffs, +// InputOutputArray rvec, +// InputOutputArray tvec, { +// TermCriteria? criteria, +// }) { +// // in opencv, this is TermCriteria(TermCriteria::EPS+TermCriteria::COUNT, 20, FLT_EPSILON) +// // FLT_EPSILON depends on the platform, here we use 1e-7 to simplify this. +// // which may get different results on than opencv c++. 
+// criteria ??= TermCriteria(TERM_EPS + TERM_COUNT, 20, 1e-7); +// return cvRun( +// () => ccalib3d.cv_solvePnPRefineLM( +// objectPoints.ref, +// imagePoints.ref, +// cameraMatrix.ref, +// distCoeffs.ref, +// rvec.ref, +// tvec.ref, +// criteria!.ref, +// callback, +// ), +// ); +// } + +// /// Refine a pose (the translation and the rotation that transform a 3D point expressed in the +// /// object coordinate frame to the camera coordinate frame) from a 3D-2D point correspondences and +// /// starting from an initial solution. +// /// +// /// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga17491c0282e4af874f6206a9166774a5 +// void solvePnPRefineVVS( +// InputArray objectPoints, +// InputArray imagePoints, +// InputArray cameraMatrix, +// InputArray distCoeffs, +// InputOutputArray rvec, +// InputOutputArray tvec, { +// TermCriteria? criteria, +// double VVSlambda = 1.0, +// }) { +// criteria ??= TermCriteria(TERM_EPS + TERM_COUNT, 20, 1e-7); +// return cvRun( +// () => ccalib3d.cv_solvePnPRefineVVS( +// objectPoints.ref, +// imagePoints.ref, +// cameraMatrix.ref, +// distCoeffs.ref, +// rvec.ref, +// tvec.ref, +// criteria!.ref, +// VVSlambda, +// callback, +// ), +// ); +// } + +// Transforms an image to compensate for lens distortion. +// The function transforms an image to compensate radial and tangential lens distortion. +// The function is simply a combination of initUndistortRectifyMap (with unity R ) and remap (with bilinear interpolation). See the former function for details of the transformation being performed. +// Those pixels in the destination image, for which there is no correspondent pixels in the source image, are filled with zeros (black color). +// A particular subset of the source image that will be visible in the corrected image can be regulated by newCameraMatrix. You can use getOptimalNewCameraMatrix to compute the appropriate newCameraMatrix depending on your requirements. 
+// The camera matrix and the distortion parameters can be determined using calibrateCamera. If the resolution of images is different from the resolution used at the calibration stage, fx,fy,cx and cy need to be scaled accordingly, while the distortion coefficients remain the same. +Future undistortAsync( + InputArray src, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? dst, + InputArray? newCameraMatrix, +}) async { + dst ??= Mat.empty(); + newCameraMatrix ??= Mat.empty(); + return cvRunAsync0( + (callback) => ccalib3d.cv_undistort( + src.ref, + dst!.ref, + cameraMatrix.ref, + distCoeffs.ref, + newCameraMatrix!.ref, + callback, + ), + (c) => c.complete(dst!), + ); +} + +/// Compute undistorted image points position. +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga6327c952253fd43f729c4008c2a45c17 +Future undistortImagePointsAsync( + InputArray src, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? dst, + TermCriteria? criteria, +}) async { + dst ??= Mat.empty(); + criteria ??= TermCriteria(TERM_MAX_ITER + TERM_EPS, 5, 0.01); + return cvRunAsync0( + (callback) => ccalib3d.cv_undistortImagePoints( + src.ref, + dst!.ref, + cameraMatrix.ref, + distCoeffs.ref, + criteria!.ref, + callback, + ), + (c) => c.complete(dst!), + ); +} + +// UndistortPoints transforms points to compensate for lens distortion +// +// For further details, please see: +// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga55c716492470bfe86b0ee9bf3a1f0f7e +Future undistortPointsAsync( + InputArray src, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? dst, + InputArray? R, + InputArray? 
P, + (int type, int count, double eps) criteria = (TERM_COUNT + TERM_EPS, 30, 1e-4), +}) async { + R ??= Mat.empty(); + P ??= Mat.empty(); + dst ??= Mat.empty(); + final tc = criteria.cvd; + return cvRunAsync0( + (callback) => ccalib3d.cv_undistortPoints( + src.ref, + dst!.ref, + cameraMatrix.ref, + distCoeffs.ref, + R!.ref, + P!.ref, + tc.ref, + callback, + ), + (c) => c.complete(dst!), + ); +} + +/// validates disparity using the left-right check. The matrix "cost" should be computed by the +/// stereo correspondence algorithm +/// +/// https://docs.opencv.org/4.11.0/d9/d0c/group__calib3d.html#ga214b498b8d01d0417e0d08be64c54eb5 +Future validateDisparityAsync( + InputOutputArray disparity, + InputArray cost, + int minDisparity, + int numberOfDisparities, { + int disp12MaxDisp = 1, +}) async { + return cvRunAsync0( + (callback) => ccalib3d.cv_validateDisparity( + disparity.ref, + cost.ref, + minDisparity, + numberOfDisparities, + disp12MaxDisp, + callback, + ), + (c) => c.complete(), + ); +} diff --git a/packages/dartcv/lib/src/calib3d/fisheye.dart b/packages/dartcv/lib/src/calib3d/fisheye.dart index c0366859..82e0fedf 100644 --- a/packages/dartcv/lib/src/calib3d/fisheye.dart +++ b/packages/dartcv/lib/src/calib3d/fisheye.dart @@ -6,12 +6,416 @@ library cv.calib3d.fisheye; import 'dart:ffi' as ffi; +import 'package:ffi/ffi.dart'; + import '../core/base.dart'; import '../core/mat.dart'; import '../core/size.dart'; +import '../core/termcriteria.dart'; +import '../g/constants.g.dart'; import '../native_lib.dart' show ccalib3d; +import 'calib3d.dart'; class Fisheye { + /// Performs camera calibration. + /// + /// https://docs.opencv.org/4.11.0/db/d58/group__calib3d__fisheye.html#gad626a78de2b1dae7489e152a5a5a89e1 + static (double rval, VecMat rvecs, VecMat tvecs) calibrate( + VecMat objectPoints, + VecMat imagePoints, + Size imageSize, + InputOutputArray K, + InputOutputArray D, { + VecMat? rvecs, + VecMat? tvecs, + int flags = 0, + TermCriteria? 
criteria, + }) { + criteria ??= TermCriteria(TERM_COUNT + TERM_EPS, 100, 2.0e-16); + rvecs ??= VecMat(); + tvecs ??= VecMat(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_fisheye_calibrate( + objectPoints.ref, + imagePoints.ref, + imageSize.ref, + K.ref, + D.ref, + rvecs!.ref, + tvecs!.ref, + flags, + criteria!.ref, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, rvecs, tvecs); + } + + /// async version of [calibrate] + static Future<(double rval, VecMat rvecs, VecMat tvecs)> calibrateAsync( + VecMat objectPoints, + VecMat imagePoints, + Size imageSize, + InputOutputArray K, + InputOutputArray D, { + VecMat? rvecs, + VecMat? tvecs, + int flags = 0, + TermCriteria? criteria, + }) async { + criteria ??= TermCriteria(TERM_COUNT + TERM_EPS, 100, 2.0e-16); + rvecs ??= VecMat(); + tvecs ??= VecMat(); + final prval = calloc(); + return cvRunAsync0( + (callback) => ccalib3d.cv_fisheye_calibrate( + objectPoints.ref, + imagePoints.ref, + imageSize.ref, + K.ref, + D.ref, + rvecs!.ref, + tvecs!.ref, + flags, + criteria!.ref, + prval, + callback, + ), (c) { + final rval = prval.value; + calloc.free(prval); + return c.complete((rval, rvecs!, tvecs!)); + }); + } + + /// Distorts 2D points using fisheye model. + /// + /// https://docs.opencv.org/4.11.0/db/d58/group__calib3d__fisheye.html#ga75d8877a98e38d0b29b6892c5f8d7765 + static Mat distortPoints( + InputArray undistorted, + InputArray K, + InputArray D, { + InputOutputArray? Kundistorted, + OutputArray? distorted, + double alpha = 0, + }) { + distorted ??= Mat.empty(); + + cvRun( + () => Kundistorted == null + ? 
ccalib3d.cv_fisheye_distortPoints( + undistorted.ref, + distorted!.ref, + K.ref, + D.ref, + alpha, + ffi.nullptr, + ) + : ccalib3d.cv_fisheye_distortPoints_1( + undistorted.ref, + distorted!.ref, + Kundistorted.ref, + K.ref, + D.ref, + alpha, + ffi.nullptr, + ), + ); + return distorted; + } + + /// async version of [distortPoints] + static Future distortPointsAsync( + InputArray undistorted, + InputArray K, + InputArray D, { + InputOutputArray? Kundistorted, + OutputArray? distorted, + double alpha = 0, + }) async { + distorted ??= Mat.empty(); + + return cvRunAsync0( + (callback) => Kundistorted == null + ? ccalib3d.cv_fisheye_distortPoints( + undistorted.ref, + distorted!.ref, + K.ref, + D.ref, + alpha, + callback, + ) + : ccalib3d.cv_fisheye_distortPoints_1( + undistorted.ref, + distorted!.ref, + Kundistorted.ref, + K.ref, + D.ref, + alpha, + callback, + ), + (c) => c.complete(distorted), + ); + } + + /// EstimateNewCameraMatrixForUndistortRectify estimates new camera matrix for undistortion or rectification. + /// + /// For further details, please see: + /// https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#ga384940fdf04c03e362e94b6eb9b673c9 + static Mat estimateNewCameraMatrixForUndistortRectify( + InputArray K, + InputArray D, + (int, int) imageSize, + InputArray R, { + OutputArray? P, + double balance = 0.0, + (int, int) newSize = (0, 0), + double fovScale = 1.0, + }) { + P ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_fisheye_estimateNewCameraMatrixForUndistortRectify( + K.ref, + D.ref, + imageSize.cvd.ref, + R.ref, + P!.ref, + balance, + newSize.cvd.ref, + fovScale, + ffi.nullptr, + ), + ); + return P; + } + + /// async version of [estimateNewCameraMatrixForUndistortRectify] + static Future estimateNewCameraMatrixForUndistortRectifyAsync( + InputArray K, + InputArray D, + (int, int) imageSize, + InputArray R, { + OutputArray? 
P, + double balance = 0.0, + (int, int) newSize = (0, 0), + double fovScale = 1.0, + }) async { + P ??= Mat.empty(); + return cvRunAsync0( + (callback) => ccalib3d.cv_fisheye_estimateNewCameraMatrixForUndistortRectify( + K.ref, + D.ref, + imageSize.cvd.ref, + R.ref, + P!.ref, + balance, + newSize.cvd.ref, + fovScale, + callback, + ), + (c) => c.complete(P), + ); + } + + /// Computes undistortion and rectification maps for image transform by remap. If D is empty zero distortion is used, if R or P is empty identity matrices are used. + /// + /// https://docs.opencv.org/4.11.0/db/d58/group__calib3d__fisheye.html#ga0d37b45f780b32f63ed19c21aa9fd333 + static (Mat map1, Mat map2) initUndistortRectifyMap( + InputArray K, + InputArray D, + InputArray R, + InputArray P, + Size size, + int m1type, { + OutputArray? map1, + OutputArray? map2, + }) { + map1 ??= Mat.empty(); + map2 ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_fisheye_initUndistortRectifyMap( + K.ref, + D.ref, + R.ref, + P.ref, + size.ref, + m1type, + map1!.ref, + map2!.ref, + ffi.nullptr, + ), + ); + return (map1, map2); + } + + /// async version of [initUndistortRectifyMap] + static Future<(Mat map1, Mat map2)> initUndistortRectifyMapAsync( + InputArray K, + InputArray D, + InputArray R, + InputArray P, + Size size, + int m1type, { + OutputArray? map1, + OutputArray? map2, + }) async { + map1 ??= Mat.empty(); + map2 ??= Mat.empty(); + return cvRunAsync0( + (callback) => ccalib3d.cv_fisheye_initUndistortRectifyMap( + K.ref, + D.ref, + R.ref, + P.ref, + size.ref, + m1type, + map1!.ref, + map2!.ref, + callback, + ), + (c) => c.complete((map1!, map2!)), + ); + } + + /// Projects points using fisheye model. + /// + /// https://docs.opencv.org/4.11.0/db/d58/group__calib3d__fisheye.html#gab1ad1dc30c42ee1a50ce570019baf2c4 + static (Mat imagePoints, Mat jacobian) projectPoints( + InputArray objectPoints, + InputArray rvec, + InputArray tvec, + InputArray K, + InputArray D, { + OutputArray? 
imagePoints, + double alpha = 0, + OutputArray? jacobian, + }) { + imagePoints ??= Mat.empty(); + jacobian ??= Mat.empty(); + cvRun( + () => ccalib3d.cv_fisheye_projectPoints( + objectPoints.ref, + imagePoints!.ref, + rvec.ref, + tvec.ref, + K.ref, + D.ref, + alpha, + jacobian!.ref, + ffi.nullptr, + ), + ); + return (imagePoints, jacobian); + } + + /// async version of [projectPoints] + static Future<(Mat imagePoints, Mat jacobian)> projectPointsAsync( + InputArray objectPoints, + InputArray rvec, + InputArray tvec, + InputArray K, + InputArray D, { + OutputArray? imagePoints, + double alpha = 0, + OutputArray? jacobian, + }) async { + imagePoints ??= Mat.empty(); + jacobian ??= Mat.empty(); + return cvRunAsync0( + (callback) => ccalib3d.cv_fisheye_projectPoints( + objectPoints.ref, + imagePoints!.ref, + rvec.ref, + tvec.ref, + K.ref, + D.ref, + alpha, + jacobian!.ref, + callback, + ), + (c) => c.complete((imagePoints!, jacobian!)), + ); + } + + /// Finds an object pose from 3D-2D point correspondences for fisheye camera model. + /// + /// https://docs.opencv.org/4.11.0/db/d58/group__calib3d__fisheye.html#gab1ad1dc30c42ee1a50ce570019baf2c4 + static (bool rval, Mat rvec, Mat tvec) solvePnP( + InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? rvec, + OutputArray? tvec, + bool useExtrinsicGuess = false, + int flags = SOLVEPNP_ITERATIVE, + TermCriteria? 
criteria, + }) { + criteria ??= TermCriteria(TERM_MAX_ITER + TERM_EPS, 10, 1e-8); + rvec ??= Mat.empty(); + tvec ??= Mat.empty(); + final prval = calloc(); + cvRun( + () => ccalib3d.cv_fisheye_solvePnP( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvec!.ref, + tvec!.ref, + useExtrinsicGuess, + flags, + criteria!.ref, + prval, + ffi.nullptr, + ), + ); + final rval = prval.value; + calloc.free(prval); + return (rval, rvec, tvec); + } + + /// async version of [solvePnP] + static Future<(bool rval, Mat rvec, Mat tvec)> solvePnPAsync( + InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, { + OutputArray? rvec, + OutputArray? tvec, + bool useExtrinsicGuess = false, + int flags = SOLVEPNP_ITERATIVE, + TermCriteria? criteria, + }) async { + criteria ??= TermCriteria(TERM_MAX_ITER + TERM_EPS, 10, 1e-8); + rvec ??= Mat.empty(); + tvec ??= Mat.empty(); + final prval = calloc(); + return cvRunAsync0( + (callback) => ccalib3d.cv_fisheye_solvePnP( + objectPoints.ref, + imagePoints.ref, + cameraMatrix.ref, + distCoeffs.ref, + rvec!.ref, + tvec!.ref, + useExtrinsicGuess, + flags, + criteria!.ref, + prval, + callback, + ), (c) { + final rval = prval.value; + calloc.free(prval); + return c.complete((rval, rvec!, tvec!)); + }); + } + + /// void distortPoints (InputArray undistorted, InputArray Kundistorted, InputArray K, InputArray D, OutputArray distorted, double alpha=0) + /// /// FisheyeUndistortImage transforms an image to compensate for fisheye lens distortion /// https://docs.opencv.org/3.4/db/d58/group__calib3d__fisheye.html#ga167df4b00a6fd55287ba829fbf9913b9 static Mat undistortImage( @@ -117,63 +521,4 @@ class Fisheye { (c) => c.complete(undistorted), ); } - - /// EstimateNewCameraMatrixForUndistortRectify estimates new camera matrix for undistortion or rectification. 
- /// - /// For further details, please see: - /// https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#ga384940fdf04c03e362e94b6eb9b673c9 - static Mat estimateNewCameraMatrixForUndistortRectify( - InputArray K, - InputArray D, - (int, int) imageSize, - InputArray R, { - OutputArray? P, - double balance = 0.0, - (int, int) newSize = (0, 0), - double fovScale = 1.0, - }) { - P ??= Mat.empty(); - cvRun( - () => ccalib3d.cv_fisheye_estimateNewCameraMatrixForUndistortRectify( - K.ref, - D.ref, - imageSize.cvd.ref, - R.ref, - P!.ref, - balance, - newSize.cvd.ref, - fovScale, - ffi.nullptr, - ), - ); - return P; - } - - /// async version of [estimateNewCameraMatrixForUndistortRectify] - static Future estimateNewCameraMatrixForUndistortRectifyAsync( - InputArray K, - InputArray D, - (int, int) imageSize, - InputArray R, { - OutputArray? P, - double balance = 0.0, - (int, int) newSize = (0, 0), - double fovScale = 1.0, - }) async { - P ??= Mat.empty(); - return cvRunAsync0( - (callback) => ccalib3d.cv_fisheye_estimateNewCameraMatrixForUndistortRectify( - K.ref, - D.ref, - imageSize.cvd.ref, - R.ref, - P!.ref, - balance, - newSize.cvd.ref, - fovScale, - callback, - ), - (c) => c.complete(P), - ); - } } diff --git a/packages/dartcv/lib/src/calib3d/usac_params.dart b/packages/dartcv/lib/src/calib3d/usac_params.dart new file mode 100644 index 00000000..da3ed97f --- /dev/null +++ b/packages/dartcv/lib/src/calib3d/usac_params.dart @@ -0,0 +1,132 @@ +import 'dart:ffi' as ffi; + +import 'package:ffi/ffi.dart'; + +import '../core/base.dart'; +import '../g/types.g.dart' as cvg; + +class UsacParams extends CvStruct { + UsacParams.fromPointer(ffi.Pointer ptr, [bool attach = true]) : super.fromPointer(ptr) { + if (attach) { + finalizer.attach(this, ptr.cast(), detach: this); + } + } + factory UsacParams({ + double confidence = 0, + bool isParallel = false, + int loIterations = 0, + int loMethod = LOCAL_OPTIM_NULL, + int loSampleSize = 0, + int maxIterations = 0, + int 
neighborsSearch = NEIGH_FLANN_KNN, + int randomGeneratorState = 0, + int sampler = SAMPLING_UNIFORM, + int score = SCORE_METHOD_RANSAC, + double threshold = 0, + int finalPolisher = NONE_POLISHER, + int finalPolisherIterations = 0, + }) { + final p = calloc() + ..ref.confidence = confidence + ..ref.isParallel = isParallel + ..ref.loIterations = loIterations + ..ref.loMethod = loMethod + ..ref.loSampleSize = loSampleSize + ..ref.maxIterations = maxIterations + ..ref.neighborsSearch = neighborsSearch + ..ref.randomGeneratorState = randomGeneratorState + ..ref.sampler = sampler + ..ref.score = score + ..ref.threshold = threshold + ..ref.final_polisher = finalPolisher + ..ref.final_polisher_iterations = finalPolisherIterations; + return UsacParams.fromPointer(p); + } + + static final finalizer = ffi.NativeFinalizer(calloc.nativeFree); + + @override + cvg.UsacParams get ref => ptr.ref; + + /// double confidence; + double get confidence => ref.confidence; + set confidence(double value) => ref.confidence = value; + + /// bool isParallel; + bool get isParallel => ref.isParallel; + set isParallel(bool value) => ref.isParallel = value; + + /// int loIterations; + int get loIterations => ref.loIterations; + set loIterations(int value) => ref.loIterations = value; + + /// int loMethod; + int get loMethod => ref.loMethod; + set loMethod(int value) => ref.loMethod = value; + + /// int loSampleSize; + int get loSampleSize => ref.loSampleSize; + set loSampleSize(int value) => ref.loSampleSize = value; + + /// int maxIterations; + int get maxIterations => ref.maxIterations; + set maxIterations(int value) => ref.maxIterations = value; + + /// int neighborsSearch; + int get neighborsSearch => ref.neighborsSearch; + set neighborsSearch(int value) => ref.neighborsSearch = value; + + /// int randomGeneratorState; + int get randomGeneratorState => ref.randomGeneratorState; + set randomGeneratorState(int value) => ref.randomGeneratorState = value; + + /// int sampler; + int get sampler => 
ref.sampler; + set sampler(int value) => ref.sampler = value; + + /// int score; + int get score => ref.score; + set score(int value) => ref.score = value; + + /// double threshold; + double get threshold => ref.threshold; + set threshold(double value) => ref.threshold = value; + + /// int finalPolisher; + int get finalPolisher => ref.final_polisher; + set finalPolisher(int value) => ref.final_polisher = value; + + /// int finalPolisherIterations; + int get finalPolisherIterations => ref.final_polisher_iterations; + set finalPolisherIterations(int value) => ref.final_polisher_iterations = value; +} + +// enum SamplingMethod { SAMPLING_UNIFORM=0, SAMPLING_PROGRESSIVE_NAPSAC=1, SAMPLING_NAPSAC=2, SAMPLING_PROSAC=3 }; +const int SAMPLING_UNIFORM = 0; +const int SAMPLING_PROGRESSIVE_NAPSAC = 1; +const int SAMPLING_NAPSAC = 2; +const int SAMPLING_PROSAC = 3; + +// enum LocalOptimMethod {LOCAL_OPTIM_NULL=0, LOCAL_OPTIM_INNER_LO=1, LOCAL_OPTIM_INNER_AND_ITER_LO=2, LOCAL_OPTIM_GC=3, LOCAL_OPTIM_SIGMA=4}; +const int LOCAL_OPTIM_NULL = 0; +const int LOCAL_OPTIM_INNER_LO = 1; +const int LOCAL_OPTIM_INNER_AND_ITER_LO = 2; +const int LOCAL_OPTIM_GC = 3; +const int LOCAL_OPTIM_SIGMA = 4; + +// enum ScoreMethod {SCORE_METHOD_RANSAC=0, SCORE_METHOD_MSAC=1, SCORE_METHOD_MAGSAC=2, SCORE_METHOD_LMEDS=3}; +const int SCORE_METHOD_RANSAC = 0; +const int SCORE_METHOD_MSAC = 1; +const int SCORE_METHOD_MAGSAC = 2; +const int SCORE_METHOD_LMEDS = 3; + +// enum NeighborSearchMethod { NEIGH_FLANN_KNN=0, NEIGH_GRID=1, NEIGH_FLANN_RADIUS=2 }; +const int NEIGH_FLANN_KNN = 0; +const int NEIGH_GRID = 1; +const int NEIGH_FLANN_RADIUS = 2; + +// enum PolishingMethod { NONE_POLISHER=0, LSQ_POLISHER=1, MAGSAC=2, COV_POLISHER=3 }; +const int NONE_POLISHER = 0; +const int LSQ_POLISHER = 1; +const int MAGSAC = 2; +const int COV_POLISHER = 3; diff --git a/packages/dartcv/lib/src/core/point.dart b/packages/dartcv/lib/src/core/point.dart index 8ced670c..c5daae00 100644 --- 
a/packages/dartcv/lib/src/core/point.dart +++ b/packages/dartcv/lib/src/core/point.dart @@ -82,6 +82,41 @@ class Point2f extends CvStruct { List get props => [x, y]; } +class Point2d extends CvStruct { + Point2d.fromPointer(ffi.Pointer ptr, [bool attach = true]) : super.fromPointer(ptr) { + if (attach) { + finalizer.attach(this, ptr.cast(), detach: this); + } + } + factory Point2d(double x, double y) { + final ptr = calloc() + ..ref.x = x + ..ref.y = y; + return Point2d.fromPointer(ptr); + } + factory Point2d.fromNative(cvg.CvPoint2d p) => Point2d(p.x, p.y); + + static final finalizer = ffi.NativeFinalizer(calloc.nativeFree); + + void dispose() { + finalizer.detach(this); + calloc.free(ptr); + } + + double get x => ptr.ref.x; + set x(double value) => ref.x = value; + + double get y => ptr.ref.y; + set y(double value) => ref.y = value; + + @override + cvg.CvPoint2d get ref => ptr.ref; + @override + String toString() => 'Point2d(${x.toStringAsFixed(3)}, ${y.toStringAsFixed(3)})'; + @override + List get props => [x, y]; +} + class Point3f extends CvStruct { Point3f.fromPointer(ffi.Pointer ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { diff --git a/packages/dartcv/lib/src/g/calib3d.g.dart b/packages/dartcv/lib/src/g/calib3d.g.dart index 28a78320..d87d5dc1 100644 --- a/packages/dartcv/lib/src/g/calib3d.g.dart +++ b/packages/dartcv/lib/src/g/calib3d.g.dart @@ -30,6 +30,72 @@ class CvNativeCalib3d { lookup) : _lookup = lookup; + ffi.Pointer cv_RQDecomp3x3( + imp1.MatIn src, + imp1.MatOut mtxR, + imp1.MatOut mtxQ, + imp1.MatOut Qx, + imp1.MatOut Qy, + imp1.MatOut Qz, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_RQDecomp3x3( + src, + mtxR, + mtxQ, + Qx, + Qy, + Qz, + rval, + callback, + ); + } + + late final _cv_RQDecomp3x3Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_RQDecomp3x3'); + 
late final _cv_RQDecomp3x3 = _cv_RQDecomp3x3Ptr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_Rodrigues( + imp1.MatIn src, + imp1.MatOut dst, + imp1.MatOut jacobian, + imp1.CvCallback_0 callback, + ) { + return _cv_Rodrigues( + src, + dst, + jacobian, + callback, + ); + } + + late final _cv_RodriguesPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatOut, imp1.MatOut, + imp1.CvCallback_0)>>('cv_Rodrigues'); + late final _cv_Rodrigues = _cv_RodriguesPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, imp1.MatOut, imp1.MatOut, imp1.CvCallback_0)>(); + ffi.Pointer cv_calibrateCamera( VecVecPoint3f objectPoints, VecVecPoint2f imagePoints, @@ -86,6 +152,250 @@ class CvNativeCalib3d { ffi.Pointer, imp1.CvCallback_0)>(); + bool cv_checkChessboard( + Mat img, + CvSize size, + ) { + return _cv_checkChessboard( + img, + size, + ); + } + + late final _cv_checkChessboardPtr = + _lookup>( + 'cv_checkChessboard'); + late final _cv_checkChessboard = + _cv_checkChessboardPtr.asFunction(); + + ffi.Pointer cv_computeCorrespondEpilines( + imp1.MatIn src, + int whichImage, + imp1.MatIn F, + imp1.MatOut lines, + imp1.CvCallback_0 callback, + ) { + return _cv_computeCorrespondEpilines( + src, + whichImage, + F, + lines, + callback, + ); + } + + late final _cv_computeCorrespondEpilinesPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(imp1.MatIn, ffi.Int, imp1.MatIn, + imp1.MatOut, imp1.CvCallback_0)>>('cv_computeCorrespondEpilines'); + late final _cv_computeCorrespondEpilines = + _cv_computeCorrespondEpilinesPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, int, imp1.MatIn, imp1.MatOut, imp1.CvCallback_0)>(); + + ffi.Pointer cv_convertPointsFromHomogeneous( + imp1.MatIn src, + imp1.MatOut dst, + imp1.CvCallback_0 callback, + ) { + return _cv_convertPointsFromHomogeneous( + src, + dst, + callback, + ); + } + + 
late final _cv_convertPointsFromHomogeneousPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatOut, + imp1.CvCallback_0)>>('cv_convertPointsFromHomogeneous'); + late final _cv_convertPointsFromHomogeneous = + _cv_convertPointsFromHomogeneousPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, imp1.MatOut, imp1.CvCallback_0)>(); + + ffi.Pointer cv_convertPointsHomogeneous( + imp1.MatIn src, + imp1.MatOut dst, + imp1.CvCallback_0 callback, + ) { + return _cv_convertPointsHomogeneous( + src, + dst, + callback, + ); + } + + late final _cv_convertPointsHomogeneousPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatOut, + imp1.CvCallback_0)>>('cv_convertPointsHomogeneous'); + late final _cv_convertPointsHomogeneous = + _cv_convertPointsHomogeneousPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, imp1.MatOut, imp1.CvCallback_0)>(); + + ffi.Pointer cv_convertPointsToHomogeneous( + imp1.MatIn src, + imp1.MatOut dst, + imp1.CvCallback_0 callback, + ) { + return _cv_convertPointsToHomogeneous( + src, + dst, + callback, + ); + } + + late final _cv_convertPointsToHomogeneousPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatOut, + imp1.CvCallback_0)>>('cv_convertPointsToHomogeneous'); + late final _cv_convertPointsToHomogeneous = + _cv_convertPointsToHomogeneousPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, imp1.MatOut, imp1.CvCallback_0)>(); + + ffi.Pointer cv_correctMatches( + imp1.MatIn F, + imp1.MatIn points1, + imp1.MatIn points2, + imp1.MatOut newPoints1, + imp1.MatOut newPoints2, + imp1.CvCallback_0 callback, + ) { + return _cv_correctMatches( + F, + points1, + points2, + newPoints1, + newPoints2, + callback, + ); + } + + late final _cv_correctMatchesPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + imp1.CvCallback_0)>>('cv_correctMatches'); + late final _cv_correctMatches = 
_cv_correctMatchesPtr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatIn, imp1.MatIn, + imp1.MatOut, imp1.MatOut, imp1.CvCallback_0)>(); + + ffi.Pointer cv_decomposeEssentialMat( + imp1.MatIn E, + imp1.MatOut R1, + imp1.MatOut R2, + imp1.MatOut t, + imp1.CvCallback_0 callback, + ) { + return _cv_decomposeEssentialMat( + E, + R1, + R2, + t, + callback, + ); + } + + late final _cv_decomposeEssentialMatPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatOut, imp1.MatOut, + imp1.MatOut, imp1.CvCallback_0)>>('cv_decomposeEssentialMat'); + late final _cv_decomposeEssentialMat = + _cv_decomposeEssentialMatPtr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatOut, imp1.MatOut, + imp1.MatOut, imp1.CvCallback_0)>(); + + ffi.Pointer cv_decomposeHomographyMat( + imp1.MatIn H, + imp1.MatIn K, + VecMat rotations, + VecMat translations, + VecMat normals, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_decomposeHomographyMat( + H, + K, + rotations, + translations, + normals, + rval, + callback, + ); + } + + late final _cv_decomposeHomographyMatPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + VecMat, + VecMat, + VecMat, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_decomposeHomographyMat'); + late final _cv_decomposeHomographyMat = + _cv_decomposeHomographyMatPtr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatIn, VecMat, VecMat, + VecMat, ffi.Pointer, imp1.CvCallback_0)>(); + + ffi.Pointer cv_decomposeProjectionMatrix( + imp1.MatIn projMatrix, + imp1.MatOut cameraMatrix, + imp1.MatOut rotMatrix, + imp1.MatOut transVect, + imp1.MatOut rotMatrixX, + imp1.MatOut rotMatrixY, + imp1.MatOut rotMatrixZ, + imp1.MatOut eulerAngles, + imp1.CvCallback_0 callback, + ) { + return _cv_decomposeProjectionMatrix( + projMatrix, + cameraMatrix, + rotMatrix, + transVect, + rotMatrixX, + rotMatrixY, + rotMatrixZ, + eulerAngles, + callback, + ); + } + + late final _cv_decomposeProjectionMatrixPtr 
= _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.CvCallback_0)>>('cv_decomposeProjectionMatrix'); + late final _cv_decomposeProjectionMatrix = + _cv_decomposeProjectionMatrixPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.CvCallback_0)>(); + ffi.Pointer cv_drawChessboardCorners( Mat image, CvSize patternSize, @@ -111,6 +421,43 @@ class CvNativeCalib3d { ffi.Pointer Function( Mat, CvSize, VecPoint2f, bool, imp1.CvCallback_0)>(); + ffi.Pointer cv_drawFrameAxes( + imp1.MatInOut image, + imp1.MatIn cameraMatrix, + imp1.MatIn distCoeffs, + imp1.MatIn rvec, + imp1.MatIn tvec, + double length, + int thickness, + imp1.CvCallback_0 callback, + ) { + return _cv_drawFrameAxes( + image, + cameraMatrix, + distCoeffs, + rvec, + tvec, + length, + thickness, + callback, + ); + } + + late final _cv_drawFrameAxesPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatInOut, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + ffi.Float, + ffi.Int, + imp1.CvCallback_0)>>('cv_drawFrameAxes'); + late final _cv_drawFrameAxes = _cv_drawFrameAxesPtr.asFunction< + ffi.Pointer Function(imp1.MatInOut, imp1.MatIn, imp1.MatIn, + imp1.MatIn, imp1.MatIn, double, int, imp1.CvCallback_0)>(); + ffi.Pointer cv_estimateAffine2D( VecPoint2f from, VecPoint2f to, @@ -176,6 +523,74 @@ class CvNativeCalib3d { ffi.Pointer Function(VecPoint2f, VecPoint2f, Mat, int, double, int, double, int, ffi.Pointer, imp1.CvCallback_0)>(); + ffi.Pointer cv_estimateAffine3D( + Mat src, + Mat dst, + ffi.Pointer scale, + bool force_rotation, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_estimateAffine3D( + src, + dst, + scale, + force_rotation, + rval, + callback, + ); + } + + late final _cv_estimateAffine3DPtr = _lookup< + 
ffi.NativeFunction< + ffi.Pointer Function( + Mat, + Mat, + ffi.Pointer, + ffi.Bool, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_estimateAffine3D'); + late final _cv_estimateAffine3D = _cv_estimateAffine3DPtr.asFunction< + ffi.Pointer Function(Mat, Mat, ffi.Pointer, bool, + ffi.Pointer, imp1.CvCallback_0)>(); + + ffi.Pointer cv_estimateAffine3D_1( + Mat src, + Mat dst, + Mat out, + Mat inliers, + double ransacThreshold, + double confidence, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_estimateAffine3D_1( + src, + dst, + out, + inliers, + ransacThreshold, + confidence, + rval, + callback, + ); + } + + late final _cv_estimateAffine3D_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + Mat, + Mat, + Mat, + Mat, + ffi.Double, + ffi.Double, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_estimateAffine3D_1'); + late final _cv_estimateAffine3D_1 = _cv_estimateAffine3D_1Ptr.asFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, double, double, + ffi.Pointer, imp1.CvCallback_0)>(); + ffi.Pointer cv_estimateAffinePartial2D( VecPoint2f from, VecPoint2f to, @@ -246,6 +661,179 @@ class CvNativeCalib3d { ffi.Pointer Function(VecPoint2f, VecPoint2f, Mat, int, double, int, double, int, ffi.Pointer, imp1.CvCallback_0)>(); + ffi.Pointer cv_estimateChessboardSharpness( + imp1.MatIn image, + CvSize patternSize, + imp1.MatIn corners, + double rise_distance, + bool vertical, + imp1.MatOut sharpness, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_estimateChessboardSharpness( + image, + patternSize, + corners, + rise_distance, + vertical, + sharpness, + rval, + callback, + ); + } + + late final _cv_estimateChessboardSharpnessPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + CvSize, + imp1.MatIn, + ffi.Float, + ffi.Bool, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_estimateChessboardSharpness'); + late final _cv_estimateChessboardSharpness = + _cv_estimateChessboardSharpnessPtr.asFunction< + 
ffi.Pointer Function(imp1.MatIn, CvSize, imp1.MatIn, double, + bool, imp1.MatOut, ffi.Pointer, imp1.CvCallback_0)>(); + + ffi.Pointer cv_estimateTranslation3D( + imp1.MatIn src, + imp1.MatIn dst, + imp1.MatOut out, + imp1.MatOut inliers, + double ransacThreshold, + double confidence, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_estimateTranslation3D( + src, + dst, + out, + inliers, + ransacThreshold, + confidence, + rval, + callback, + ); + } + + late final _cv_estimateTranslation3DPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + ffi.Double, + ffi.Double, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_estimateTranslation3D'); + late final _cv_estimateTranslation3D = + _cv_estimateTranslation3DPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + double, + double, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_filterHomographyDecompByVisibleRefpoints( + VecMat rotations, + VecMat normals, + imp1.MatIn beforePoints, + imp1.MatIn afterPoints, + imp1.MatOut possibleSolutions, + imp1.MatIn pointsMask, + imp1.CvCallback_0 callback, + ) { + return _cv_filterHomographyDecompByVisibleRefpoints( + rotations, + normals, + beforePoints, + afterPoints, + possibleSolutions, + pointsMask, + callback, + ); + } + + late final _cv_filterHomographyDecompByVisibleRefpointsPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(VecMat, VecMat, imp1.MatIn, + imp1.MatIn, imp1.MatOut, imp1.MatIn, imp1.CvCallback_0)>>( + 'cv_filterHomographyDecompByVisibleRefpoints'); + late final _cv_filterHomographyDecompByVisibleRefpoints = + _cv_filterHomographyDecompByVisibleRefpointsPtr.asFunction< + ffi.Pointer Function(VecMat, VecMat, imp1.MatIn, imp1.MatIn, + imp1.MatOut, imp1.MatIn, imp1.CvCallback_0)>(); + + ffi.Pointer cv_filterSpeckles( + imp1.MatInOut img, + double newVal, + int maxSpeckleSize, + double maxDiff, + imp1.MatInOut buf, + 
imp1.CvCallback_0 callback, + ) { + return _cv_filterSpeckles( + img, + newVal, + maxSpeckleSize, + maxDiff, + buf, + callback, + ); + } + + late final _cv_filterSpecklesPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatInOut, + ffi.Double, + ffi.Int, + ffi.Double, + imp1.MatInOut, + imp1.CvCallback_0)>>('cv_filterSpeckles'); + late final _cv_filterSpeckles = _cv_filterSpecklesPtr.asFunction< + ffi.Pointer Function(imp1.MatInOut, double, int, double, + imp1.MatInOut, imp1.CvCallback_0)>(); + + ffi.Pointer cv_find4QuadCornerSubpix( + imp1.MatIn img, + imp1.MatInOut corners, + CvSize region_size, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_find4QuadCornerSubpix( + img, + corners, + region_size, + rval, + callback, + ); + } + + late final _cv_find4QuadCornerSubpixPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatInOut, + CvSize, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_find4QuadCornerSubpix'); + late final _cv_find4QuadCornerSubpix = + _cv_find4QuadCornerSubpixPtr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatInOut, CvSize, + ffi.Pointer, imp1.CvCallback_0)>(); + ffi.Pointer cv_findChessboardCorners( Mat image, CvSize patternSize, @@ -345,21 +933,268 @@ class CvNativeCalib3d { ffi.Pointer Function(Mat, CvSize, ffi.Pointer, int, Mat, ffi.Pointer, imp1.CvCallback_0)>(); - ffi.Pointer cv_findHomography( - Mat src, - Mat dst, - int method, - double ransacReprojThreshold, - Mat mask, - int maxIters, - double confidence, - ffi.Pointer rval, + ffi.Pointer cv_findCirclesGrid( + imp1.MatIn image, + CvSize patternSize, + imp1.MatOut centers, + int flags, + ffi.Pointer rval, imp1.CvCallback_0 callback, ) { - return _cv_findHomography( - src, - dst, - method, + return _cv_findCirclesGrid( + image, + patternSize, + centers, + flags, + rval, + callback, + ); + } + + late final _cv_findCirclesGridPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + CvSize, + 
imp1.MatOut, + ffi.Int, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_findCirclesGrid'); + late final _cv_findCirclesGrid = _cv_findCirclesGridPtr.asFunction< + ffi.Pointer Function(imp1.MatIn, CvSize, imp1.MatOut, int, + ffi.Pointer, imp1.CvCallback_0)>(); + + ffi.Pointer cv_findEssentialMat( + imp1.MatIn points1, + imp1.MatIn points2, + double focal, + CvPoint2d pp, + int method, + double prob, + double threshold, + int maxIters, + imp1.MatOut mask, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_findEssentialMat( + points1, + points2, + focal, + pp, + method, + prob, + threshold, + maxIters, + mask, + rval, + callback, + ); + } + + late final _cv_findEssentialMatPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + ffi.Double, + CvPoint2d, + ffi.Int, + ffi.Double, + ffi.Double, + ffi.Int, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_findEssentialMat'); + late final _cv_findEssentialMat = _cv_findEssentialMatPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + double, + CvPoint2d, + int, + double, + double, + int, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_findEssentialMat_1( + imp1.MatIn points1, + imp1.MatIn points2, + imp1.MatIn cameraMatrix, + int method, + double prob, + double threshold, + int maxIters, + imp1.MatOut mask, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_findEssentialMat_1( + points1, + points2, + cameraMatrix, + method, + prob, + threshold, + maxIters, + mask, + rval, + callback, + ); + } + + late final _cv_findEssentialMat_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + ffi.Int, + ffi.Double, + ffi.Double, + ffi.Int, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_findEssentialMat_1'); + late final _cv_findEssentialMat_1 = _cv_findEssentialMat_1Ptr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + int, + double, 
+ double, + int, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_findFundamentalMat( + imp1.MatIn points1, + imp1.MatIn points2, + int method, + double ransacReprojThreshold, + double confidence, + int maxIters, + imp1.MatOut mask, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_findFundamentalMat( + points1, + points2, + method, + ransacReprojThreshold, + confidence, + maxIters, + mask, + rval, + callback, + ); + } + + late final _cv_findFundamentalMatPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + ffi.Int, + ffi.Double, + ffi.Double, + ffi.Int, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_findFundamentalMat'); + late final _cv_findFundamentalMat = _cv_findFundamentalMatPtr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatIn, int, double, + double, int, imp1.MatOut, ffi.Pointer, imp1.CvCallback_0)>(); + + ffi.Pointer cv_findFundamentalMat_1( + imp1.MatIn points1, + imp1.MatIn points2, + imp1.MatOut mask, + UsacParams params, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_findFundamentalMat_1( + points1, + points2, + mask, + params, + rval, + callback, + ); + } + + late final _cv_findFundamentalMat_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + UsacParams, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_findFundamentalMat_1'); + late final _cv_findFundamentalMat_1 = _cv_findFundamentalMat_1Ptr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatIn, imp1.MatOut, + UsacParams, ffi.Pointer, imp1.CvCallback_0)>(); + + ffi.Pointer cv_findFundamentalMat_2( + imp1.MatIn points1, + imp1.MatIn points2, + imp1.MatOut mask, + int method, + double ransacReprojThreshold, + double confidence, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_findFundamentalMat_2( + points1, + points2, + mask, + method, + ransacReprojThreshold, + confidence, + rval, + callback, + ); + } + + 
late final _cv_findFundamentalMat_2Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + ffi.Int, + ffi.Double, + ffi.Double, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_findFundamentalMat_2'); + late final _cv_findFundamentalMat_2 = _cv_findFundamentalMat_2Ptr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatIn, imp1.MatOut, int, + double, double, ffi.Pointer, imp1.CvCallback_0)>(); + + ffi.Pointer cv_findHomography( + Mat src, + Mat dst, + int method, + double ransacReprojThreshold, + imp1.MatOut mask, + int maxIters, + double confidence, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_findHomography( + src, + dst, + method, ransacReprojThreshold, mask, maxIters, @@ -369,221 +1204,1452 @@ class CvNativeCalib3d { ); } - late final _cv_findHomographyPtr = _lookup< + late final _cv_findHomographyPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + Mat, + Mat, + ffi.Int, + ffi.Double, + imp1.MatOut, + ffi.Int, + ffi.Double, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_findHomography'); + late final _cv_findHomography = _cv_findHomographyPtr.asFunction< + ffi.Pointer Function(Mat, Mat, int, double, imp1.MatOut, int, + double, ffi.Pointer, imp1.CvCallback_0)>(); + + ffi.Pointer cv_findHomography_1( + imp1.MatIn srcPoints, + imp1.MatIn dstPoints, + imp1.MatOut mask, + UsacParams params, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_findHomography_1( + srcPoints, + dstPoints, + mask, + params, + rval, + callback, + ); + } + + late final _cv_findHomography_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + UsacParams, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_findHomography_1'); + late final _cv_findHomography_1 = _cv_findHomography_1Ptr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatIn, imp1.MatOut, + UsacParams, ffi.Pointer, imp1.CvCallback_0)>(); + + ffi.Pointer cv_fisheye_calibrate( + VecMat 
objectPoints, + VecMat imagePoints, + CvSize imageSize, + imp1.MatInOut k, + imp1.MatInOut d, + VecMat rvecs, + VecMat tvecs, + int flags, + TermCriteria criteria, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_fisheye_calibrate( + objectPoints, + imagePoints, + imageSize, + k, + d, + rvecs, + tvecs, + flags, + criteria, + rval, + callback, + ); + } + + late final _cv_fisheye_calibratePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + VecMat, + VecMat, + CvSize, + imp1.MatInOut, + imp1.MatInOut, + VecMat, + VecMat, + ffi.Int, + TermCriteria, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_fisheye_calibrate'); + late final _cv_fisheye_calibrate = _cv_fisheye_calibratePtr.asFunction< + ffi.Pointer Function( + VecMat, + VecMat, + CvSize, + imp1.MatInOut, + imp1.MatInOut, + VecMat, + VecMat, + int, + TermCriteria, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_fisheye_distortPoints( + imp1.MatIn undistorted, + imp1.MatOut distorted, + imp1.MatIn K, + imp1.MatIn D, + double alpha, + imp1.CvCallback_0 callback, + ) { + return _cv_fisheye_distortPoints( + undistorted, + distorted, + K, + D, + alpha, + callback, + ); + } + + late final _cv_fisheye_distortPointsPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatOut, + imp1.MatIn, + imp1.MatIn, + ffi.Double, + imp1.CvCallback_0)>>('cv_fisheye_distortPoints'); + late final _cv_fisheye_distortPoints = + _cv_fisheye_distortPointsPtr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatOut, imp1.MatIn, + imp1.MatIn, double, imp1.CvCallback_0)>(); + + ffi.Pointer cv_fisheye_distortPoints_1( + imp1.MatIn undistorted, + imp1.MatOut distorted, + imp1.MatInOut Kundistorted, + imp1.MatIn K, + imp1.MatIn D, + double alpha, + imp1.CvCallback_0 callback, + ) { + return _cv_fisheye_distortPoints_1( + undistorted, + distorted, + Kundistorted, + K, + D, + alpha, + callback, + ); + } + + late final _cv_fisheye_distortPoints_1Ptr = _lookup< + ffi.NativeFunction< + 
ffi.Pointer Function( + imp1.MatIn, + imp1.MatOut, + imp1.MatInOut, + imp1.MatIn, + imp1.MatIn, + ffi.Double, + imp1.CvCallback_0)>>('cv_fisheye_distortPoints_1'); + late final _cv_fisheye_distortPoints_1 = + _cv_fisheye_distortPoints_1Ptr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatOut, imp1.MatInOut, + imp1.MatIn, imp1.MatIn, double, imp1.CvCallback_0)>(); + + ffi.Pointer cv_fisheye_estimateNewCameraMatrixForUndistortRectify( + imp1.MatIn k, + imp1.MatIn d, + CvSize imgSize, + imp1.MatIn r, + imp1.MatOut p, + double balance, + CvSize newSize, + double fovScale, + imp1.CvCallback_0 callback, + ) { + return _cv_fisheye_estimateNewCameraMatrixForUndistortRectify( + k, + d, + imgSize, + r, + p, + balance, + newSize, + fovScale, + callback, + ); + } + + late final _cv_fisheye_estimateNewCameraMatrixForUndistortRectifyPtr = + _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + CvSize, + imp1.MatIn, + imp1.MatOut, + ffi.Double, + CvSize, + ffi.Double, + imp1.CvCallback_0)>>( + 'cv_fisheye_estimateNewCameraMatrixForUndistortRectify'); + late final _cv_fisheye_estimateNewCameraMatrixForUndistortRectify = + _cv_fisheye_estimateNewCameraMatrixForUndistortRectifyPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + CvSize, + imp1.MatIn, + imp1.MatOut, + double, + CvSize, + double, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_fisheye_initUndistortRectifyMap( + imp1.MatIn k, + imp1.MatIn d, + imp1.MatIn r, + imp1.MatIn p, + CvSize imgSize, + int m1type, + imp1.MatOut map1, + imp1.MatOut map2, + imp1.CvCallback_0 callback, + ) { + return _cv_fisheye_initUndistortRectifyMap( + k, + d, + r, + p, + imgSize, + m1type, + map1, + map2, + callback, + ); + } + + late final _cv_fisheye_initUndistortRectifyMapPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + CvSize, + ffi.Int, + imp1.MatOut, + imp1.MatOut, + 
imp1.CvCallback_0)>>('cv_fisheye_initUndistortRectifyMap'); + late final _cv_fisheye_initUndistortRectifyMap = + _cv_fisheye_initUndistortRectifyMapPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + CvSize, + int, + imp1.MatOut, + imp1.MatOut, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_fisheye_projectPoints( + imp1.MatIn objectPoints, + imp1.MatOut imagePoints, + imp1.MatIn rvec, + imp1.MatIn tvec, + imp1.MatIn k, + imp1.MatIn d, + double alpha, + imp1.MatOut jacobian, + imp1.CvCallback_0 callback, + ) { + return _cv_fisheye_projectPoints( + objectPoints, + imagePoints, + rvec, + tvec, + k, + d, + alpha, + jacobian, + callback, + ); + } + + late final _cv_fisheye_projectPointsPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatOut, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + ffi.Double, + imp1.MatOut, + imp1.CvCallback_0)>>('cv_fisheye_projectPoints'); + late final _cv_fisheye_projectPoints = + _cv_fisheye_projectPointsPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatOut, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + double, + imp1.MatOut, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_fisheye_solvePnP( + imp1.MatIn objectPoints, + imp1.MatIn imagePoints, + imp1.MatIn cameraMatrix, + imp1.MatIn distCoeffs, + imp1.MatOut rvec, + imp1.MatOut tvec, + bool useExtrinsicGuess, + int flags, + TermCriteria criteria, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_fisheye_solvePnP( + objectPoints, + imagePoints, + cameraMatrix, + distCoeffs, + rvec, + tvec, + useExtrinsicGuess, + flags, + criteria, + rval, + callback, + ); + } + + late final _cv_fisheye_solvePnPPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + ffi.Bool, + ffi.Int, + TermCriteria, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_fisheye_solvePnP'); + late final 
_cv_fisheye_solvePnP = _cv_fisheye_solvePnPPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + bool, + int, + TermCriteria, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_fisheye_undistortImage( + Mat distorted, + Mat undistorted, + Mat k, + Mat d, + imp1.CvCallback_0 callback, + ) { + return _cv_fisheye_undistortImage( + distorted, + undistorted, + k, + d, + callback, + ); + } + + late final _cv_fisheye_undistortImagePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, + imp1.CvCallback_0)>>('cv_fisheye_undistortImage'); + late final _cv_fisheye_undistortImage = + _cv_fisheye_undistortImagePtr.asFunction< + ffi.Pointer Function( + Mat, Mat, Mat, Mat, imp1.CvCallback_0)>(); + + ffi.Pointer cv_fisheye_undistortImage_1( + Mat distorted, + Mat undistorted, + Mat k, + Mat d, + Mat knew, + CvSize size, + imp1.CvCallback_0 callback, + ) { + return _cv_fisheye_undistortImage_1( + distorted, + undistorted, + k, + d, + knew, + size, + callback, + ); + } + + late final _cv_fisheye_undistortImage_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, Mat, CvSize, + imp1.CvCallback_0)>>('cv_fisheye_undistortImage_1'); + late final _cv_fisheye_undistortImage_1 = + _cv_fisheye_undistortImage_1Ptr.asFunction< + ffi.Pointer Function( + Mat, Mat, Mat, Mat, Mat, CvSize, imp1.CvCallback_0)>(); + + ffi.Pointer cv_fisheye_undistortPoints( + Mat distorted, + Mat undistorted, + Mat k, + Mat d, + Mat R, + Mat P, + imp1.CvCallback_0 callback, + ) { + return _cv_fisheye_undistortPoints( + distorted, + undistorted, + k, + d, + R, + P, + callback, + ); + } + + late final _cv_fisheye_undistortPointsPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, Mat, Mat, + imp1.CvCallback_0)>>('cv_fisheye_undistortPoints'); + late final _cv_fisheye_undistortPoints = + _cv_fisheye_undistortPointsPtr.asFunction< + ffi.Pointer Function( + 
Mat, Mat, Mat, Mat, Mat, Mat, imp1.CvCallback_0)>(); + + ffi.Pointer cv_getDefaultNewCameraMatrix( + Mat cameraMatrix, + CvSize size, + bool centerPrincipalPoint, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_getDefaultNewCameraMatrix( + cameraMatrix, + size, + centerPrincipalPoint, + rval, + callback, + ); + } + + late final _cv_getDefaultNewCameraMatrixPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + Mat, + CvSize, + ffi.Bool, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_getDefaultNewCameraMatrix'); + late final _cv_getDefaultNewCameraMatrix = + _cv_getDefaultNewCameraMatrixPtr.asFunction< + ffi.Pointer Function( + Mat, CvSize, bool, ffi.Pointer, imp1.CvCallback_0)>(); + + ffi.Pointer cv_getOptimalNewCameraMatrix( + Mat cameraMatrix, + Mat distCoeffs, + CvSize size, + double alpha, + CvSize newImgSize, + ffi.Pointer validPixROI, + bool centerPrincipalPoint, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_getOptimalNewCameraMatrix( + cameraMatrix, + distCoeffs, + size, + alpha, + newImgSize, + validPixROI, + centerPrincipalPoint, + rval, + callback, + ); + } + + late final _cv_getOptimalNewCameraMatrixPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + Mat, + Mat, + CvSize, + ffi.Double, + CvSize, + ffi.Pointer, + ffi.Bool, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_getOptimalNewCameraMatrix'); + late final _cv_getOptimalNewCameraMatrix = + _cv_getOptimalNewCameraMatrixPtr.asFunction< + ffi.Pointer Function( + Mat, + Mat, + CvSize, + double, + CvSize, + ffi.Pointer, + bool, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_initUndistortRectifyMap( + Mat cameraMatrix, + Mat distCoeffs, + Mat r, + Mat newCameraMatrix, + CvSize size, + int m1type, + Mat map1, + Mat map2, + imp1.CvCallback_0 callback, + ) { + return _cv_initUndistortRectifyMap( + cameraMatrix, + distCoeffs, + r, + newCameraMatrix, + size, + m1type, + map1, + map2, + callback, + ); + } + + late final 
_cv_initUndistortRectifyMapPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, CvSize, ffi.Int, + Mat, Mat, imp1.CvCallback_0)>>('cv_initUndistortRectifyMap'); + late final _cv_initUndistortRectifyMap = + _cv_initUndistortRectifyMapPtr.asFunction< + ffi.Pointer Function( + Mat, Mat, Mat, Mat, CvSize, int, Mat, Mat, imp1.CvCallback_0)>(); + + ffi.Pointer cv_initWideAngleProjMap( + imp1.MatIn cameraMatrix, + imp1.MatIn distCoeffs, + CvSize size, + int destImageWidth, + int m1type, + imp1.MatOut map1, + imp1.MatOut map2, + int projType, + double alpha, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_initWideAngleProjMap( + cameraMatrix, + distCoeffs, + size, + destImageWidth, + m1type, + map1, + map2, + projType, + alpha, + rval, + callback, + ); + } + + late final _cv_initWideAngleProjMapPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + CvSize, + ffi.Int, + ffi.Int, + imp1.MatOut, + imp1.MatOut, + ffi.Int, + ffi.Double, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_initWideAngleProjMap'); + late final _cv_initWideAngleProjMap = _cv_initWideAngleProjMapPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + CvSize, + int, + int, + imp1.MatOut, + imp1.MatOut, + int, + double, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_matMulDeriv( + imp1.MatIn A, + imp1.MatIn B, + imp1.MatOut dABdA, + imp1.MatOut dABdB, + imp1.CvCallback_0 callback, + ) { + return _cv_matMulDeriv( + A, + B, + dABdA, + dABdB, + callback, + ); + } + + late final _cv_matMulDerivPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatIn, imp1.MatOut, + imp1.MatOut, imp1.CvCallback_0)>>('cv_matMulDeriv'); + late final _cv_matMulDeriv = _cv_matMulDerivPtr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatIn, imp1.MatOut, + imp1.MatOut, imp1.CvCallback_0)>(); + + ffi.Pointer cv_projectPoints( + imp1.MatIn objectPoints, + imp1.MatIn rvec, + imp1.MatIn tvec, + 
imp1.MatIn cameraMatrix, + imp1.MatIn distCoeffs, + imp1.MatOut imagePoints, + imp1.MatOut jacobian, + double aspectRatio, + imp1.CvCallback_0 callback, + ) { + return _cv_projectPoints( + objectPoints, + rvec, + tvec, + cameraMatrix, + distCoeffs, + imagePoints, + jacobian, + aspectRatio, + callback, + ); + } + + late final _cv_projectPointsPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + ffi.Double, + imp1.CvCallback_0)>>('cv_projectPoints'); + late final _cv_projectPoints = _cv_projectPointsPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + double, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_recoverPose( + imp1.MatIn E, + imp1.MatIn points1, + imp1.MatIn points2, + imp1.MatIn cameraMatrix, + imp1.MatOut R, + imp1.MatOut t, + double distanceThresh, + imp1.MatInOut mask, + imp1.MatOut triangulatedPoints, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_recoverPose( + E, + points1, + points2, + cameraMatrix, + R, + t, + distanceThresh, + mask, + triangulatedPoints, + rval, + callback, + ); + } + + late final _cv_recoverPosePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + ffi.Double, + imp1.MatInOut, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_recoverPose'); + late final _cv_recoverPose = _cv_recoverPosePtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + double, + imp1.MatInOut, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_recoverPose_1( + imp1.MatIn E, + imp1.MatIn points1, + imp1.MatIn points2, + imp1.MatOut R, + imp1.MatOut t, + double focal, + CvPoint2d pp, + imp1.MatInOut mask, + ffi.Pointer rval, + imp1.CvCallback_0 callback, 
+ ) { + return _cv_recoverPose_1( + E, + points1, + points2, + R, + t, + focal, + pp, + mask, + rval, + callback, + ); + } + + late final _cv_recoverPose_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + ffi.Double, + CvPoint2d, + imp1.MatInOut, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_recoverPose_1'); + late final _cv_recoverPose_1 = _cv_recoverPose_1Ptr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + double, + CvPoint2d, + imp1.MatInOut, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_reprojectImageTo3D( + imp1.MatIn disparity, + imp1.MatOut _3dImage, + imp1.MatIn Q, + bool handleMissingValues, + int ddepth, + imp1.CvCallback_0 callback, + ) { + return _cv_reprojectImageTo3D( + disparity, + _3dImage, + Q, + handleMissingValues, + ddepth, + callback, + ); + } + + late final _cv_reprojectImageTo3DPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatOut, imp1.MatIn, + ffi.Bool, ffi.Int, imp1.CvCallback_0)>>('cv_reprojectImageTo3D'); + late final _cv_reprojectImageTo3D = _cv_reprojectImageTo3DPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, imp1.MatOut, imp1.MatIn, bool, int, imp1.CvCallback_0)>(); + + double cv_sampsonDistance( + imp1.MatIn pt1, + imp1.MatIn pt2, + imp1.MatIn F, + ) { + return _cv_sampsonDistance( + pt1, + pt2, + F, + ); + } + + late final _cv_sampsonDistancePtr = _lookup< + ffi.NativeFunction< + ffi.Double Function( + imp1.MatIn, imp1.MatIn, imp1.MatIn)>>('cv_sampsonDistance'); + late final _cv_sampsonDistance = _cv_sampsonDistancePtr + .asFunction(); + + ffi.Pointer cv_solveP3P( + imp1.MatIn objectPoints, + imp1.MatIn imagePoints, + imp1.MatIn cameraMatrix, + imp1.MatIn distCoeffs, + ffi.Pointer rvecs, + ffi.Pointer tvecs, + int flags, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_solveP3P( + objectPoints, + imagePoints, + cameraMatrix, 
+ distCoeffs, + rvecs, + tvecs, + flags, + rval, + callback, + ); + } + + late final _cv_solveP3PPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + ffi.Pointer, + ffi.Pointer, + ffi.Int, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_solveP3P'); + late final _cv_solveP3P = _cv_solveP3PPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + ffi.Pointer, + ffi.Pointer, + int, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_solvePnP( + imp1.MatIn objectPoints, + imp1.MatIn imagePoints, + imp1.MatIn cameraMatrix, + imp1.MatIn distCoeffs, + imp1.MatOut rvec, + imp1.MatOut tvec, + bool useExtrinsicGuess, + int flags, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_solvePnP( + objectPoints, + imagePoints, + cameraMatrix, + distCoeffs, + rvec, + tvec, + useExtrinsicGuess, + flags, + rval, + callback, + ); + } + + late final _cv_solvePnPPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + ffi.Bool, + ffi.Int, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_solvePnP'); + late final _cv_solvePnP = _cv_solvePnPPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + bool, + int, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_solvePnPGeneric( + imp1.MatIn objectPoints, + imp1.MatIn imagePoints, + imp1.MatIn cameraMatrix, + imp1.MatIn distCoeffs, + ffi.Pointer rvecs, + ffi.Pointer tvecs, + bool useExtrinsicGuess, + int flags, + imp1.MatIn rvec, + imp1.MatIn tvec, + imp1.MatOut reprojectionError, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_solvePnPGeneric( + objectPoints, + imagePoints, + cameraMatrix, + distCoeffs, + rvecs, + tvecs, + useExtrinsicGuess, + flags, + rvec, + tvec, + reprojectionError, + rval, + callback, + ); + } + + late final 
_cv_solvePnPGenericPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + ffi.Pointer, + ffi.Pointer, + ffi.Bool, + ffi.Int, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_solvePnPGeneric'); + late final _cv_solvePnPGeneric = _cv_solvePnPGenericPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + ffi.Pointer, + ffi.Pointer, + bool, + int, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_solvePnPRansac( + imp1.MatIn objectPoints, + imp1.MatIn imagePoints, + imp1.MatIn cameraMatrix, + imp1.MatIn distCoeffs, + imp1.MatOut rvec, + imp1.MatOut tvec, + bool useExtrinsicGuess, + int iterationsCount, + double reprojectionError, + double confidence, + imp1.MatOut inliers, + int flags, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + return _cv_solvePnPRansac( + objectPoints, + imagePoints, + cameraMatrix, + distCoeffs, + rvec, + tvec, + useExtrinsicGuess, + iterationsCount, + reprojectionError, + confidence, + inliers, + flags, + rval, + callback, + ); + } + + late final _cv_solvePnPRansacPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + ffi.Bool, + ffi.Int, + ffi.Float, + ffi.Double, + imp1.MatOut, + ffi.Int, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_solvePnPRansac'); + late final _cv_solvePnPRansac = _cv_solvePnPRansacPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + bool, + int, + double, + double, + imp1.MatOut, + int, + ffi.Pointer, + imp1.CvCallback_0)>(); + + ffi.Pointer cv_solvePnPRansac_1( + Mat objectPoints, + Mat imagePoints, + Mat cameraMatrix, + Mat distCoeffs, + Mat rvec, + Mat tvec, + Mat inliers, + imp1.UsacParams params, + ffi.Pointer rval, + imp1.CvCallback_0 callback, + ) { + 
return _cv_solvePnPRansac_1( + objectPoints, + imagePoints, + cameraMatrix, + distCoeffs, + rvec, + tvec, + inliers, + params, + rval, + callback, + ); + } + + late final _cv_solvePnPRansac_1Ptr = _lookup< ffi.NativeFunction< ffi.Pointer Function( Mat, Mat, - ffi.Int, - ffi.Double, Mat, - ffi.Int, - ffi.Double, - ffi.Pointer, - imp1.CvCallback_0)>>('cv_findHomography'); - late final _cv_findHomography = _cv_findHomographyPtr.asFunction< - ffi.Pointer Function(Mat, Mat, int, double, Mat, int, double, - ffi.Pointer, imp1.CvCallback_0)>(); + Mat, + Mat, + Mat, + Mat, + imp1.UsacParams, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_solvePnPRansac_1'); + late final _cv_solvePnPRansac_1 = _cv_solvePnPRansac_1Ptr.asFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, Mat, Mat, Mat, + imp1.UsacParams, ffi.Pointer, imp1.CvCallback_0)>(); - ffi.Pointer cv_fisheye_estimateNewCameraMatrixForUndistortRectify( - Mat k, - Mat d, - CvSize imgSize, - Mat r, - Mat p, - double balance, - CvSize newSize, - double fovScale, + ffi.Pointer cv_solvePnPRefineLM( + Mat objectPoints, + Mat imagePoints, + Mat cameraMatrix, + Mat distCoeffs, + Mat rvec, + Mat tvec, + imp1.TermCriteria criteria, imp1.CvCallback_0 callback, ) { - return _cv_fisheye_estimateNewCameraMatrixForUndistortRectify( - k, - d, - imgSize, - r, - p, - balance, - newSize, - fovScale, + return _cv_solvePnPRefineLM( + objectPoints, + imagePoints, + cameraMatrix, + distCoeffs, + rvec, + tvec, + criteria, callback, ); } - late final _cv_fisheye_estimateNewCameraMatrixForUndistortRectifyPtr = - _lookup< - ffi.NativeFunction< - ffi.Pointer Function(Mat, Mat, CvSize, Mat, Mat, - ffi.Double, CvSize, ffi.Double, imp1.CvCallback_0)>>( - 'cv_fisheye_estimateNewCameraMatrixForUndistortRectify'); - late final _cv_fisheye_estimateNewCameraMatrixForUndistortRectify = - _cv_fisheye_estimateNewCameraMatrixForUndistortRectifyPtr.asFunction< - ffi.Pointer Function(Mat, Mat, CvSize, Mat, Mat, double, - CvSize, double, imp1.CvCallback_0)>(); + late 
final _cv_solvePnPRefineLMPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, Mat, Mat, + imp1.TermCriteria, imp1.CvCallback_0)>>('cv_solvePnPRefineLM'); + late final _cv_solvePnPRefineLM = _cv_solvePnPRefineLMPtr.asFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, Mat, Mat, + imp1.TermCriteria, imp1.CvCallback_0)>(); - ffi.Pointer cv_fisheye_undistortImage( - Mat distorted, - Mat undistorted, - Mat k, - Mat d, + ffi.Pointer cv_solvePnPRefineVVS( + Mat objectPoints, + Mat imagePoints, + Mat cameraMatrix, + Mat distCoeffs, + Mat rvec, + Mat tvec, + imp1.TermCriteria criteria, + double VVSlambda, imp1.CvCallback_0 callback, ) { - return _cv_fisheye_undistortImage( - distorted, - undistorted, - k, - d, + return _cv_solvePnPRefineVVS( + objectPoints, + imagePoints, + cameraMatrix, + distCoeffs, + rvec, + tvec, + criteria, + VVSlambda, callback, ); } - late final _cv_fisheye_undistortImagePtr = _lookup< + late final _cv_solvePnPRefineVVSPtr = _lookup< ffi.NativeFunction< - ffi.Pointer Function(Mat, Mat, Mat, Mat, - imp1.CvCallback_0)>>('cv_fisheye_undistortImage'); - late final _cv_fisheye_undistortImage = - _cv_fisheye_undistortImagePtr.asFunction< ffi.Pointer Function( - Mat, Mat, Mat, Mat, imp1.CvCallback_0)>(); + Mat, + Mat, + Mat, + Mat, + Mat, + Mat, + imp1.TermCriteria, + ffi.Double, + imp1.CvCallback_0)>>('cv_solvePnPRefineVVS'); + late final _cv_solvePnPRefineVVS = _cv_solvePnPRefineVVSPtr.asFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, Mat, Mat, + imp1.TermCriteria, double, imp1.CvCallback_0)>(); - ffi.Pointer cv_fisheye_undistortImage_1( - Mat distorted, - Mat undistorted, - Mat k, - Mat d, - Mat knew, - CvSize size, + ffi.Pointer cv_stereoCalibrate( + VecMat objectPoints, + VecMat imagePoints1, + VecMat imagePoints2, + imp1.MatInOut cameraMatrix1, + imp1.MatInOut distCoeffs1, + imp1.MatInOut cameraMatrix2, + imp1.MatInOut distCoeffs2, + CvSize imageSize, + imp1.MatInOut R, + imp1.MatInOut T, + imp1.MatOut E, + 
imp1.MatOut F, + VecMat rvecs, + VecMat tvecs, + imp1.MatOut perViewErrors, + int flags, + imp1.TermCriteria criteria, + ffi.Pointer rval, imp1.CvCallback_0 callback, ) { - return _cv_fisheye_undistortImage_1( - distorted, - undistorted, - k, - d, - knew, - size, + return _cv_stereoCalibrate( + objectPoints, + imagePoints1, + imagePoints2, + cameraMatrix1, + distCoeffs1, + cameraMatrix2, + distCoeffs2, + imageSize, + R, + T, + E, + F, + rvecs, + tvecs, + perViewErrors, + flags, + criteria, + rval, callback, ); } - late final _cv_fisheye_undistortImage_1Ptr = _lookup< + late final _cv_stereoCalibratePtr = _lookup< ffi.NativeFunction< - ffi.Pointer Function(Mat, Mat, Mat, Mat, Mat, CvSize, - imp1.CvCallback_0)>>('cv_fisheye_undistortImage_1'); - late final _cv_fisheye_undistortImage_1 = - _cv_fisheye_undistortImage_1Ptr.asFunction< ffi.Pointer Function( - Mat, Mat, Mat, Mat, Mat, CvSize, imp1.CvCallback_0)>(); + VecMat, + VecMat, + VecMat, + imp1.MatInOut, + imp1.MatInOut, + imp1.MatInOut, + imp1.MatInOut, + CvSize, + imp1.MatInOut, + imp1.MatInOut, + imp1.MatOut, + imp1.MatOut, + VecMat, + VecMat, + imp1.MatOut, + ffi.Int, + imp1.TermCriteria, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_stereoCalibrate'); + late final _cv_stereoCalibrate = _cv_stereoCalibratePtr.asFunction< + ffi.Pointer Function( + VecMat, + VecMat, + VecMat, + imp1.MatInOut, + imp1.MatInOut, + imp1.MatInOut, + imp1.MatInOut, + CvSize, + imp1.MatInOut, + imp1.MatInOut, + imp1.MatOut, + imp1.MatOut, + VecMat, + VecMat, + imp1.MatOut, + int, + imp1.TermCriteria, + ffi.Pointer, + imp1.CvCallback_0)>(); - ffi.Pointer cv_fisheye_undistortPoints( - Mat distorted, - Mat undistorted, - Mat k, - Mat d, - Mat R, - Mat P, + ffi.Pointer cv_stereoRectify( + imp1.MatIn cameraMatrix1, + imp1.MatIn distCoeffs1, + imp1.MatIn cameraMatrix2, + imp1.MatIn distCoeffs2, + CvSize imageSize, + imp1.MatIn R, + imp1.MatIn T, + imp1.MatOut R1, + imp1.MatOut R2, + imp1.MatOut P1, + imp1.MatOut P2, + imp1.MatOut Q, + int flags, 
+ double alpha, + CvSize newImageSize, + ffi.Pointer validPixROI1, + ffi.Pointer validPixROI2, imp1.CvCallback_0 callback, ) { - return _cv_fisheye_undistortPoints( - distorted, - undistorted, - k, - d, + return _cv_stereoRectify( + cameraMatrix1, + distCoeffs1, + cameraMatrix2, + distCoeffs2, + imageSize, R, - P, + T, + R1, + R2, + P1, + P2, + Q, + flags, + alpha, + newImageSize, + validPixROI1, + validPixROI2, callback, ); } - late final _cv_fisheye_undistortPointsPtr = _lookup< + late final _cv_stereoRectifyPtr = _lookup< ffi.NativeFunction< - ffi.Pointer Function(Mat, Mat, Mat, Mat, Mat, Mat, - imp1.CvCallback_0)>>('cv_fisheye_undistortPoints'); - late final _cv_fisheye_undistortPoints = - _cv_fisheye_undistortPointsPtr.asFunction< ffi.Pointer Function( - Mat, Mat, Mat, Mat, Mat, Mat, imp1.CvCallback_0)>(); + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + CvSize, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + ffi.Int, + ffi.Double, + CvSize, + ffi.Pointer, + ffi.Pointer, + imp1.CvCallback_0)>>('cv_stereoRectify'); + late final _cv_stereoRectify = _cv_stereoRectifyPtr.asFunction< + ffi.Pointer Function( + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + CvSize, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + imp1.MatOut, + int, + double, + CvSize, + ffi.Pointer, + ffi.Pointer, + imp1.CvCallback_0)>(); - ffi.Pointer cv_getOptimalNewCameraMatrix( - Mat cameraMatrix, - Mat distCoeffs, - CvSize size, - double alpha, - CvSize newImgSize, - ffi.Pointer validPixROI, - bool centerPrincipalPoint, - ffi.Pointer rval, + ffi.Pointer cv_stereoRectifyUncalibrated( + imp1.MatIn points1, + imp1.MatIn points2, + imp1.MatIn F, + CvSize imgSize, + imp1.MatOut H1, + imp1.MatOut H2, + double threshold, + ffi.Pointer rval, imp1.CvCallback_0 callback, ) { - return _cv_getOptimalNewCameraMatrix( - cameraMatrix, - distCoeffs, - size, - alpha, - newImgSize, - validPixROI, - 
centerPrincipalPoint, + return _cv_stereoRectifyUncalibrated( + points1, + points2, + F, + imgSize, + H1, + H2, + threshold, rval, callback, ); } - late final _cv_getOptimalNewCameraMatrixPtr = _lookup< + late final _cv_stereoRectifyUncalibratedPtr = _lookup< ffi.NativeFunction< ffi.Pointer Function( - Mat, - Mat, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, CvSize, + imp1.MatOut, + imp1.MatOut, ffi.Double, - CvSize, - ffi.Pointer, - ffi.Bool, - ffi.Pointer, - imp1.CvCallback_0)>>('cv_getOptimalNewCameraMatrix'); - late final _cv_getOptimalNewCameraMatrix = - _cv_getOptimalNewCameraMatrixPtr.asFunction< + ffi.Pointer, + imp1.CvCallback_0)>>('cv_stereoRectifyUncalibrated'); + late final _cv_stereoRectifyUncalibrated = + _cv_stereoRectifyUncalibratedPtr.asFunction< ffi.Pointer Function( - Mat, - Mat, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, CvSize, + imp1.MatOut, + imp1.MatOut, double, - CvSize, - ffi.Pointer, - bool, - ffi.Pointer, + ffi.Pointer, imp1.CvCallback_0)>(); - ffi.Pointer cv_initUndistortRectifyMap( - Mat cameraMatrix, - Mat distCoeffs, - Mat r, - Mat newCameraMatrix, - CvSize size, - int m1type, - Mat map1, - Mat map2, + ffi.Pointer cv_triangulatePoints( + imp1.MatIn projMatr1, + imp1.MatIn projMatr2, + imp1.MatIn projPoints1, + imp1.MatIn projPoints2, + imp1.MatOut points4D, imp1.CvCallback_0 callback, ) { - return _cv_initUndistortRectifyMap( - cameraMatrix, - distCoeffs, - r, - newCameraMatrix, - size, - m1type, - map1, - map2, + return _cv_triangulatePoints( + projMatr1, + projMatr2, + projPoints1, + projPoints2, + points4D, callback, ); } - late final _cv_initUndistortRectifyMapPtr = _lookup< + late final _cv_triangulatePointsPtr = _lookup< ffi.NativeFunction< - ffi.Pointer Function(Mat, Mat, Mat, Mat, CvSize, ffi.Int, - Mat, Mat, imp1.CvCallback_0)>>('cv_initUndistortRectifyMap'); - late final _cv_initUndistortRectifyMap = - _cv_initUndistortRectifyMapPtr.asFunction< ffi.Pointer Function( - Mat, Mat, Mat, Mat, CvSize, int, Mat, Mat, 
imp1.CvCallback_0)>(); + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatIn, + imp1.MatOut, + imp1.CvCallback_0)>>('cv_triangulatePoints'); + late final _cv_triangulatePoints = _cv_triangulatePointsPtr.asFunction< + ffi.Pointer Function(imp1.MatIn, imp1.MatIn, imp1.MatIn, + imp1.MatIn, imp1.MatOut, imp1.CvCallback_0)>(); ffi.Pointer cv_undistort( Mat src, @@ -611,6 +2677,32 @@ class CvNativeCalib3d { ffi.Pointer Function( Mat, Mat, Mat, Mat, Mat, imp1.CvCallback_0)>(); + ffi.Pointer cv_undistortImagePoints( + Mat src, + Mat dst, + Mat cameraMatrix, + Mat distCoeffs, + imp1.TermCriteria criteria, + imp1.CvCallback_0 callback, + ) { + return _cv_undistortImagePoints( + src, + dst, + cameraMatrix, + distCoeffs, + criteria, + callback, + ); + } + + late final _cv_undistortImagePointsPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, imp1.TermCriteria, + imp1.CvCallback_0)>>('cv_undistortImagePoints'); + late final _cv_undistortImagePoints = _cv_undistortImagePointsPtr.asFunction< + ffi.Pointer Function( + Mat, Mat, Mat, Mat, imp1.TermCriteria, imp1.CvCallback_0)>(); + ffi.Pointer cv_undistortPoints( Mat distorted, Mat undistorted, @@ -640,13 +2732,44 @@ class CvNativeCalib3d { late final _cv_undistortPoints = _cv_undistortPointsPtr.asFunction< ffi.Pointer Function( Mat, Mat, Mat, Mat, Mat, Mat, TermCriteria, imp1.CvCallback_0)>(); + + ffi.Pointer cv_validateDisparity( + Mat disparity, + Mat cost, + int minDisparity, + int numberOfDisparities, + int disp12MaxDisp, + imp1.CvCallback_0 callback, + ) { + return _cv_validateDisparity( + disparity, + cost, + minDisparity, + numberOfDisparities, + disp12MaxDisp, + callback, + ); + } + + late final _cv_validateDisparityPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(Mat, Mat, ffi.Int, ffi.Int, ffi.Int, + imp1.CvCallback_0)>>('cv_validateDisparity'); + late final _cv_validateDisparity = _cv_validateDisparityPtr.asFunction< + ffi.Pointer Function( + Mat, Mat, int, int, int, 
imp1.CvCallback_0)>(); } +typedef CvPoint2d = imp1.CvPoint2d; typedef CvRect = imp1.CvRect; typedef CvSize = imp1.CvSize; typedef CvStatus = imp1.CvStatus; typedef Mat = imp1.Mat; +typedef Scalar = imp1.Scalar; typedef TermCriteria = imp1.TermCriteria; +typedef UsacParams = imp1.UsacParams; +typedef Vec3d = imp1.Vec3d; +typedef VecMat = imp1.VecMat; typedef VecPoint2f = imp1.VecPoint2f; typedef VecVecPoint2f = imp1.VecVecPoint2f; typedef VecVecPoint3f = imp1.VecVecPoint3f; diff --git a/packages/dartcv/lib/src/g/calib3d.yaml b/packages/dartcv/lib/src/g/calib3d.yaml index 255bf1d5..8b259862 100644 --- a/packages/dartcv/lib/src/g/calib3d.yaml +++ b/packages/dartcv/lib/src/g/calib3d.yaml @@ -4,42 +4,150 @@ files: used-config: ffi-native: false symbols: + c:@F@cv_RQDecomp3x3: + name: cv_RQDecomp3x3 + c:@F@cv_Rodrigues: + name: cv_Rodrigues c:@F@cv_calibrateCamera: name: cv_calibrateCamera + c:@F@cv_checkChessboard: + name: cv_checkChessboard + c:@F@cv_computeCorrespondEpilines: + name: cv_computeCorrespondEpilines + c:@F@cv_convertPointsFromHomogeneous: + name: cv_convertPointsFromHomogeneous + c:@F@cv_convertPointsHomogeneous: + name: cv_convertPointsHomogeneous + c:@F@cv_convertPointsToHomogeneous: + name: cv_convertPointsToHomogeneous + c:@F@cv_correctMatches: + name: cv_correctMatches + c:@F@cv_decomposeEssentialMat: + name: cv_decomposeEssentialMat + c:@F@cv_decomposeHomographyMat: + name: cv_decomposeHomographyMat + c:@F@cv_decomposeProjectionMatrix: + name: cv_decomposeProjectionMatrix c:@F@cv_drawChessboardCorners: name: cv_drawChessboardCorners + c:@F@cv_drawFrameAxes: + name: cv_drawFrameAxes c:@F@cv_estimateAffine2D: name: cv_estimateAffine2D c:@F@cv_estimateAffine2D_1: name: cv_estimateAffine2D_1 + c:@F@cv_estimateAffine3D: + name: cv_estimateAffine3D + c:@F@cv_estimateAffine3D_1: + name: cv_estimateAffine3D_1 c:@F@cv_estimateAffinePartial2D: name: cv_estimateAffinePartial2D c:@F@cv_estimateAffinePartial2D_1: name: cv_estimateAffinePartial2D_1 + 
c:@F@cv_estimateChessboardSharpness: + name: cv_estimateChessboardSharpness + c:@F@cv_estimateTranslation3D: + name: cv_estimateTranslation3D + c:@F@cv_filterHomographyDecompByVisibleRefpoints: + name: cv_filterHomographyDecompByVisibleRefpoints + c:@F@cv_filterSpeckles: + name: cv_filterSpeckles + c:@F@cv_find4QuadCornerSubpix: + name: cv_find4QuadCornerSubpix c:@F@cv_findChessboardCorners: name: cv_findChessboardCorners c:@F@cv_findChessboardCornersSB: name: cv_findChessboardCornersSB c:@F@cv_findChessboardCornersSB_1: name: cv_findChessboardCornersSB_1 + c:@F@cv_findCirclesGrid: + name: cv_findCirclesGrid + c:@F@cv_findEssentialMat: + name: cv_findEssentialMat + c:@F@cv_findEssentialMat_1: + name: cv_findEssentialMat_1 + c:@F@cv_findFundamentalMat: + name: cv_findFundamentalMat + c:@F@cv_findFundamentalMat_1: + name: cv_findFundamentalMat_1 + c:@F@cv_findFundamentalMat_2: + name: cv_findFundamentalMat_2 c:@F@cv_findHomography: name: cv_findHomography + c:@F@cv_findHomography_1: + name: cv_findHomography_1 + c:@F@cv_fisheye_calibrate: + name: cv_fisheye_calibrate + c:@F@cv_fisheye_distortPoints: + name: cv_fisheye_distortPoints + c:@F@cv_fisheye_distortPoints_1: + name: cv_fisheye_distortPoints_1 c:@F@cv_fisheye_estimateNewCameraMatrixForUndistortRectify: name: cv_fisheye_estimateNewCameraMatrixForUndistortRectify + c:@F@cv_fisheye_initUndistortRectifyMap: + name: cv_fisheye_initUndistortRectifyMap + c:@F@cv_fisheye_projectPoints: + name: cv_fisheye_projectPoints + c:@F@cv_fisheye_solvePnP: + name: cv_fisheye_solvePnP c:@F@cv_fisheye_undistortImage: name: cv_fisheye_undistortImage c:@F@cv_fisheye_undistortImage_1: name: cv_fisheye_undistortImage_1 c:@F@cv_fisheye_undistortPoints: name: cv_fisheye_undistortPoints + c:@F@cv_getDefaultNewCameraMatrix: + name: cv_getDefaultNewCameraMatrix c:@F@cv_getOptimalNewCameraMatrix: name: cv_getOptimalNewCameraMatrix c:@F@cv_initUndistortRectifyMap: name: cv_initUndistortRectifyMap + c:@F@cv_initWideAngleProjMap: + name: 
cv_initWideAngleProjMap + c:@F@cv_matMulDeriv: + name: cv_matMulDeriv + c:@F@cv_projectPoints: + name: cv_projectPoints + c:@F@cv_recoverPose: + name: cv_recoverPose + c:@F@cv_recoverPose_1: + name: cv_recoverPose_1 + c:@F@cv_reprojectImageTo3D: + name: cv_reprojectImageTo3D + c:@F@cv_sampsonDistance: + name: cv_sampsonDistance + c:@F@cv_solveP3P: + name: cv_solveP3P + c:@F@cv_solvePnP: + name: cv_solvePnP + c:@F@cv_solvePnPGeneric: + name: cv_solvePnPGeneric + c:@F@cv_solvePnPRansac: + name: cv_solvePnPRansac + c:@F@cv_solvePnPRansac_1: + name: cv_solvePnPRansac_1 + c:@F@cv_solvePnPRefineLM: + name: cv_solvePnPRefineLM + c:@F@cv_solvePnPRefineVVS: + name: cv_solvePnPRefineVVS + c:@F@cv_stereoCalibrate: + name: cv_stereoCalibrate + c:@F@cv_stereoRectify: + name: cv_stereoRectify + c:@F@cv_stereoRectifyUncalibrated: + name: cv_stereoRectifyUncalibrated + c:@F@cv_triangulatePoints: + name: cv_triangulatePoints c:@F@cv_undistort: name: cv_undistort + c:@F@cv_undistortImagePoints: + name: cv_undistortImagePoints c:@F@cv_undistortPoints: name: cv_undistortPoints + c:@F@cv_validateDisparity: + name: cv_validateDisparity + c:types.h@T@CvPoint2d: + name: CvPoint2d c:types.h@T@CvRect: name: CvRect c:types.h@T@CvSize: @@ -48,8 +156,16 @@ files: name: CvStatus c:types.h@T@Mat: name: Mat + c:types.h@T@Scalar: + name: Scalar c:types.h@T@TermCriteria: name: TermCriteria + c:types.h@T@UsacParams: + name: UsacParams + c:types.h@T@Vec3d: + name: Vec3d + c:types.h@T@VecMat: + name: VecMat c:types.h@T@VecPoint2f: name: VecPoint2f c:types.h@T@VecVecPoint2f: diff --git a/packages/dartcv/lib/src/g/constants.g.dart b/packages/dartcv/lib/src/g/constants.g.dart index 46527cce..25a3725e 100644 --- a/packages/dartcv/lib/src/g/constants.g.dart +++ b/packages/dartcv/lib/src/g/constants.g.dart @@ -1503,8 +1503,6 @@ const int COVAR_USE_AVG = 2; const int CV__CAP_PROP_LATEST = 71; -const int CV__VIDEOWRITER_PROP_LATEST = 12; - const int DCT_INVERSE = 1; const int DCT_ROWS = 4; @@ -1947,66 +1945,6 
@@ const int SORT_EVERY_COLUMN = 1; const int SORT_EVERY_ROW = 0; -enum SolvePnPMethod { - /// !< Pose refinement using non-linear Levenberg-Marquardt minimization scheme @cite Madsen04 @cite Eade13 \n - /// !< Initial solution for non-planar "objectPoints" needs at least 6 points and uses the DLT algorithm. \n - /// !< Initial solution for planar "objectPoints" needs at least 4 points and uses pose from homography decomposition. - SOLVEPNP_ITERATIVE(0), - - /// !< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp - SOLVEPNP_EPNP(1), - - /// !< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete - SOLVEPNP_P3P(2), - - /// !< **Broken implementation. Using this flag will fallback to EPnP.** \n - /// !< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct - SOLVEPNP_DLS(3), - - /// !< **Broken implementation. Using this flag will fallback to EPnP.** \n - /// !< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive - SOLVEPNP_UPNP(4), - - /// !< An Efficient Algebraic Solution to the Perspective-Three-Point Problem @cite Ke17 - SOLVEPNP_AP3P(5), - - /// !< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n - /// !< Object points must be coplanar. 
- SOLVEPNP_IPPE(6), - - /// !< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n - /// !< This is a special case suitable for marker pose estimation.\n - /// !< 4 coplanar object points must be defined in the following order: - /// !< - point 0: [-squareLength / 2, squareLength / 2, 0] - /// !< - point 1: [ squareLength / 2, squareLength / 2, 0] - /// !< - point 2: [ squareLength / 2, -squareLength / 2, 0] - /// !< - point 3: [-squareLength / 2, -squareLength / 2, 0] - SOLVEPNP_IPPE_SQUARE(7), - - /// !< SQPnP: A Consistently Fast and Globally OptimalSolution to the Perspective-n-Point Problem @cite Terzakis2020SQPnP - SOLVEPNP_SQPNP(8), - - /// !< Used for count - SOLVEPNP_MAX_COUNT(9); - - final int value; - const SolvePnPMethod(this.value); - - static SolvePnPMethod fromValue(int value) => switch (value) { - 0 => SOLVEPNP_ITERATIVE, - 1 => SOLVEPNP_EPNP, - 2 => SOLVEPNP_P3P, - 3 => SOLVEPNP_DLS, - 4 => SOLVEPNP_UPNP, - 5 => SOLVEPNP_AP3P, - 6 => SOLVEPNP_IPPE, - 7 => SOLVEPNP_IPPE_SQUARE, - 8 => SOLVEPNP_SQPNP, - 9 => SOLVEPNP_MAX_COUNT, - _ => throw ArgumentError("Unknown value for SolvePnPMethod: $value"), - }; -} - const int TERM_COUNT = 1; const int TERM_EPS = 2; @@ -2055,28 +1993,6 @@ const int USAC_PARALLEL = 33; const int USAC_PROSAC = 37; -const int VIDEOWRITER_PROP_DEPTH = 5; - -const int VIDEOWRITER_PROP_FRAMEBYTES = 2; - -const int VIDEOWRITER_PROP_HW_ACCELERATION = 6; - -const int VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL = 8; - -const int VIDEOWRITER_PROP_HW_DEVICE = 7; - -const int VIDEOWRITER_PROP_IS_COLOR = 4; - -const int VIDEOWRITER_PROP_KEY_FLAG = 11; - -const int VIDEOWRITER_PROP_KEY_INTERVAL = 10; - -const int VIDEOWRITER_PROP_NSTRIPES = 3; - -const int VIDEOWRITER_PROP_QUALITY = 1; - -const int VIDEOWRITER_PROP_RAW_VIDEO = 9; - /// @brief Video Acceleration type /// /// Used as value in #CAP_PROP_HW_ACCELERATION and #VIDEOWRITER_PROP_HW_ACCELERATION diff --git a/packages/dartcv/lib/src/g/core.g.dart 
b/packages/dartcv/lib/src/g/core.g.dart index 68215956..692f25dd 100644 --- a/packages/dartcv/lib/src/g/core.g.dart +++ b/packages/dartcv/lib/src/g/core.g.dart @@ -12849,6 +12849,21 @@ class CvNativeCore { late final _std_VecVecDMatch_clear = _std_VecVecDMatch_clearPtr .asFunction)>(); + ffi.Pointer std_VecVecDMatch_clone( + ffi.Pointer self, + ) { + return _std_VecVecDMatch_clone( + self, + ); + } + + late final _std_VecVecDMatch_clonePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Pointer)>>('std_VecVecDMatch_clone'); + late final _std_VecVecDMatch_clone = _std_VecVecDMatch_clonePtr.asFunction< + ffi.Pointer Function(ffi.Pointer)>(); + ffi.Pointer std_VecVecDMatch_data( ffi.Pointer self, ) { diff --git a/packages/dartcv/lib/src/g/core.yaml b/packages/dartcv/lib/src/g/core.yaml index e733d214..a1fb9067 100644 --- a/packages/dartcv/lib/src/g/core.yaml +++ b/packages/dartcv/lib/src/g/core.yaml @@ -1442,6 +1442,8 @@ files: name: std_VecVecChar_shrink_to_fit c:@F@std_VecVecDMatch_clear: name: std_VecVecDMatch_clear + c:@F@std_VecVecDMatch_clone: + name: std_VecVecDMatch_clone c:@F@std_VecVecDMatch_data: name: std_VecVecDMatch_data c:@F@std_VecVecDMatch_extend: diff --git a/packages/dartcv/lib/src/g/types.g.dart b/packages/dartcv/lib/src/g/types.g.dart index 6d14a75e..a02c3c2e 100644 --- a/packages/dartcv/lib/src/g/types.g.dart +++ b/packages/dartcv/lib/src/g/types.g.dart @@ -153,6 +153,14 @@ final class CvPoint extends ffi.Struct { external int y; } +final class CvPoint2d extends ffi.Struct { + @ffi.Double() + external double x; + + @ffi.Double() + external double y; +} + final class CvPoint2f extends ffi.Struct { @ffi.Float() external double x; @@ -290,6 +298,9 @@ final class Mat extends ffi.Struct { external ffi.Pointer ptr; } +typedef MatIn = Mat; +typedef MatInOut = Mat; +typedef MatOut = Mat; typedef MatPtr = ffi.Pointer; final class MatStep extends ffi.Struct { @@ -411,6 +422,47 @@ final class TermCriteria extends ffi.Struct { external double 
epsilon; } +final class UsacParams extends ffi.Struct { + @ffi.Double() + external double confidence; + + @ffi.Bool() + external bool isParallel; + + @ffi.Int() + external int loIterations; + + @ffi.Int() + external int loMethod; + + @ffi.Int() + external int loSampleSize; + + @ffi.Int() + external int maxIterations; + + @ffi.Int() + external int neighborsSearch; + + @ffi.Int() + external int randomGeneratorState; + + @ffi.Int() + external int sampler; + + @ffi.Int() + external int score; + + @ffi.Double() + external double threshold; + + @ffi.Int() + external int final_polisher; + + @ffi.Int() + external int final_polisher_iterations; +} + final class Vec2b extends ffi.Struct { @ffi.UnsignedChar() external int val1; diff --git a/packages/dartcv/lib/src/g/types.yaml b/packages/dartcv/lib/src/g/types.yaml index 03b757a3..629b0cf4 100644 --- a/packages/dartcv/lib/src/g/types.yaml +++ b/packages/dartcv/lib/src/g/types.yaml @@ -26,6 +26,8 @@ files: name: CvCallback_9Function c:@S@CvPoint: name: CvPoint + c:@S@CvPoint2d: + name: CvPoint2d c:@S@CvPoint2f: name: CvPoint2f c:@S@CvPoint3f: @@ -62,6 +64,8 @@ files: name: Scalar c:@S@TermCriteria: name: TermCriteria + c:@S@UsacParams: + name: UsacParams c:@S@Vec2b: name: Vec2b c:@S@Vec2d: @@ -208,6 +212,12 @@ files: name: CvCallback_9 c:types.h@T@InputOutputArrayPtr: name: InputOutputArrayPtr + c:types.h@T@MatIn: + name: MatIn + c:types.h@T@MatInOut: + name: MatInOut + c:types.h@T@MatOut: + name: MatOut c:types.h@T@MatPtr: name: MatPtr c:types.h@T@RNGPtr: diff --git a/packages/dartcv/lib/src/g/videoio.g.dart b/packages/dartcv/lib/src/g/videoio.g.dart index 2883d918..bf08384f 100644 --- a/packages/dartcv/lib/src/g/videoio.g.dart +++ b/packages/dartcv/lib/src/g/videoio.g.dart @@ -493,6 +493,22 @@ class CvNativeVideoIO { late final _cv_VideoWriter_fourcc = _cv_VideoWriter_fourccPtr.asFunction(); + double cv_VideoWriter_get( + VideoWriter self, + int propId, + ) { + return _cv_VideoWriter_get( + self, + propId, + ); + } + + late 
final _cv_VideoWriter_getPtr = + _lookup>( + 'cv_VideoWriter_get'); + late final _cv_VideoWriter_get = + _cv_VideoWriter_getPtr.asFunction(); + ffi.Pointer cv_VideoWriter_getBackendName( VideoWriter self, ) { @@ -627,6 +643,25 @@ class CvNativeVideoIO { late final _cv_VideoWriter_release = _cv_VideoWriter_releasePtr .asFunction Function(VideoWriter)>(); + void cv_VideoWriter_set( + VideoWriter self, + int propId, + double val, + ) { + return _cv_VideoWriter_set( + self, + propId, + val, + ); + } + + late final _cv_VideoWriter_setPtr = _lookup< + ffi + .NativeFunction>( + 'cv_VideoWriter_set'); + late final _cv_VideoWriter_set = _cv_VideoWriter_setPtr + .asFunction(); + ffi.Pointer cv_VideoWriter_write( VideoWriter self, Mat img, diff --git a/packages/dartcv/lib/src/g/videoio.yaml b/packages/dartcv/lib/src/g/videoio.yaml index b1906b51..7b92cb7c 100644 --- a/packages/dartcv/lib/src/g/videoio.yaml +++ b/packages/dartcv/lib/src/g/videoio.yaml @@ -46,6 +46,8 @@ files: name: cv_VideoWriter_create_2 c:@F@cv_VideoWriter_fourcc: name: cv_VideoWriter_fourcc + c:@F@cv_VideoWriter_get: + name: cv_VideoWriter_get c:@F@cv_VideoWriter_getBackendName: name: cv_VideoWriter_getBackendName c:@F@cv_VideoWriter_isOpened: @@ -56,6 +58,8 @@ files: name: cv_VideoWriter_open_1 c:@F@cv_VideoWriter_release: name: cv_VideoWriter_release + c:@F@cv_VideoWriter_set: + name: cv_VideoWriter_set c:@F@cv_VideoWriter_write: name: cv_VideoWriter_write c:@S@VideoCapture: diff --git a/packages/dartcv/lib/src/videoio/videoio.dart b/packages/dartcv/lib/src/videoio/videoio.dart index 932efa11..6e68fca2 100644 --- a/packages/dartcv/lib/src/videoio/videoio.dart +++ b/packages/dartcv/lib/src/videoio/videoio.dart @@ -250,6 +250,17 @@ class VideoWriter extends CvStruct { return cvideoio.cv_VideoWriter_fourcc(cc_[0], cc_[1], cc_[2], cc_[3]); } + /// Returns the specified VideoWriter property. + /// + /// [propId] Property identifier from cv::VideoWriterProperties (eg. 
cv::VIDEOWRITER_PROP_QUALITY) or one of Additional flags for video I/O API backends + /// + /// Returns + /// Value for the specified property. Value 0 is returned when querying a property that is not supported by the backend used by the VideoWriter instance. + double get(int propId) => cvideoio.cv_VideoWriter_get(ref, propId); + + /// Sets a property in the VideoWriter. + void set(int propId, double value) => cvideoio.cv_VideoWriter_set(ref, propId, value); + void release() { cvRun(() => cvideoio.cv_VideoWriter_release(ref)); } @@ -265,3 +276,43 @@ class VideoWriter extends CvStruct { bool get isOpened => cvideoio.cv_VideoWriter_isOpened(ref); } + +// constants +/// Current quality (0..100%) of the encoded videostream. Can be adjusted dynamically in some codecs. +const int VIDEOWRITER_PROP_QUALITY = 1; + +/// (Read-only): Size of just encoded video frame. Note that the encoding order may be different from representation order. +const int VIDEOWRITER_PROP_FRAMEBYTES = 2; + +/// Number of stripes for parallel encoding. -1 for auto detection. +const int VIDEOWRITER_PROP_NSTRIPES = 3; + +/// If it is not zero, the encoder will expect and encode color frames, otherwise it will work with grayscale frames. +const int VIDEOWRITER_PROP_IS_COLOR = 4; + +/// Defaults to CV_8U. +const int VIDEOWRITER_PROP_DEPTH = 5; + +/// (**open-only**) Hardware acceleration type (see #VideoAccelerationType). Setting supported only via `params` parameter in VideoWriter constructor / .open() method. Default value is backend-specific. +const int VIDEOWRITER_PROP_HW_ACCELERATION = 6; + +/// (**open-only**) Hardware device index (select GPU if multiple available). Device enumeration is acceleration type specific. +const int VIDEOWRITER_PROP_HW_DEVICE = 7; + +/// (**open-only**) If non-zero, create new OpenCL context and bind it to current thread. 
The OpenCL context created with Video Acceleration context attached it (if not attached yet) for optimized GPU data copy between cv::UMat and HW accelerated encoder. +const int VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL = 8; + +/// (**open-only**) Set to non-zero to enable encapsulation of an encoded raw video stream. Each raw encoded video frame should be passed to VideoWriter::write() as single row or column of a CV_8UC1 Mat. \note If the key frame interval is not 1 then it must be manually specified by the user. This can either be performed during initialization passing VIDEOWRITER_PROP_KEY_INTERVAL as one of the extra encoder params to VideoWriter::VideoWriter(const String &, int, double, const Size &, const std::vector< int > ¶ms) or afterwards by setting the VIDEOWRITER_PROP_KEY_FLAG with VideoWriter::set() before writing each frame. FFMpeg backend only. +const int VIDEOWRITER_PROP_RAW_VIDEO = 9; + +/// (**open-only**) Set the key frame interval using raw video encapsulation (VIDEOWRITER_PROP_RAW_VIDEO != 0). Defaults to 1 when not set. FFmpeg back-end only. +const int VIDEOWRITER_PROP_KEY_INTERVAL = 10; + +/// Set to non-zero to signal that the following frames are key frames or zero if not, when encapsulating raw video (VIDEOWRITER_PROP_RAW_VIDEO != 0). FFmpeg back-end only. +const int VIDEOWRITER_PROP_KEY_FLAG = 11; + +/// Specifies the frame presentation timestamp for each frame using the FPS time base. This property is **only** necessary when encapsulating **externally** encoded video where the decoding order differs from the presentation order, such as in GOP patterns with bi-directional B-frames. The value should be provided by your external encoder and for video sources with fixed frame rates it is equivalent to dividing the current frame's presentation time (CAP_PROP_POS_MSEC) by the frame duration (1000.0 / VideoCapture::get(CAP_PROP_FPS)). It can be queried from the resulting encapsulated video file using VideoCapture::get(CAP_PROP_PTS). 
FFmpeg back-end only. +const int VIDEOWRITER_PROP_PTS = 12; + +/// Specifies the maximum difference between presentation (pts) and decompression timestamps (dts) using the FPS time base. This property is necessary **only** when encapsulating **externally** encoded video where the decoding order differs from the presentation order, such as in GOP patterns with bi-directional B-frames. The value should be calculated based on the specific GOP pattern used during encoding. For example, in a GOP with presentation order IBP and decoding order IPB, this value would be 1, as the B-frame is the second frame presented but the third to be decoded. It can be queried from the resulting encapsulated video file using VideoCapture::get(CAP_PROP_DTS_DELAY). Non-zero values usually imply the stream is encoded using B-frames. FFmpeg back-end only. +const int VIDEOWRITER_PROP_DTS_DELAY = 13; diff --git a/packages/dartcv/test/calib3d_test.dart b/packages/dartcv/test/calib3d/calib3d_async_test.dart similarity index 100% rename from packages/dartcv/test/calib3d_test.dart rename to packages/dartcv/test/calib3d/calib3d_async_test.dart diff --git a/packages/dartcv/test/calib3d/calib3d_fisheye_test.dart b/packages/dartcv/test/calib3d/calib3d_fisheye_test.dart new file mode 100644 index 00000000..403b5da1 --- /dev/null +++ b/packages/dartcv/test/calib3d/calib3d_fisheye_test.dart @@ -0,0 +1,252 @@ +import 'package:dartcv4/dartcv.dart' as cv; +import 'package:test/test.dart'; + +cv.Mat generate3DPoints() { + final pts = [ + [0.5, 0.5, -0.5], + [0.5, 0.5, 0.5], + [-0.5, 0.5, 0.5], + [-0.5, 0.5, -0.5], + [0.5, -0.5, -0.5], + [-0.5, -0.5, -0.5], + [-0.5, -0.5, 0.5], + ]; + final points = cv.Mat.zeros(7, 3, cv.MatType.CV_32FC1); + points.forEachPixel((i, j, v) { + v[0] = pts[i][j]; + }); + return points; +} + +cv.Mat create3DChessboardCorners(cv.Size boardSize, double squareSize) { + final vec = cv.VecPoint3f(boardSize.height * boardSize.width); + for (var y = 0; y < boardSize.height; y++) { + for 
(var x = 0; x < boardSize.width; x++) { + vec[y * boardSize.width + x] = cv.Point3f(x * squareSize, y * squareSize, 0.0); + } + } + return cv.Mat.fromVec(vec); +} + +void main() async { + // https://github.com/shimat/opencvsharp/blob/4a082880c7174d997a0dd35696103efdbfcb9293/test/OpenCvSharp.Tests/calib3d/Calib3dTest.cs#L156 + test('cv.Fisheye.calibrate', () async { + final patternSize = cv.Size(10, 7); + final image = cv.imread("test/images/calibration_00.jpg"); + final (found, corners) = cv.findChessboardCorners(image, (10, 7)); + expect(found, true); + + final objectPoints = cv.VecMat(); + objectPoints.add(create3DChessboardCorners(patternSize, 1.0)); + expect(objectPoints.length, 1); + + final imagePoints = cv.VecMat(); + imagePoints.add(cv.Mat.fromVec(corners)); + expect(imagePoints.length, 1); + + final cameraMatrix = cv.Mat.eye(3, 3, cv.MatType.CV_64FC1); + final distCoeffs = cv.Mat.empty(); + + { + final (rval, rotationVectors, translationVectors) = + cv.Fisheye.calibrate(objectPoints, imagePoints, patternSize, cameraMatrix, distCoeffs); + + expect(rval, greaterThan(10)); + expect(rotationVectors.isEmpty, false); + expect(distCoeffs.isEmpty, false); + expect(translationVectors.isEmpty, false); + + final (map1, map2) = cv.Fisheye.initUndistortRectifyMap( + cameraMatrix, + distCoeffs, + cv.Mat.eye(3, 3, cv.MatType.CV_64FC1), + cameraMatrix, + cv.Size(image.width, image.height), + cv.MatType.CV_16SC2.value, + ); + expect(map1.isEmpty, false); + expect(map2.isEmpty, false); + } + + { + final (rval, rotationVectors, translationVectors) = + await cv.Fisheye.calibrateAsync(objectPoints, imagePoints, patternSize, cameraMatrix, distCoeffs); + + expect(rval, greaterThan(10)); + expect(rotationVectors.isEmpty, false); + expect(distCoeffs.isEmpty, false); + expect(translationVectors.isEmpty, false); + + final (map1, map2) = await cv.Fisheye.initUndistortRectifyMapAsync( + cameraMatrix, + distCoeffs, + cv.Mat.eye(3, 3, cv.MatType.CV_64FC1), + cameraMatrix, + 
cv.Size(image.width, image.height), + cv.MatType.CV_16SC2.value, + ); + expect(map1.isEmpty, false); + expect(map2.isEmpty, false); + } + }); + + test('cv.Fisheye.projectPoints', () async { + final objectPoints = generate3DPoints().reshape(3); + final intrisicMat = cv.Mat.zeros(3, 3, cv.MatType.CV_64FC1); + intrisicMat.set(0, 0, 1.6415318549788924e+003); + intrisicMat.set(1, 1, 1.7067753507885654e+003); + intrisicMat.set(0, 2, 5.3262822453148601e+002); + intrisicMat.set(1, 2, 3.8095355839052968e+002); + intrisicMat.set(2, 2, 1); + + final rvec = cv.Mat.zeros(3, 1, cv.MatType.CV_64FC1); + rvec.set(0, 0, -3.9277902400761393e-002); + rvec.set(1, 0, 3.7803824407602084e-002); + rvec.set(2, 0, 2.6445674487856268e-002); + + final tvec = cv.Mat.zeros(3, 1, cv.MatType.CV_64FC1); + tvec.set(0, 0, 2.1158489381208221e+000); + tvec.set(1, 0, -7.6847683212704716e+000); + tvec.set(2, 0, 2.6169795190294256e+001); + + final distCoeffs = cv.Mat.zeros(4, 1, cv.MatType.CV_64FC1); + + { + final (imagePoints, jacobian) = + cv.Fisheye.projectPoints(objectPoints, rvec, tvec, intrisicMat, distCoeffs); + + expect(imagePoints.isEmpty, false); + expect(jacobian.isEmpty, false); + + final (rval, rv, tv) = + cv.Fisheye.solvePnP(objectPoints, imagePoints, cv.Mat.eye(3, 3, cv.MatType.CV_64FC1), distCoeffs); + expect(rval, true); + expect(rv.isEmpty, false); + expect(tv.isEmpty, false); + } + + { + final (imagePoints, jacobian) = + await cv.Fisheye.projectPointsAsync(objectPoints, rvec, tvec, intrisicMat, distCoeffs); + + expect(imagePoints.isEmpty, false); + expect(jacobian.isEmpty, false); + + final (rval, rv, tv) = await cv.Fisheye.solvePnPAsync( + objectPoints, imagePoints, cv.Mat.eye(3, 3, cv.MatType.CV_64FC1), distCoeffs); + expect(rval, true); + expect(rv.isEmpty, false); + expect(tv.isEmpty, false); + } + }); + + test('cv.Fisheye.undistortImage', () async { + final img = cv.imread("test/images/fisheye_sample.jpg", flags: cv.IMREAD_UNCHANGED); + expect(img.isEmpty, false); + final k = 
cv.Mat.zeros(3, 3, cv.MatType.CV_64FC1); + k.set(0, 0, 689.21); + k.set(0, 1, 0.0); + k.set(0, 2, 1295.56); + + k.set(1, 0, 0.0); + k.set(1, 1, 690.48); + k.set(1, 2, 942.17); + + k.set(2, 0, 0.0); + k.set(2, 1, 0.0); + k.set(2, 2, 1.0); + + final d = cv.Mat.zeros(1, 4, cv.MatType.CV_64FC1); + + { + final dst = cv.Fisheye.undistortImage(img, k, d); + expect(dst.isEmpty, false); + } + { + final dst = await cv.Fisheye.undistortImageAsync(img, k, d); + expect(dst.isEmpty, false); + } + }); + + test('cv.Fisheye.undistortPoints', () async { + final k = cv.Mat.zeros(3, 3, cv.MatType.CV_64FC1); + k.set(0, 0, 1094.7249578198823); + k.set(0, 1, 0.0); + k.set(0, 2, 959.4907612030962); + + k.set(1, 0, 0.0); + k.set(1, 1, 1094.9945708128778); + k.set(1, 2, 536.4566143451868); + + k.set(2, 0, 0.0); + k.set(2, 1, 0.0); + k.set(2, 2, 1.0); + + final d = cv.Mat.zeros(1, 4, cv.MatType.CV_64FC1); + d.set(0, 0, -0.05207412392075069); + d.set(0, 1, -0.089168300192224); + d.set(0, 2, 0.10465607695792184); + d.set(0, 3, -0.045693446831115585); + + final r = cv.Mat.empty(); + final src = cv.Mat.zeros(3, 1, cv.MatType.CV_64FC2); + final dst = cv.Mat.zeros(3, 1, cv.MatType.CV_64FC2); + + src.set(0, 0, 480.0); + src.set(0, 1, 270.0); + + src.set(1, 0, 960.0); + src.set(1, 1, 540.0); + + src.set(2, 0, 1440.0); + src.set(2, 1, 810.0); + + final knew = k.clone(); + knew.set(0, 0, 0.4 * k.at(0, 0)); + knew.set(1, 1, 0.4 * k.at(1, 1)); + + { + cv.Fisheye.estimateNewCameraMatrixForUndistortRectify( + k, + d, + (1920, 1080), + r, + P: knew, + balance: 1, + newSize: (1920, 1080), + ); + + cv.Fisheye.undistortPoints(src, k, d); + cv.Fisheye.undistortPoints(src, k, d, undistorted: dst, R: r, P: k); + expect(dst.isEmpty, false); + expect(dst.at(0, 0) != 0, true); + + final dst1 = cv.Fisheye.distortPoints(dst, k, d); + expect(dst1.isEmpty, false); + + cv.Fisheye.distortPoints(dst, k, d, Kundistorted: knew, distorted: dst1); + expect(dst1.isEmpty, false); + } + { + await 
cv.Fisheye.estimateNewCameraMatrixForUndistortRectifyAsync( + k, + d, + (1920, 1080), + r, + balance: 1, + newSize: (1920, 1080), + ); + + await cv.Fisheye.undistortPointsAsync(src, k, d); + await cv.Fisheye.undistortPointsAsync(src, k, d, R: r, P: k); + expect(dst.isEmpty, false); + expect(dst.at(0, 0) != 0, true); + + final dst1 = await cv.Fisheye.distortPointsAsync(dst, k, d); + expect(dst1.isEmpty, false); + + await cv.Fisheye.distortPointsAsync(dst, k, d, Kundistorted: knew, distorted: dst1); + expect(dst1.isEmpty, false); + } + }); +} diff --git a/packages/dartcv/test/calib3d/calib3d_test.dart b/packages/dartcv/test/calib3d/calib3d_test.dart new file mode 100644 index 00000000..3dfaff83 --- /dev/null +++ b/packages/dartcv/test/calib3d/calib3d_test.dart @@ -0,0 +1,599 @@ +import 'dart:math' as math; + +import 'package:dartcv4/dartcv.dart' as cv; +import 'package:test/test.dart'; + +cv.Mat generate3DPoints() { + final pts = [ + [0.5, 0.5, -0.5], + [0.5, 0.5, 0.5], + [-0.5, 0.5, 0.5], + [-0.5, 0.5, -0.5], + [0.5, -0.5, -0.5], + [-0.5, -0.5, -0.5], + [-0.5, -0.5, 0.5], + ]; + final points = cv.Mat.zeros(7, 3, cv.MatType.CV_32FC1); + points.forEachPixel((i, j, v) { + v[0] = pts[i][j]; + }); + return points; +} + +void main() async { + test('cv.undistortPoints', () async { + final k = cv.Mat.zeros(3, 3, cv.MatType.CV_64FC1); + k.set(0, 0, 1094.7249578198823); + k.set(0, 1, 0.0); + k.set(0, 2, 1094.7249578198823); + + k.set(1, 0, 0.0); + k.set(1, 1, 1094.9945708128778); + k.set(1, 2, 536.4566143451868); + + k.set(2, 0, 0.0); + k.set(2, 1, 0.0); + k.set(2, 2, 1.0); + + final d = cv.Mat.zeros(1, 4, cv.MatType.CV_64FC1); + d.set(0, 0, -0.05207412392075069); + d.set(0, 1, -0.089168300192224); + d.set(0, 2, 0.10465607695792184); + d.set(0, 3, -0.045693446831115585); + + final r = cv.Mat.empty(); + final src = cv.Mat.zeros(3, 1, cv.MatType.CV_64FC2); + + // This camera matrix is 1920x1080. 
Points where x < 960 and y < 540 should move toward the top left (x and y get smaller) + // The centre point should be mostly unchanged + // Points where x > 960 and y > 540 should move toward the bottom right (x and y get bigger) + + // The index being used for col here is actually the channel (i.e. the point's x/y dimensions) + // (since there's only 1 column so the formula: (colNumber * numChannels + channelNumber) reduces to + // (0 * 2) + channelNumber + // so col = 0 is the x coordinate and col = 1 is the y coordinate + + src.set(0, 0, 480.0); + src.set(0, 1, 270.0); + + src.set(1, 0, 960.0); + src.set(1, 1, 540.0); + + src.set(2, 0, 1920.0); + src.set(2, 1, 1080.0); + + { + cv.undistortPoints(src, k, d); + final dst = cv.undistortPoints(src, k, d, R: r, P: k); + expect(dst.isEmpty, false); + expect(dst.at(0, 0), lessThan(480)); + expect(dst.at(0, 1), lessThan(270)); + } + { + await cv.undistortPointsAsync(src, k, d); + final dst = await cv.undistortPointsAsync(src, k, d, R: r, P: k); + expect(dst.isEmpty, false); + expect(dst.at(0, 0), lessThan(480)); + expect(dst.at(0, 1), lessThan(270)); + } + }); + + test('cv.calibrateCamera', () async { + final img = cv.imread("test/images/chessboard_4x6_distort.png", flags: cv.IMREAD_GRAYSCALE); + expect(img.isEmpty, false); + + const patternSize = (4, 6); + final (found, corners) = cv.findChessboardCorners(img, patternSize, flags: 0); + expect(found, true); + expect(corners.isEmpty, false); + + final pts = []; + for (var j = 0; j < patternSize.$2; j++) { + for (var i = 0; i < patternSize.$1; i++) { + pts.add(cv.Point3f(i.toDouble(), j.toDouble(), 0)); + } + } + final objectPointsVector = cv.Contours3f.fromList([pts]); + final imagePointsVector = cv.Contours2f.fromList([corners.toList()]); + + { + final cameraMatrix = cv.Mat.empty(); + final distCoeffs = cv.Mat.empty(); + final (rmsErr, mtx, dist, rvecs, tvecs) = cv.calibrateCamera( + objectPointsVector, + imagePointsVector, + (img.cols, img.rows), + cameraMatrix, + 
distCoeffs, + ); + expect(rmsErr, greaterThan(0)); + expect(mtx.isEmpty || dist.isEmpty || rvecs.isEmpty || tvecs.isEmpty, false); + + final dst = cv.undistort(img, cameraMatrix, distCoeffs); + final target = cv.imread("test/images/chessboard_4x6_distort_correct.png", flags: cv.IMREAD_GRAYSCALE); + final xor = cv.bitwiseXOR(dst, target); + final sum = xor.sum(); + expect(sum.val1, lessThan(img.rows * img.cols * 0.005)); + } + { + final cameraMatrix = cv.Mat.empty(); + final distCoeffs = cv.Mat.empty(); + final (rmsErr, mtx, dist, rvecs, tvecs) = await cv.calibrateCameraAsync( + objectPointsVector, + imagePointsVector, + (img.cols, img.rows), + cameraMatrix, + distCoeffs, + ); + expect(rmsErr, greaterThan(0)); + expect(mtx.isEmpty || dist.isEmpty || rvecs.isEmpty || tvecs.isEmpty, false); + + final dst = await cv.undistortAsync(img, cameraMatrix, distCoeffs); + final target = cv.imread("test/images/chessboard_4x6_distort_correct.png", flags: cv.IMREAD_GRAYSCALE); + final xor = cv.bitwiseXOR(dst, target); + final sum = xor.sum(); + expect(sum.val1, lessThan(img.rows * img.cols * 0.005)); + } + }); + + test('cv.checkChessboard', () { + final patternSize = (4, 6).toSize(); + final img = cv.imread("test/images/chessboard_4x6.png", flags: cv.IMREAD_GRAYSCALE); + final img1 = cv.imread("test/images/lenna.png", flags: cv.IMREAD_GRAYSCALE); + expect(cv.checkChessboard(img, patternSize), true); + expect(cv.checkChessboard(img1, patternSize), false); + }); + + test('cv.findChessboardCorners, cv.drawChessboardCorners', () async { + final img = cv.imread("test/images/chessboard_4x6.png", flags: cv.IMREAD_UNCHANGED); + expect(img.isEmpty, false); + + { + final (found, corners) = cv.findChessboardCorners(img, (4, 6), flags: 0); + expect(found, true); + expect(corners.isEmpty, false); + + final img2 = cv.Mat.zeros(150, 150, cv.MatType.CV_8UC1); + cv.drawChessboardCorners(img2, (4, 6), corners, true); + expect(img2.isEmpty, false); + } + { + final (found, corners) = await 
cv.findChessboardCornersAsync(img, (4, 6)); + expect(found, true); + expect(corners.isEmpty, false); + + // final img2 = cv.Mat.zeros(150, 150, cv.MatType.CV_8UC1); + // await cv.drawChessboardCornersAsync(img2, (4, 6), corners, true); + // expect(img2.isEmpty, false); + } + }); + + test( + 'cv.findChessboardCornersSB', + // onPlatform: { + // "mac-os": const Skip("https://github.com/opencv/opencv/issues/20202"), + // }, + () async { + final img = cv.imread("test/images/chessboard_4x6.png", flags: cv.IMREAD_COLOR); + expect(img.isEmpty, false); + + { + final (found, corners) = cv.findChessboardCornersSB(img, (3, 3), flags: 0); + expect(found, true); + expect(corners.isEmpty, false); + + final img2 = cv.Mat.zeros(150, 150, cv.MatType.CV_8UC1); + cv.drawChessboardCorners(img2, (4, 6), corners, true); + expect(img2.isEmpty, false); + } + { + final (found, corners) = await cv.findChessboardCornersSBAsync(img, (4, 6), 0); + expect(found, true); + expect(corners.isEmpty, false); + + final img2 = cv.Mat.zeros(150, 150, cv.MatType.CV_8UC1); + await cv.drawChessboardCornersAsync(img2, (4, 6), corners, true); + expect(img2.isEmpty, false); + } + }, + ); + + test( + 'cv.findChessboardCornersSBWithMeta', + // onPlatform: { + // "mac-os": const Skip("https://github.com/opencv/opencv/issues/20202"), + // }, + () async { + final img = cv.imread("test/images/chessboard_4x6.png", flags: cv.IMREAD_COLOR); + expect(img.isEmpty, false); + + { + final (found, corners, meta) = cv.findChessboardCornersSBWithMeta(img, (4, 6), 0); + expect(found, true); + expect(corners.isEmpty, false); + expect(meta.isEmpty, false); + + final img2 = cv.Mat.zeros(150, 150, cv.MatType.CV_8UC1); + cv.drawChessboardCorners(img2, (4, 6), corners, true); + expect(img2.isEmpty, false); + } + { + final (found, corners, meta) = await cv.findChessboardCornersSBWithMetaAsync(img, (4, 6), 0); + expect(found, true); + expect(corners.isEmpty, false); + expect(meta.isEmpty, false); + + final img2 = cv.Mat.zeros(150, 
150, cv.MatType.CV_8UC1); + await cv.drawChessboardCornersAsync(img2, (4, 6), corners, true); + expect(img2.isEmpty, false); + } + }, + ); + + test('cv.estimateAffinePartial2D', () async { + final src = [ + cv.Point2f(0, 0), + cv.Point2f(10, 5), + cv.Point2f(10, 10), + cv.Point2f(5, 10), + ].cvd; + final dst = [ + cv.Point2f(0, 0), + cv.Point2f(10, 0), + cv.Point2f(10, 10), + cv.Point2f(0, 10), + ].cvd; + { + final (m, inliers) = cv.estimateAffinePartial2D( + src, + dst, + ); + expect(inliers.isEmpty, false); + expect(m.isEmpty, false); + expect((m.rows, m.cols), (2, 3)); + } + { + final (m, inliers) = await cv.estimateAffinePartial2DAsync(src, dst); + expect(inliers.isEmpty, false); + expect(m.isEmpty, false); + expect((m.rows, m.cols), (2, 3)); + } + }); + + test('cv.estimateAffine2D', () async { + final src = [ + cv.Point2f(0, 0), + cv.Point2f(10, 5), + cv.Point2f(10, 10), + cv.Point2f(5, 10), + ].cvd; + final dst = [ + cv.Point2f(0, 0), + cv.Point2f(10, 0), + cv.Point2f(10, 10), + cv.Point2f(0, 10), + ].cvd; + { + final (m, inliers) = cv.estimateAffine2D( + src, + dst, + ); + expect(inliers.isEmpty, false); + expect(m.isEmpty, false); + expect((m.rows, m.cols), (2, 3)); + } + { + final (m, inliers) = await cv.estimateAffine2DAsync( + src, + dst, + ); + expect(inliers.isEmpty, false); + expect(m.isEmpty, false); + expect((m.rows, m.cols), (2, 3)); + } + }); + + // findHomography + test('cv.findHomography', () async { + final src = cv.Mat.zeros(4, 1, cv.MatType.CV_64FC2); + final dst = cv.Mat.zeros(4, 1, cv.MatType.CV_64FC2); + final srcPts = [ + cv.Point2f(193, 932), + cv.Point2f(191, 378), + cv.Point2f(1497, 183), + cv.Point2f(1889, 681), + ]; + final dstPts = [ + cv.Point2f(51.51206544281359, -0.10425475260813055), + cv.Point2f(51.51211051314331, -0.10437947532732306), + cv.Point2f(51.512222354139325, -0.10437679311830816), + cv.Point2f(51.51214828037607, -0.1042212249954444), + ]; + for (var i = 0; i < srcPts.length; i++) { + src.set(i, 0, srcPts[i].x); + 
src.set(i, 1, srcPts[i].y); + } + for (var i = 0; i < dstPts.length; i++) { + dst.set(i, 0, dstPts[i].x); + dst.set(i, 1, dstPts[i].y); + } + + { + final mask = cv.Mat.empty(); + final m = cv.findHomography( + src, + dst, + method: cv.HOMOGRAPY_ALL_POINTS, + ransacReprojThreshold: 3, + mask: mask, + ); + expect(m.isEmpty, false); + } + + { + final (m, _) = await cv.findHomographyAsync( + src, + dst, + method: cv.HOMOGRAPY_ALL_POINTS, + ransacReprojThreshold: 3, + ); + expect(m.isEmpty, false); + } + }); + + test('cv.findHomographyUsac', () async { + final points1 = cv.VecPoint2f.generate(5, (i) => cv.Point2f(i * 10, i * 20)); + final points2 = points1.map((p) => cv.Point2f(p.x + p.x / 10, p.y + p.y / 10)).toList(growable: false); + + final m1 = cv.Mat.fromVec(points1); + final m2 = cv.Mat.fromVec(points2.asVec()); + { + final mask = cv.Mat.empty(); + final _ = cv.findHomographyUsac( + m1, + m2, + cv.UsacParams(), + mask: mask, + ); + // expect(m.isEmpty, false); + } + }); + + test('cv.findFundamentalMat', () { + final imgPt1 = cv.Mat.from2DList( + [ + [1017.0883, 848.23529], + [1637, 848.23529], + [1637, 1648.7059], + [1017.0883, 1648.7059], + [2282.2144, 772], + [3034.9644, 772], + [3034.9644, 1744], + [2282.2144, 1744], + ], + cv.MatType.CV_64FC1, + ); + + final imgPt2 = cv.Mat.from2DList( + [ + [414.88824, 848.23529], + [1034.8, 848.23529], + [1034.8, 1648.7059], + [414.88824, 1648.7059], + [1550.9714, 772], + [2303.7214, 772], + [2303.7214, 1744], + [1550.9714, 1744], + ], + cv.MatType.CV_64FC1, + ); + + final m = cv.findFundamentalMat(imgPt1, imgPt2, method: cv.FM_8POINT); + expect(m.isEmpty, true); // TODO + }); + + test('cv.initUndistortRectifyMap', () async { + final img = cv.imread("test/images/distortion.jpg", flags: cv.IMREAD_UNCHANGED); + expect(img.isEmpty, false); + + final k = cv.Mat.zeros(3, 3, cv.MatType.CV_64FC1); + k.set(0, 0, 842.0261028); + k.set(0, 1, 0.0); + k.set(0, 2, 667.7569792); + + k.set(1, 0, 0.0); + k.set(1, 1, 707.3668897); + 
k.set(1, 2, 385.56476464); + + k.set(2, 0, 0.0); + k.set(2, 1, 0.0); + k.set(2, 2, 1.0); + + final d = cv.Mat.zeros(1, 5, cv.MatType.CV_64FC1); + d.set(0, 0, -3.65584802e-01); + d.set(0, 1, 1.41555815e-01); + d.set(0, 2, -2.62985819e-03); + d.set(0, 3, 2.05841873e-04); + d.set(0, 4, -2.35021914e-02); + + { + final (newC, roi) = cv.getOptimalNewCameraMatrix(k, d, (img.cols, img.rows), 1); + expect(newC.isEmpty, false); + expect(roi.width, greaterThan(0)); + + final r = cv.Mat.empty(); + final (map1, map2) = cv.initUndistortRectifyMap(k, d, r, newC, (img.cols, img.rows), 5); + final dst = cv.remap(img, map1, map2, cv.INTER_LINEAR); + expect(dst.isEmpty, false); + final success = cv.imwrite("test/images/distortion-correct.png", dst); + expect(success, true); + } + { + final (newC, roi) = await cv.getOptimalNewCameraMatrixAsync(k, d, (img.cols, img.rows), 1); + expect(newC.isEmpty, false); + expect(roi.width, greaterThan(0)); + + final r = cv.Mat.empty(); + final (map1, map2) = await cv.initUndistortRectifyMapAsync(k, d, r, newC, (img.cols, img.rows), 5); + final dst = cv.remap(img, map1, map2, cv.INTER_LINEAR); + expect(dst.isEmpty, false); + final success = cv.imwrite("test/images/distortion-correct.png", dst); + expect(success, true); + } + }); + + // from https://github.com/shimat/opencvsharp/blob/main/test/OpenCvSharp.Tests/calib3d/Calib3dTest.cs + test('cv.projectPoints', () async { + final objectPoints = generate3DPoints(); + final intrinsicMat = cv.Mat.zeros(3, 3, cv.MatType.CV_64FC1); + intrinsicMat.set(0, 0, 1.6415318549788924e+003); + intrinsicMat.set(1, 0, 0); + intrinsicMat.set(2, 0, 0); + intrinsicMat.set(0, 1, 0); + intrinsicMat.set(1, 1, 1.7067753507885654e+003); + intrinsicMat.set(2, 1, 0); + intrinsicMat.set(0, 2, 5.3262822453148601e+002); + intrinsicMat.set(1, 2, 3.8095355839052968e+002); + intrinsicMat.set(2, 2, 1); + + final rVec = cv.Mat.zeros(3, 1, cv.MatType.CV_64FC1); + rVec.set(0, 0, -3.9277902400761393e-002); + rVec.set(1, 0, 
3.7803824407602084e-002); + rVec.set(2, 0, 2.6445674487856268e-002); + + final tVec = cv.Mat.zeros(3, 1, cv.MatType.CV_64FC1); + tVec.set(0, 0, 2.1158489381208221e+000); + tVec.set(1, 0, -7.6847683212704716e+000); + tVec.set(2, 0, 2.6169795190294256e+001); + + final distCoeffs = cv.Mat.zeros(4, 1, cv.MatType.CV_64FC1); + distCoeffs.set(0, 0, 0); + distCoeffs.set(1, 0, 0); + distCoeffs.set(2, 0, 0); + distCoeffs.set(3, 0, 0); + + { + final (imagePoints, jacobian) = cv.projectPoints(objectPoints, rVec, tVec, intrinsicMat, distCoeffs); + expect(imagePoints.isEmpty, false); + expect(jacobian.isEmpty, false); + } + + { + final (imagePoints, jacobian) = + await cv.projectPointsAsync(objectPoints, rVec, tVec, intrinsicMat, distCoeffs); + expect(imagePoints.isEmpty, false); + expect(jacobian.isEmpty, false); + } + }); + + test('cv.recoverPose', () { + final essential = cv.Mat.from2DList( + [ + [1.503247056657373e-16, -7.074103796034695e-16, -7.781514175638166e-16], + [6.720398606232961e-16, -6.189840821530359e-17, -0.7071067811865476], + [7.781514175638166e-16, 0.7071067811865475, -2.033804841359975e-16], + ], + cv.MatType.CV_64FC1, + ); + + final p1 = cv.Mat.from2DList( + [ + [1017.0883, 848.23529], + [1637, 848.23529], + [1637, 1648.7059], + [1017.0883, 1648.7059], + [2282.2144, 772], + [3034.9644, 772], + [3034.9644, 1744], + [2282.2144, 1744], + ], + cv.MatType.CV_64FC1, + ); + + final p2 = cv.Mat.from2DList( + [ + [414.88824, 848.23529], + [1034.8, 848.23529], + [1034.8, 1648.7059], + [414.88824, 1648.7059], + [1550.9714, 772], + [2303.7214, 772], + [2303.7214, 1744], + [1550.9714, 1744], + ], + cv.MatType.CV_64FC1, + ); + + final k = cv.Mat.from2DList( + [ + [3011, 0, 1637], + [0, 3024, 1204], + [0, 0, 1], + ], + cv.MatType.CV_64FC1, + ); + + final (rval, r, t, _) = cv.recoverPoseWithCameraMatrix(essential, p1, p2, k); + expect(rval, 0); + expect(r.isEmpty, false); + expect(t.isEmpty, false); + }); + + test('cv.Rodrigues', () { + const double angle = 45; + final cos 
= math.cos(angle * math.pi / 180); + final sin = math.sin(angle * math.pi / 180); + final matrix = cv.Mat.zeros(3, 3, cv.MatType.CV_64FC1); + matrix.set(0, 0, cos); + matrix.set(0, 1, -sin); + matrix.set(1, 0, sin); + matrix.set(1, 1, cos); + matrix.set(2, 2, 1.0); + final jacobian = cv.Mat.empty(); + final vector = cv.Rodrigues(matrix, jacobian: jacobian); + expect(vector.isEmpty, false); + expect(vector.total, 3); + expect(vector.rows, 3); + expect(vector.cols, 1); + expect(vector.atNum(0, 0), closeTo(0, 1e-3)); + expect(vector.atNum(1, 0), closeTo(0, 1e-3)); + expect(vector.atNum(2, 0), closeTo(0.785, 1e-3)); + + expect(jacobian.isEmpty, false); + expect(jacobian.rows, 9); + expect(jacobian.cols, 3); + }); + + test('cv.solvePnP', () async { + final rvec = cv.Mat.fromList(3, 1, cv.MatType.CV_32FC1, [0, 0, 0]); + final tvec = cv.Mat.fromList(3, 1, cv.MatType.CV_32FC1, [0, 0, 0]); + final cameraMatrix = cv.Mat.from2DList( + [ + [1, 0, 0], + [0, 1, 0], + [0, 0, 1], + ], + cv.MatType.CV_32FC1, + ); + final dist = cv.Mat.fromList(5, 1, cv.MatType.CV_32FC1, [0, 0, 0, 0, 0]); + final objPts = cv.Mat.from2DList( + [ + [0, 0, 1], + [1, 0, 1], + [0, 1, 1], + [1, 1, 1], + [1, 0, 2], + [0, 1, 2], + ], + cv.MatType.CV_32FC1, + ); + final (imgPts, jacobian) = cv.projectPoints(objPts, rvec, tvec, cameraMatrix, dist); + expect(imgPts.isEmpty, false); + expect(jacobian.isEmpty, false); + + final (rval, rv, tv) = cv.solvePnP(objPts, imgPts, cameraMatrix, dist); + expect(rval, true); + expect(rv.isEmpty, false); + expect(tv.isEmpty, false); + }); +}