@@ -49,8 +49,8 @@ namespace rgbd
 
 enum
 {
-    RGBD_ODOMETRY = 1,
-    ICP_ODOMETRY = 2,
+    RGBD_ODOMETRY = 1,
+    ICP_ODOMETRY = 2,
     MERGED_ODOMETRY = RGBD_ODOMETRY + ICP_ODOMETRY
 };
 
@@ -436,7 +436,7 @@ void computeProjectiveMatrix(const Mat& ksi, Mat& Rt)
 
     eigen2cv(g, Rt);
 #else
-    // TODO: check computeProjectiveMatrix when there is not eigen library,
+    // TODO: check computeProjectiveMatrix when there is not eigen library,
     //       because it gives less accurate pose of the camera
     Rt = Mat::eye(4, 4, CV_64FC1);
 
@@ -462,7 +462,7 @@ void computeCorresps(const Mat& K, const Mat& K_inv, const Mat& Rt,
     CV_Assert(Rt.type() == CV_64FC1);
 
     Mat corresps(depth1.size(), CV_16SC2, Scalar::all(-1));
-
+
     Rect r(0, 0, depth1.cols, depth1.rows);
     Mat Kt = Rt(Rect(3,0,1,3)).clone();
     Kt = K * Kt;
@@ -486,7 +486,7 @@ void computeCorresps(const Mat& K, const Mat& K_inv, const Mat& Rt,
         KRK_inv3_u1[u1] = (float)(KRK_inv_ptr[3] * u1);
         KRK_inv6_u1[u1] = (float)(KRK_inv_ptr[6] * u1);
     }
-
+
     for(int v1 = 0; v1 < depth1.rows; v1++)
     {
         KRK_inv1_v1_plus_KRK_inv2[v1] = (float)(KRK_inv_ptr[1] * v1 + KRK_inv_ptr[2]);
@@ -506,16 +506,16 @@ void computeCorresps(const Mat& K, const Mat& K_inv, const Mat& Rt,
             if(mask1_row[u1])
             {
                 CV_DbgAssert(!cvIsNaN(d1));
-                float transformed_d1 = static_cast<float>(d1 * (KRK_inv6_u1[u1] + KRK_inv7_v1_plus_KRK_inv8[v1]) +
+                float transformed_d1 = static_cast<float>(d1 * (KRK_inv6_u1[u1] + KRK_inv7_v1_plus_KRK_inv8[v1]) +
                                                           Kt_ptr[2]);
                 if(transformed_d1 > 0)
                 {
                     float transformed_d1_inv = 1.f / transformed_d1;
-                    int u0 = cvRound(transformed_d1_inv * (d1 * (KRK_inv0_u1[u1] + KRK_inv1_v1_plus_KRK_inv2[v1]) +
+                    int u0 = cvRound(transformed_d1_inv * (d1 * (KRK_inv0_u1[u1] + KRK_inv1_v1_plus_KRK_inv2[v1]) +
                                                            Kt_ptr[0]));
-                    int v0 = cvRound(transformed_d1_inv * (d1 * (KRK_inv3_u1[u1] + KRK_inv4_v1_plus_KRK_inv5[v1]) +
+                    int v0 = cvRound(transformed_d1_inv * (d1 * (KRK_inv3_u1[u1] + KRK_inv4_v1_plus_KRK_inv5[v1]) +
                                                            Kt_ptr[1]));
-
+
                     if(r.contains(Point(u0,v0)))
                     {
                         float d0 = depth0.at<float>(v0,u0);
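For readers following the correspondence search: the KRK_inv*/Kt_ptr tables in the hunks above are just a row/column factorisation of the usual pinhole reprojection d1 * K * R * K^-1 * [u1, v1, 1]^T + K * t. Below is a minimal, unoptimised sketch of that mapping; the name reprojectPixel and its signature are illustrative and not part of the patch.

#include <opencv2/core.hpp>

// Map pixel (u1, v1) with depth d1 in frame 1 into frame 0, given intrinsics K
// and a 4x4 rigid transform Rt taking frame-1 coordinates to frame-0 coordinates.
static bool reprojectPixel(const cv::Matx33d& K, const cv::Matx44d& Rt,
                           int u1, int v1, float d1, int& u0, int& v0, float& d0)
{
    cv::Matx33d R(Rt(0,0), Rt(0,1), Rt(0,2),
                  Rt(1,0), Rt(1,1), Rt(1,2),
                  Rt(2,0), Rt(2,1), Rt(2,2));
    cv::Vec3d t(Rt(0,3), Rt(1,3), Rt(2,3));

    // d1 * K * R * K^-1 * [u1, v1, 1]^T + K * t
    cv::Vec3d p = d1 * (K * R * K.inv()) * cv::Vec3d(u1, v1, 1.0) + K * t;
    if(p(2) <= 0)
        return false;                  // point ends up behind the camera

    d0 = static_cast<float>(p(2));     // transformed depth (transformed_d1 in the loop)
    u0 = cvRound(p(0) / p(2));         // perspective division back to pixel coordinates
    v0 = cvRound(p(1) / p(2));
    return true;
}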
@@ -527,7 +527,7 @@ void computeCorresps(const Mat& K, const Mat& K_inv, const Mat& Rt,
                             {
                                 int exist_u1 = c[0], exist_v1 = c[1];
 
-                                float exist_d1 = (float)(depth1.at<float>(exist_v1,exist_u1) *
+                                float exist_d1 = (float)(depth1.at<float>(exist_v1,exist_u1) *
                                     (KRK_inv6_u1[exist_u1] + KRK_inv7_v1_plus_KRK_inv8[exist_v1]) + Kt_ptr[2]);
 
                                 if(transformed_d1 > exist_d1)
@@ -631,7 +631,7 @@ void calcICPEquationCoeffsTranslation(double* C, const Point3f& /*p0*/, const Ve
 typedef
 void (*CalcICPEquationCoeffsPtr)(double*, const Point3f&, const Vec3f&);
 
-static
+static
 void calcRgbdLsmMatrices(const Mat& image0, const Mat& cloud0, const Mat& Rt,
                          const Mat& image1, const Mat& dI_dx1, const Mat& dI_dy1,
                          const Mat& corresps, double fx, double fy, double sobelScaleIn,
@@ -657,8 +657,8 @@ void calcRgbdLsmMatrices(const Mat& image0, const Mat& cloud0, const Mat& Rt,
         const Vec4i& c = corresps_ptr[correspIndex];
         int u0 = c[0], v0 = c[1];
         int u1 = c[2], v1 = c[3];
-
-        diffs_ptr[correspIndex] = static_cast<float>(static_cast<int>(image0.at<uchar>(v0,u0)) -
+
+        diffs_ptr[correspIndex] = static_cast<float>(static_cast<int>(image0.at<uchar>(v0,u0)) -
                                                      static_cast<int>(image1.at<uchar>(v1,u1)));
         sigma += diffs_ptr[correspIndex] * diffs_ptr[correspIndex];
     }
@@ -697,8 +697,8 @@ void calcRgbdLsmMatrices(const Mat& image0, const Mat& cloud0, const Mat& Rt,
 
             AtB_ptr[y] += A_ptr[y] * w * diffs_ptr[correspIndex];
         }
-    }
-
+    }
+
     for(int y = 0; y < transformDim; y++)
         for(int x = y+1; x < transformDim; x++)
             AtA.at<double>(x,y) = AtA.at<double>(y,x);
@@ -790,16 +790,16 @@ bool solveSystem(const Mat& AtA, const Mat& AtB, double detThreshold, Mat& x)
     return true;
 }
 
-static
+static
 bool testDeltaTransformation(const Mat& deltaRt, double maxTranslation, double maxRotation)
 {
     double translation = norm(deltaRt(Rect(3, 0, 1, 3)));
-
+
     Mat rvec;
     Rodrigues(deltaRt(Rect(0,0,3,3)), rvec);
-
+
     double rotation = norm(rvec) * 180. / CV_PI;
-
+
     return translation <= maxTranslation && rotation <= maxRotation;
 }
 
@@ -920,9 +920,9 @@ bool RGBDICPOdometryImpl(Mat& Rt, const Mat& initRt,
             isOk = true;
         }
     }
-
+
     Rt = resultRt;
-
+
     if(isOk)
     {
         Mat deltaRt;
@@ -941,24 +941,25 @@ template<class ImageElemType>
 static void
 warpFrameImpl(const Mat& image, const Mat& depth, const Mat& mask,
               const Mat& Rt, const Mat& cameraMatrix, const Mat& distCoeff,
-              Mat& warpedImage, Mat* warpedDepth, Mat* warpedMask)
+              OutputArray _warpedImage, OutputArray warpedDepth, OutputArray warpedMask)
 {
     CV_Assert(image.size() == depth.size());
-
+
     Mat cloud;
     depthTo3d(depth, cameraMatrix, cloud);
-
+
     std::vector<Point2f> points2d;
     Mat transformedCloud;
     perspectiveTransform(cloud, transformedCloud, Rt);
     projectPoints(transformedCloud.reshape(3, 1), Mat::eye(3, 3, CV_64FC1), Mat::zeros(3, 1, CV_64FC1), cameraMatrix,
                   distCoeff, points2d);
 
-    warpedImage = Mat(image.size(), image.type(), Scalar::all(0));
+    _warpedImage.create(image.size(), image.type());
+    Mat warpedImage = _warpedImage.getMat();
 
     Mat zBuffer(image.size(), CV_32FC1, std::numeric_limits<float>::max());
     const Rect rect = Rect(0, 0, image.cols, image.rows);
-
+
     for(int y = 0; y < image.rows; y++)
     {
         // const Point3f* cloud_row = cloud.ptr<Point3f>(y);
@@ -978,13 +979,13 @@ warpFrameImpl(const Mat& image, const Mat& depth, const Mat& mask,
         }
     }
 
-    if(warpedMask)
-        *warpedMask = zBuffer != std::numeric_limits<float>::max();
+    if(warpedMask.needed())
+        Mat(zBuffer != std::numeric_limits<float>::max()).copyTo(warpedMask);
 
-    if(warpedDepth)
+    if(warpedDepth.needed())
     {
         zBuffer.setTo(std::numeric_limits<float>::quiet_NaN(), zBuffer == std::numeric_limits<float>::max());
-        *warpedDepth = zBuffer;
+        zBuffer.copyTo(warpedDepth);
     }
 }
 
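For reference, this is the producer-side OutputArray idiom the warpFrameImpl hunks above switch to. The helper name makeOutputs and its body are illustrative only; just the needed()/create()/getMat()/copyTo() calls mirror the ones in the patch.

#include <opencv2/core.hpp>

// Illustrative helper (not part of the patch): the mandatory output is allocated
// through the proxy, optional outputs are produced only when the caller asked.
static void makeOutputs(const cv::Mat& src, cv::OutputArray dstImage, cv::OutputArray dstMask)
{
    dstImage.create(src.size(), src.type());   // allocate the mandatory output
    cv::Mat image = dstImage.getMat();         // Mat header onto the allocated buffer
    src.copyTo(image);

    if(dstMask.needed())                       // skipped when cv::noArray() was passed
    {
        cv::Mat mask(src.size(), CV_8UC1, cv::Scalar(255));
        mask.copyTo(dstMask);
    }
}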
@@ -1406,7 +1407,7 @@ bool RgbdICPOdometry::computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<
 void
 warpFrame(const Mat& image, const Mat& depth, const Mat& mask,
           const Mat& Rt, const Mat& cameraMatrix, const Mat& distCoeff,
-          Mat& warpedImage, Mat* warpedDepth, Mat* warpedMask)
+          OutputArray warpedImage, OutputArray warpedDepth, OutputArray warpedMask)
 {
     if(image.type() == CV_8UC1)
         warpFrameImpl<uchar>(image, depth, mask, Rt, cameraMatrix, distCoeff, warpedImage, warpedDepth, warpedMask);
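With both warpFrame overloads now taking OutputArray, callers can skip outputs they do not need by passing cv::noArray(). A minimal caller-side sketch, assuming the opencv_contrib rgbd module header opencv2/rgbd.hpp and the cv::rgbd namespace; the synthetic image, constant depth, and intrinsics below are placeholders.

#include <opencv2/core.hpp>
#include <opencv2/rgbd.hpp>

int main()
{
    // Placeholder inputs: flat grey image, constant 1 m depth, full mask.
    cv::Mat image(480, 640, CV_8UC1, cv::Scalar(128));
    cv::Mat depth(480, 640, CV_32FC1, cv::Scalar(1.f));
    cv::Mat mask(480, 640, CV_8UC1, cv::Scalar(255));

    // Placeholder pinhole intrinsics and an identity pose.
    cv::Mat K = (cv::Mat_<double>(3, 3) << 525.0, 0.0, 319.5,
                                           0.0, 525.0, 239.5,
                                           0.0, 0.0, 1.0);
    cv::Mat Rt = cv::Mat::eye(4, 4, CV_64FC1);

    cv::Mat warpedImage, warpedDepth;
    // The warped mask is not needed here, so pass cv::noArray() for it.
    cv::rgbd::warpFrame(image, depth, mask, Rt, K, cv::Mat(),
                        warpedImage, warpedDepth, cv::noArray());
    return 0;
}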