From 68244485d2e5897f58bcaa226eed8e038bdf69ea Mon Sep 17 00:00:00 2001
From: Steffen Urban
Date: Wed, 16 Dec 2020 13:44:44 +0100
Subject: [PATCH 01/12] initial commit of new aruco functionality

---
 modules/aruco/include/opencv2/aruco.hpp   |  40 ++
 modules/aruco/samples/detector_params.yml |   1 +
 modules/aruco/src/aruco.cpp               | 588 ++++++++++++++++------
 3 files changed, 462 insertions(+), 167 deletions(-)

diff --git a/modules/aruco/include/opencv2/aruco.hpp b/modules/aruco/include/opencv2/aruco.hpp
index 9244f0020f..4ff231a3fc 100644
--- a/modules/aruco/include/opencv2/aruco.hpp
+++ b/modules/aruco/include/opencv2/aruco.hpp
@@ -146,6 +146,33 @@ enum CornerRefineMethod{
 * Parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8). (default 0.0)
 * - detectInvertedMarker: to check if there is a white marker. In order to generate a "white" marker just
 *   invert a normal marker by using a tilde, ~markerImage. (default false)
+ * - useAruco3Detection: enables the new and faster Aruco detection strategy. The key observation of
+ *   Romero-Ramirez et al.: Speeded up detection of squared fiducial markers (2018) is that the binary
+ *   code of a marker can be reliably read if the canonical image (the image used to extract the binary code)
+ *   has a side length of minSideLengthCanonicalImg (in practice tau_c = 16-32 pixels).
+ *   In addition, very small markers are barely useful for pose estimation, so we can define a minimum marker size that we
+ *   still want to be able to detect (e.g. 50x50 pixels).
+ *   To decouple this from the input image size, the authors propose to resize the input image
+ *   to (I_w_r, I_h_r) = (tau_c / tau_dot_i) * (I_w, I_h), with tau_dot_i = tau_c + max(I_w, I_h) * tau_i.
+ *   Here tau_i (parameter: minMarkerLengthRatioOriginalImg) is a ratio in the range [0,1].
+ *   If we set it to 0, the smallest marker we can detect has a side length of tau_c.
+ *   If we set it to 1, the marker would have to fill the entire image.
+ *   For a FullHD video a good value to start with is 0.1.
+ * - minSideLengthCanonicalImg: minimum side length of a marker in the canonical image.
+ *   The latter is the binarized image in which contours are searched.
+ *   All contours covering an area smaller than minSideLengthCanonicalImg*minSideLengthCanonicalImg will be omitted from the search.
+ * - minMarkerLengthRatioOriginalImg: range [0,1], see eq. (2) in the paper.
+ * - cameraMotionSpeed: in the range [0,1]. This parameter (tau_s in the paper) implements the feature proposed
+ *   in Section 3.7 and is particularly useful for video sequences.
+ *   The parameter tau_i has a direct influence on the processing speed. Instead of setting a fixed value for it,
+ *   it can be adjusted at the end of each frame using
+ *   tau_i = (1-tau_s)*P(v_s)/4 (eq. 6 in the paper),
+ *   where P(v_s) is the perimeter of the smallest marker detected in the last frame.
+ * - useGlobalThreshold: when processing a video, the assumption is that the illumination conditions remain
+ *   constant, so a single global threshold can be applied instead of adaptive thresholding, speeding up the binarization step.
+ * - foundGlobalThreshold: internal variable. It caches whether a global threshold has already been found, for the next detector call.
+ * - otsuGlobalThreshold: internal variable. It caches the global Otsu threshold for the next detector call.
+ * - foundMarkerInLastFrames: internal variable. It caches whether markers were found in the last frames.
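+ *
+ * As a rough numeric illustration (assuming, for illustration only, tau_c = minSideLengthCanonicalImg = 32 and
+ * tau_i = minMarkerLengthRatioOriginalImg = 0.1 on a FullHD input):
+ *
+ *     // eq. (2): tau_dot_i = tau_c + max(I_w, I_h) * tau_i
+ *     const int   tau_c     = 32;
+ *     const float tau_i     = 0.1f;
+ *     const float tau_dot_i = tau_c + std::max(1920, 1080) * tau_i;           // = 224
+ *     // eq. (1): resize factor applied before the contour search
+ *     const float scale     = tau_c / tau_dot_i;                              // ~0.143
+ *     const cv::Size segSize(cvRound(scale * 1920), cvRound(scale * 1080));   // ~274 x 154
+ *
+ * so contours are searched on a ~274x154 image instead of the full 1920x1080 frame, and the smallest
+ * detectable marker in the original image has a side length of roughly tau_dot_i = 224 pixels.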
*/ struct CV_EXPORTS_W DetectorParameters { @@ -188,6 +215,19 @@ struct CV_EXPORTS_W DetectorParameters { // to detect white (inverted) markers CV_PROP_RW bool detectInvertedMarker; + + // New Aruco functionality proposed in the paper: + // Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018) + CV_PROP_RW bool useAruco3Detection; + CV_PROP_RW int minSideLengthCanonicalImg; + CV_PROP_RW float minMarkerLengthRatioOriginalImg; + + // New Aruco functionality especially for video + CV_PROP_RW float cameraMotionSpeed; + CV_PROP_RW bool useGlobalThreshold; + CV_PROP_RW bool foundGlobalThreshold; + CV_PROP_RW float otsuGlobalThreshold; + CV_PROP_RW int foundMarkerInLastFrames; }; diff --git a/modules/aruco/samples/detector_params.yml b/modules/aruco/samples/detector_params.yml index 6f804e500f..23a45afade 100644 --- a/modules/aruco/samples/detector_params.yml +++ b/modules/aruco/samples/detector_params.yml @@ -22,3 +22,4 @@ perspectiveRemoveIgnoredMarginPerCell: 0.13 maxErroneousBitsInBorderRate: 0.04 minOtsuStdDev: 5.0 errorCorrectionRate: 0.6 +# new aruco functionality diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index dee2669ebc..ee40f14427 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -40,7 +40,6 @@ the use of this software, even if advised of the possibility of such damage. #include "opencv2/aruco.hpp" #include #include - #include "apriltag_quad_thresh.hpp" #include "zarray.hpp" @@ -49,10 +48,15 @@ the use of this software, even if advised of the possibility of such damage. #include "opencv2/imgcodecs.hpp" #endif + +#include +#include + namespace cv { namespace aruco { using namespace std; +using namespace std::chrono; /** @@ -60,7 +64,7 @@ using namespace std; */ DetectorParameters::DetectorParameters() : adaptiveThreshWinSizeMin(3), - adaptiveThreshWinSizeMax(23), + adaptiveThreshWinSizeMax(13), adaptiveThreshWinSizeStep(10), adaptiveThreshConstant(7), minMarkerPerimeterRate(0.03), @@ -69,9 +73,9 @@ DetectorParameters::DetectorParameters() minCornerDistanceRate(0.05), minDistanceToBorder(3), minMarkerDistanceRate(0.05), - cornerRefinementMethod(CORNER_REFINE_NONE), + cornerRefinementMethod(CORNER_REFINE_SUBPIX), cornerRefinementWinSize(5), - cornerRefinementMaxIterations(30), + cornerRefinementMaxIterations(5), cornerRefinementMinAccuracy(0.1), markerBorderBits(1), perspectiveRemovePixelPerCell(4), @@ -87,7 +91,15 @@ DetectorParameters::DetectorParameters() aprilTagMaxLineFitMse(10.0), aprilTagMinWhiteBlackDiff(5), aprilTagDeglitch(0), - detectInvertedMarker(false){} + detectInvertedMarker(false), + useAruco3Detection(true), + minSideLengthCanonicalImg(16), + minMarkerLengthRatioOriginalImg(0.02), + cameraMotionSpeed(0.8), + useGlobalThreshold(true), + foundGlobalThreshold(false), + foundMarkerInLastFrames(0) +{} /** @@ -131,7 +143,7 @@ static void _threshold(InputArray _in, OutputArray _out, int winSize, double con static void _findMarkerContours(InputArray _in, vector< vector< Point2f > > &candidates, vector< vector< Point > > &contoursOut, double minPerimeterRate, double maxPerimeterRate, double accuracyRate, - double minCornerDistanceRate, int minDistanceToBorder) { + double minCornerDistanceRate, int minDistanceToBorder, int minSize) { CV_Assert(minPerimeterRate > 0 && maxPerimeterRate > 0 && accuracyRate > 0 && minCornerDistanceRate >= 0 && minDistanceToBorder >= 0); @@ -142,10 +154,16 @@ static void _findMarkerContours(InputArray _in, vector< vector< Point2f > > &can unsigned int maxPerimeterPixels = 
(unsigned int)(maxPerimeterRate * max(_in.getMat().cols, _in.getMat().rows)); + // for aruco3 functionality + if (minSize != 0) { + minPerimeterPixels = 4*minSize; + } + Mat contoursImg; _in.getMat().copyTo(contoursImg); vector< vector< Point > > contours; findContours(contoursImg, contours, RETR_LIST, CHAIN_APPROX_NONE); + // now filter list of contours for(unsigned int i = 0; i < contours.size(); i++) { // check perimeter @@ -167,7 +185,7 @@ static void _findMarkerContours(InputArray _in, vector< vector< Point2f > > &can (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y); minDistSq = min(minDistSq, d); } - double minCornerDistancePixels = double(contours[i].size()) * minCornerDistanceRate; + const double minCornerDistancePixels = double(contours[i].size()) * minCornerDistanceRate; if(minDistSq < minCornerDistancePixels * minCornerDistancePixels) continue; // check if it is too near to the image border @@ -217,7 +235,7 @@ static vector< Point2f > alignContourOrder( Point2f corner, vector< Point2f > ca uint8_t r=0; double min = cv::norm( Vec2f( corner - candidate[0] ), NORM_L2SQR); for(uint8_t pos=1; pos < 4; pos++) { - double nDiff = cv::norm( Vec2f( corner - candidate[pos] ), NORM_L2SQR); + const double nDiff = cv::norm( Vec2f( corner - candidate[pos] ), NORM_L2SQR); if(nDiff < min){ r = pos; min =nDiff; @@ -348,7 +366,7 @@ static void _filterTooCloseCandidates(const vector< vector< Point2f > > &candida /** * @brief Initial steps on finding square candidates */ -static void _detectInitialCandidates(const Mat &grey, vector< vector< Point2f > > &candidates, +static float _detectInitialCandidates(const Mat &grey, vector< vector< Point2f > > &candidates, vector< vector< Point > > &contours, const Ptr ¶ms) { @@ -363,25 +381,49 @@ static void _detectInitialCandidates(const Mat &grey, vector< vector< Point2f > vector< vector< vector< Point2f > > > candidatesArrays((size_t) nScales); vector< vector< vector< Point > > > contoursArrays((size_t) nScales); - ////for each value in the interval of thresholding window sizes - parallel_for_(Range(0, nScales), [&](const Range& range) { - const int begin = range.start; - const int end = range.end; + double otsu_treshold = 0.0; + // extract with global theshold) + if (params->useGlobalThreshold && params->foundMarkerInLastFrames > 2 && params->useAruco3Detection) { + Mat thresh; + if (params->foundGlobalThreshold) { + cv::threshold(grey, thresh, params->otsuGlobalThreshold, 255, cv::THRESH_BINARY_INV); + otsu_treshold = params->otsuGlobalThreshold; + } + else { // first time get threshold with otsu + otsu_treshold = cv::threshold(grey, thresh, 0, 255, cv::THRESH_BINARY_INV | cv::THRESH_OTSU); + params->foundGlobalThreshold = true; - for (int i = begin; i < end; i++) { - int currScale = params->adaptiveThreshWinSizeMin + i * params->adaptiveThreshWinSizeStep; - // threshold - Mat thresh; - _threshold(grey, thresh, currScale, params->adaptiveThreshConstant); - - // detect rectangles - _findMarkerContours(thresh, candidatesArrays[i], contoursArrays[i], - params->minMarkerPerimeterRate, params->maxMarkerPerimeterRate, - params->polygonalApproxAccuracyRate, params->minCornerDistanceRate, - params->minDistanceToBorder); } - }); + // get lines + int el_size = 3; + cv::Mat struc_el = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(el_size,el_size), cv::Point(el_size/2.0,el_size/2.0)); + cv::Mat eroded_imaged; + cv::erode(thresh, eroded_imaged, struc_el); + cv::bitwise_xor(eroded_imaged, thresh, thresh); + _findMarkerContours(thresh, candidatesArrays[0], 
contoursArrays[0], + params->minMarkerPerimeterRate, params->maxMarkerPerimeterRate, + params->polygonalApproxAccuracyRate, params->minCornerDistanceRate, + params->minDistanceToBorder, params->minSideLengthCanonicalImg); + } else { + ////for each value in the interval of thresholding window sizes + parallel_for_(Range(0, nScales), [&](const Range& range) { + const int begin = range.start; + const int end = range.end; + for (int i = begin; i < end; i++) { + int currScale = params->adaptiveThreshWinSizeMin + i * params->adaptiveThreshWinSizeStep; + // threshold + Mat thresh; + _threshold(grey, thresh, currScale, params->adaptiveThreshConstant); + + // detect rectangles + _findMarkerContours(thresh, candidatesArrays[i], contoursArrays[i], + params->minMarkerPerimeterRate, params->maxMarkerPerimeterRate, + params->polygonalApproxAccuracyRate, params->minCornerDistanceRate, + params->minDistanceToBorder, params->minSideLengthCanonicalImg); + } + }); + } // join candidates for(int i = 0; i < nScales; i++) { for(unsigned int j = 0; j < candidatesArrays[i].size(); j++) { @@ -389,27 +431,28 @@ static void _detectInitialCandidates(const Mat &grey, vector< vector< Point2f > contours.push_back(contoursArrays[i][j]); } } + + return otsu_treshold; } /** * @brief Detect square candidates in the input image */ -static void _detectCandidates(InputArray _image, vector< vector< vector< Point2f > > >& candidatesSetOut, +static float _detectCandidates(InputArray _image, vector< vector< vector< Point2f > > >& candidatesSetOut, vector< vector< vector< Point > > >& contoursSetOut, const Ptr &_params) { - Mat image = _image.getMat(); - CV_Assert(image.total() != 0); + Mat grey = _image.getMat(); + CV_Assert(grey.total() != 0); /// 1. CONVERT TO GRAY - Mat grey; - _convertToGrey(image, grey); + //Mat grey; + //_convertToGrey(image, grey); vector< vector< Point2f > > candidates; vector< vector< Point > > contours; /// 2. DETECT FIRST SET OF CANDIDATES - _detectInitialCandidates(grey, candidates, contours, _params); - + float new_otsu_global_thresh = _detectInitialCandidates(grey, candidates, contours, _params); /// 3. SORT CORNERS _reorderCandidatesCorners(candidates); @@ -417,6 +460,8 @@ static void _detectCandidates(InputArray _image, vector< vector< vector< Point2f // save the outter/inner border (i.e. 
potential candidates) _filterTooCloseCandidates(candidates, candidatesSetOut, contours, contoursSetOut, _params->minMarkerDistanceRate, _params->detectInvertedMarker); + + return new_otsu_global_thresh; } @@ -524,8 +569,9 @@ static int _getBorderErrors(const Mat &bits, int markerSize, int borderSize) { * 2 if the candidate is a white candidate */ static uint8_t _identifyOneCandidate(const Ptr& dictionary, InputArray _image, - vector& _corners, int& idx, - const Ptr& params, int& rotation) + const vector& _corners, int& idx, + const Ptr& params, int& rotation, + const double& scale = 1.0) { CV_Assert(_corners.size() == 4); CV_Assert(_image.getMat().total() != 0); @@ -533,10 +579,20 @@ static uint8_t _identifyOneCandidate(const Ptr& dictionary, InputArr uint8_t typ=1; // get bits + //auto start = high_resolution_clock::now(); + // scale corners to the correct size to search on the corresponding image pyramid + vector scaled_corners(4); + for (int i=0; i < 4; ++i) { + scaled_corners[i].x = _corners[i].x * scale; + scaled_corners[i].y = _corners[i].y * scale; + } + Mat candidateBits = - _extractBits(_image, _corners, dictionary->markerSize, params->markerBorderBits, + _extractBits(_image, scaled_corners, dictionary->markerSize, params->markerBorderBits, params->perspectiveRemovePixelPerCell, params->perspectiveRemoveIgnoredMarginPerCell, params->minOtsuStdDev); + //auto stop = high_resolution_clock::now(); + //std::cout << "time extract bits microseconds: "<(stop - start).count() << std::endl; // analyze border bits int maximumErrorsInBorder = @@ -574,28 +630,37 @@ static uint8_t _identifyOneCandidate(const Ptr& dictionary, InputArr /** * @brief Copy the contents of a corners vector to an OutputArray, settings its size. */ -static void _copyVector2Output(vector< vector< Point2f > > &vec, OutputArrayOfArrays out) { +static void _copyVector2Output(vector< vector< Point2f > > &vec, OutputArrayOfArrays out, const double& scale = 1.0) { out.create((int)vec.size(), 1, CV_32FC2); - + vector vec_scaled(4); if(out.isMatVector()) { for (unsigned int i = 0; i < vec.size(); i++) { out.create(4, 1, CV_32FC2, i); Mat &m = out.getMatRef(i); - Mat(Mat(vec[i]).t()).copyTo(m); + for (int p = 0; p < 4; ++p) { + vec_scaled[p] = vec[i][p]*scale; + } + Mat(Mat(vec_scaled).t()).copyTo(m); } } else if(out.isUMatVector()) { for (unsigned int i = 0; i < vec.size(); i++) { out.create(4, 1, CV_32FC2, i); UMat &m = out.getUMatRef(i); - Mat(Mat(vec[i]).t()).copyTo(m); + for (int p = 0; p < 4; ++p) { + vec_scaled[p] = vec[i][p]*scale; + } + Mat(Mat(vec_scaled).t()).copyTo(m); } } else if(out.kind() == _OutputArray::STD_VECTOR_VECTOR){ for (unsigned int i = 0; i < vec.size(); i++) { out.create(4, 1, CV_32FC2, i); Mat m = out.getMat(i); - Mat(Mat(vec[i]).t()).copyTo(m); + for (int p = 0; p < 4; ++p) { + vec_scaled[p] = vec[i][p]*scale; + } + Mat(Mat(vec_scaled).t()).copyTo(m); } } else { @@ -611,10 +676,34 @@ static void correctCornerPosition( vector< Point2f >& _candidate, int rotate){ std::rotate(_candidate.begin(), _candidate.begin() + 4 - rotate, _candidate.end()); } +static unsigned int _findOptPyrImageForCanonicalImg( + const std::vector& img_pyr_sizes, + const cv::Size& resized_seg_image, + const int& cur_perimeter, + const int& min_perimeter) { + + unsigned int h = 0; + double dist = std::numeric_limits::max(); + for (size_t i=0; i < img_pyr_sizes.size(); ++i) { + const double factor = (double)resized_seg_image.width / img_pyr_sizes[i].width; + double perimeter_scaled = cur_perimeter * factor; + const double new_dist 
= std::abs(perimeter_scaled - min_perimeter); + //std::cout<<"new_dist: "< > >& _candidatesSet, +static void _identifyCandidates(InputArray _image, + const std::vector& _image_pyr, + const std::vector& _image_pyr_sizes, + vector< vector< vector< Point2f > > >& _candidatesSet, vector< vector< vector > >& _contoursSet, const Ptr &_dictionary, vector< vector< Point2f > >& _accepted, vector< vector >& _contours, vector< int >& ids, const Ptr ¶ms, @@ -628,23 +717,37 @@ static void _identifyCandidates(InputArray _image, vector< vector< vector< Point CV_Assert(_image.getMat().total() != 0); - Mat grey; - _convertToGrey(_image.getMat(), grey); + //Mat grey; + //_convertToGrey(_image.getMat(), grey); vector< int > idsTmp(ncandidates, -1); vector< int > rotated(ncandidates, 0); vector< uint8_t > validCandidates(ncandidates, 0); + const int min_perimeter = params->minSideLengthCanonicalImg * params->minSideLengthCanonicalImg; + //// Analyze each of the candidates parallel_for_(Range(0, ncandidates), [&](const Range &range) { const int begin = range.start; const int end = range.end; vector< vector< Point2f > >& candidates = params->detectInvertedMarker ? _candidatesSet[1] : _candidatesSet[0]; + //std::cout<<"candidates.size "< >& cont = params->detectInvertedMarker ? _contoursSet[1] : _contoursSet[0]; + //std::cout<<"cont.size "< 0) idsTmp[i] = currId; @@ -705,41 +808,41 @@ static void _getSingleMarkerObjectPoints(float markerLength, OutputArray _objPoi * @param nContours, contour-container */ static Point3f _interpolate2Dline(const std::vector& nContours){ - float minX, minY, maxX, maxY; - minX = maxX = nContours[0].x; - minY = maxY = nContours[0].y; - - for(unsigned int i = 0; i< nContours.size(); i++){ - minX = nContours[i].x < minX ? nContours[i].x : minX; - minY = nContours[i].y < minY ? nContours[i].y : minY; - maxX = nContours[i].x > maxX ? nContours[i].x : maxX; - maxY = nContours[i].y > maxY ? nContours[i].y : maxY; - } - - Mat A = Mat::ones((int)nContours.size(), 2, CV_32F); // Coefficient Matrix (N x 2) - Mat B((int)nContours.size(), 1, CV_32F); // Variables Matrix (N x 1) - Mat C; // Constant - - if(maxX - minX > maxY - minY){ - for(unsigned int i =0; i < nContours.size(); i++){ + float minX, minY, maxX, maxY; + minX = maxX = nContours[0].x; + minY = maxY = nContours[0].y; + + for(unsigned int i = 0; i< nContours.size(); i++){ + minX = nContours[i].x < minX ? nContours[i].x : minX; + minY = nContours[i].y < minY ? nContours[i].y : minY; + maxX = nContours[i].x > maxX ? nContours[i].x : maxX; + maxY = nContours[i].y > maxY ? 
nContours[i].y : maxY; + } + + Mat A = Mat::ones((int)nContours.size(), 2, CV_32F); // Coefficient Matrix (N x 2) + Mat B((int)nContours.size(), 1, CV_32F); // Variables Matrix (N x 1) + Mat C; // Constant + + if(maxX - minX > maxY - minY){ + for(unsigned int i =0; i < nContours.size(); i++){ A.at(i,0)= nContours[i].x; B.at(i,0)= nContours[i].y; - } + } - solve(A, B, C, DECOMP_NORMAL); + solve(A, B, C, DECOMP_NORMAL); - return Point3f(C.at(0, 0), -1., C.at(1, 0)); - } - else{ - for(unsigned int i =0; i < nContours.size(); i++){ - A.at(i,0)= nContours[i].y; - B.at(i,0)= nContours[i].x; - } + return Point3f(C.at(0, 0), -1., C.at(1, 0)); + } + else{ + for(unsigned int i =0; i < nContours.size(); i++){ + A.at(i,0)= nContours[i].y; + B.at(i,0)= nContours[i].x; + } - solve(A, B, C, DECOMP_NORMAL); + solve(A, B, C, DECOMP_NORMAL); - return Point3f(-1., C.at(0, 0), C.at(1, 0)); - } + return Point3f(-1., C.at(0, 0), C.at(1, 0)); + } } @@ -750,9 +853,9 @@ static Point3f _interpolate2Dline(const std::vector& nContours){ * @return Crossed Point */ static Point2f _getCrossPoint(Point3f nLine1, Point3f nLine2){ - Matx22f A(nLine1.x, nLine1.y, nLine2.x, nLine2.y); - Vec2f B(-nLine1.z, -nLine2.z); - return Vec2f(A.solve(B).val); + Matx22f A(nLine1.x, nLine1.y, nLine2.x, nLine2.y); + Vec2f B(-nLine1.z, -nLine2.z); + return Vec2f(A.solve(B).val); } static void _distortPoints(vector& in, const Mat& camMatrix, const Mat& distCoeff) { @@ -778,68 +881,68 @@ static void _distortPoints(vector& in, const Mat& camMatrix, const * @param distCoeff, distCoeffs vector of distortion coefficient */ static void _refineCandidateLines(std::vector& nContours, std::vector& nCorners, const Mat& camMatrix, const Mat& distCoeff){ - vector contour2f(nContours.begin(), nContours.end()); - - if(!camMatrix.empty() && !distCoeff.empty()){ - undistortPoints(contour2f, contour2f, camMatrix, distCoeff); - } - - /* 5 groups :: to group the edges - * 4 - classified by its corner - * extra group - (temporary) if contours do not begin with a corner - */ - vector cntPts[5]; - int cornerIndex[4]={-1}; - int group=4; - - for ( unsigned int i =0; i < nContours.size(); i++ ) { - for(unsigned int j=0; j<4; j++){ - if ( nCorners[j] == contour2f[i] ){ - cornerIndex[j] = i; - group=j; - } - } - cntPts[group].push_back(contour2f[i]); - } - - // saves extra group into corresponding - if( !cntPts[4].empty() ){ - for( unsigned int i=0; i < cntPts[4].size() ; i++ ) - cntPts[group].push_back(cntPts[4].at(i)); - cntPts[4].clear(); - } - - //Evaluate contour direction :: using the position of the detected corners - int inc=1; + vector contour2f(nContours.begin(), nContours.end()); + + if(!camMatrix.empty() && !distCoeff.empty()){ + undistortPoints(contour2f, contour2f, camMatrix, distCoeff); + } + + /* 5 groups :: to group the edges + * 4 - classified by its corner + * extra group - (temporary) if contours do not begin with a corner + */ + vector cntPts[5]; + int cornerIndex[4]={-1}; + int group=4; + + for ( unsigned int i =0; i < nContours.size(); i++ ) { + for(unsigned int j=0; j<4; j++){ + if ( nCorners[j] == contour2f[i] ){ + cornerIndex[j] = i; + group=j; + } + } + cntPts[group].push_back(contour2f[i]); + } + + // saves extra group into corresponding + if( !cntPts[4].empty() ){ + for( unsigned int i=0; i < cntPts[4].size() ; i++ ) + cntPts[group].push_back(cntPts[4].at(i)); + cntPts[4].clear(); + } + + //Evaluate contour direction :: using the position of the detected corners + int inc=1; inc = ( (cornerIndex[0] > cornerIndex[1]) && (cornerIndex[3] > 
cornerIndex[0]) ) ? -1:inc; - inc = ( (cornerIndex[2] > cornerIndex[3]) && (cornerIndex[1] > cornerIndex[2]) ) ? -1:inc; - - // calculate the line :: who passes through the grouped points - Point3f lines[4]; - for(int i=0; i<4; i++){ - lines[i]=_interpolate2Dline(cntPts[i]); - } - - /* - * calculate the corner :: where the lines crosses to each other - * clockwise direction no clockwise direction - * 0 1 - * .---. 1 .---. 2 - * | | | | - * 3 .___. 0 .___. - * 2 3 - */ - for(int i=0; i < 4; i++){ - if(inc<0) - nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+1)%4 ]); // 01 12 23 30 - else - nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+3)%4 ]); // 30 01 12 23 - } - - if(!camMatrix.empty() && !distCoeff.empty()){ - _distortPoints(nCorners, camMatrix, distCoeff); - } + inc = ( (cornerIndex[2] > cornerIndex[3]) && (cornerIndex[1] > cornerIndex[2]) ) ? -1:inc; + + // calculate the line :: who passes through the grouped points + Point3f lines[4]; + for(int i=0; i<4; i++){ + lines[i]=_interpolate2Dline(cntPts[i]); + } + + /* + * calculate the corner :: where the lines crosses to each other + * clockwise direction no clockwise direction + * 0 1 + * .---. 1 .---. 2 + * | | | | + * 3 .___. 0 .___. + * 2 3 + */ + for(int i=0; i < 4; i++){ + if(inc<0) + nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+1)%4 ]); // 01 12 23 30 + else + nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+3)%4 ]); // 30 01 12 23 + } + + if(!camMatrix.empty() && !distCoeff.empty()){ + _distortPoints(nCorners, camMatrix, distCoeff); + } } #ifdef APRIL_DEBUG @@ -979,61 +1082,194 @@ static void _apriltag(Mat im_orig, const Ptr & _params, std: /** */ -void detectMarkers(InputArray _image, const Ptr &_dictionary, OutputArrayOfArrays _corners, - OutputArray _ids, const Ptr &_params, - OutputArrayOfArrays _rejectedImgPoints, InputArrayOfArrays camMatrix, InputArrayOfArrays distCoeff) { +float detectMarkers(InputArray _image, const Ptr &_dictionary, OutputArrayOfArrays _corners, + OutputArray _ids, const Ptr &_params, + OutputArrayOfArrays _rejectedImgPoints, + InputArrayOfArrays camMatrix, InputArrayOfArrays distCoeff) { CV_Assert(!_image.empty()); + // check that the parameters are set correctly if Aruco3 is used + CV_Assert(!(_params->useAruco3Detection == true && + _params->minSideLengthCanonicalImg == 0 && + _params->minMarkerLengthRatioOriginalImg == 0.0)); Mat grey; _convertToGrey(_image.getMat(), grey); - /// STEP 1: Detect marker candidates + // Aruco3 functionality is the extension of Aruco. + // The description can be found in: + // [1] Speeded up detection of squared fiducial markers, 2018, FJ Romera-Ramirez et al. + // if Aruco3 functionality if not wanted + // change some parameters to be sure to turn it off + if (!_params->useAruco3Detection) { + _params->useGlobalThreshold = false; + _params->foundGlobalThreshold = false; + _params->minMarkerLengthRatioOriginalImg = 0.0; + _params->minSideLengthCanonicalImg = 0; + } + + /// Step 0: equation (2) from paper [1] + const unsigned int tau_i_dot = _params->minSideLengthCanonicalImg + + std::max(grey.cols, grey.rows) * _params->minMarkerLengthRatioOriginalImg; + + //// Step 0.1: resize image with equation (1) from paper [1] + const float fxfy = (float)_params->minSideLengthCanonicalImg / tau_i_dot; + const cv::Size seg_img_size = cv::Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows)); + const int image_area = seg_img_size.width * seg_img_size.height; + + /// Step 1: create image pyramid. Section 3.4. 
in [1] + std::vector grey_pyramid; + int closest_pyr_image_idx = 0; + if (_params->useAruco3Detection) { + // find max level + int num_levels = 0; + int pyr_factor = 2; + const int min_size_2 = _params->minSideLengthCanonicalImg * _params->minSideLengthCanonicalImg; + // the closest pyramid image to the downsampled segmentation image + // will later be used as start index for corner upsampling + int min_dist = std::numeric_limits::max(); + while (true && min_size_2 < image_area) { + const int resized_img_area = (grey.cols / pyr_factor) * (grey.rows / pyr_factor); + if (resized_img_area - min_size_2 > 0) { + ++num_levels; + pyr_factor *= 2; + const int resized_diff = static_cast(std::abs(resized_img_area - image_area)); + if (resized_diff < min_dist) { + closest_pyr_image_idx = num_levels; + min_dist = resized_diff; + } + } + else + break; + } + if (num_levels > 0) { + --num_levels; + } + cv::buildPyramid(grey, grey_pyramid, num_levels); + + // resize to segmentation image + // in this reduces size the contours will be detected + cv::resize(grey, grey, seg_img_size); + } + else { + grey_pyramid.push_back(grey); + } + + // save pyramid sizes + std::vector img_pyr_sizes(grey_pyramid.size()); + for (size_t i = 0; i < grey_pyramid.size(); ++i) { + img_pyr_sizes[i] = grey_pyramid[i].size(); + } + + /// STEP 2: Detect marker candidates vector< vector< Point2f > > candidates; vector< vector< Point > > contours; vector< int > ids; vector< vector< vector< Point2f > > > candidatesSet; vector< vector< vector< Point > > > contoursSet; - /// STEP 1.a Detect marker candidates :: using AprilTag - if(_params->cornerRefinementMethod == CORNER_REFINE_APRILTAG){ - _apriltag(grey, _params, candidates, contours); + /// STEP 2.a Detect marker candidates :: using AprilTag + bool no_cand_found_in_first_iter = false; + for (int i=0; i < 2; ++i) { + // if a global threshold was found in the last iteration, + // but no candidates in this frame + if (i >= 1 && _params->foundGlobalThreshold && no_cand_found_in_first_iter) { + _params->foundGlobalThreshold = false; + _params->foundMarkerInLastFrames = 1; + } - candidatesSet.push_back(candidates); - contoursSet.push_back(contours); - } + // save otsu threshold when using video + // use it for subsequent frames for global thresholding. Paper [1] section 3.2. 
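+        // In short (names refer to the DetectorParameters fields added in this patch):
+        //  - pass i == 0: if markers were found in more than 2 previous frames
+        //    (foundMarkerInLastFrames > 2), _detectInitialCandidates() binarizes the image with a
+        //    single global Otsu threshold (cached in otsuGlobalThreshold) instead of running the
+        //    adaptive-threshold sweep, which is what makes the video path cheaper (paper [1], section 3.2).
+        //  - pass i == 1: only entered if pass 0 produced no candidates; the cached threshold is
+        //    invalidated (foundGlobalThreshold = false) and the usual adaptive thresholding runs as a fallback.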
+ float otsu_global_tresh_video = 0.0f; + if(_params->cornerRefinementMethod == CORNER_REFINE_APRILTAG){ + _apriltag(grey, _params, candidates, contours); + + candidatesSet.push_back(candidates); + contoursSet.push_back(contours); + } + /// STEP 1.b Detect marker candidates :: traditional way + else + otsu_global_tresh_video = _detectCandidates(grey, candidatesSet, contoursSet, _params); - /// STEP 1.b Detect marker candidates :: traditional way - else - _detectCandidates(grey, candidatesSet, contoursSet, _params); - /// STEP 2: Check candidate codification (identify markers) - _identifyCandidates(grey, candidatesSet, contoursSet, _dictionary, candidates, contours, ids, _params, - _rejectedImgPoints); + /// STEP 2: Check candidate codification (identify markers) + _identifyCandidates(grey, grey_pyramid, img_pyr_sizes, candidatesSet, contoursSet, _dictionary, + candidates, contours, ids, _params, _rejectedImgPoints); + + // if we found corners set the otsu threshold for the next iteration (for video processing) + if (candidates.size() > 0) { + if (_params->foundGlobalThreshold) { + _params->otsuGlobalThreshold = otsu_global_tresh_video; + } + _params->foundMarkerInLastFrames++; + // if we found candidates we can break the for loop + // if not we are going into a second loop and use adaptive thresholding to try to find candidates + // see section 3.2 in [1] + break; + } else { + no_cand_found_in_first_iter = true; + _params->foundGlobalThreshold = false; + _params->foundMarkerInLastFrames = 0; + } + } // copy to output arrays _copyVector2Output(candidates, _corners); Mat(ids).copyTo(_ids); /// STEP 3: Corner refinement :: use corner subpix - if( _params->cornerRefinementMethod == CORNER_REFINE_SUBPIX ) { - CV_Assert(_params->cornerRefinementWinSize > 0 && _params->cornerRefinementMaxIterations > 0 && + if( _params->cornerRefinementMethod == CORNER_REFINE_SUBPIX) { + CV_Assert(_params->cornerRefinementWinSize > 0 && + _params->cornerRefinementMaxIterations > 0 && _params->cornerRefinementMinAccuracy > 0); + if (_params->useAruco3Detection) { + // if Aruco3 featue is selected we use + const float scale_init = (float)grey_pyramid[closest_pyr_image_idx].cols / grey.cols; + const float scale_pyr = (float)grey_pyramid[0].cols / grey_pyramid[1].cols; - //// do corner refinement for each of the detected markers - parallel_for_(Range(0, _corners.cols()), [&](const Range& range) { - const int begin = range.start; - const int end = range.end; + // Do subpixel estimation. 
In Aruco3 start on the lowest pyramid level and + // upsample the corners + parallel_for_(Range(0, _corners.cols()), [&](const Range& range) { + const int begin = range.start; + const int end = range.end; - for (int i = begin; i < end; i++) { - cornerSubPix(grey, _corners.getMat(i), - Size(_params->cornerRefinementWinSize, _params->cornerRefinementWinSize), - Size(-1, -1), - TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, - _params->cornerRefinementMaxIterations, - _params->cornerRefinementMinAccuracy)); - } - }); + for (int i = begin; i < end; i++) { + // scale it up to the closest pyramid level + for (int p = 0; p < 4; ++p) { + _corners.getMat(i).ptr(0)[p] *= scale_init; + } + + for (int n = closest_pyr_image_idx-1; n >= 0; --n) { + // scale them to new pyramid level + for (int p = 0; p < 4; ++p) { + _corners.getMat(i).ptr(0)[p] *= scale_pyr; + } + + cornerSubPix(grey_pyramid[n], _corners.getMat(i), + Size(_params->cornerRefinementWinSize, _params->cornerRefinementWinSize), + Size(-1, -1), + TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, + _params->cornerRefinementMaxIterations, + _params->cornerRefinementMinAccuracy)); + } + } + }); + } else { // no pyramid search + //// do corner refinement for each of the detected markers + parallel_for_(Range(0, _corners.cols()), [&](const Range& range) { + const int begin = range.start; + const int end = range.end; + + for (int i = begin; i < end; i++) { + cornerSubPix(grey, _corners.getMat(i), + Size(_params->cornerRefinementWinSize, _params->cornerRefinementWinSize), + Size(-1, -1), + TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, + _params->cornerRefinementMaxIterations, + _params->cornerRefinementMinAccuracy)); + } + }); + } } /// STEP 3, Optional : Corner refinement :: use contour container @@ -1053,6 +1289,24 @@ void detectMarkers(InputArray _image, const Ptr &_dictionary, Output _copyVector2Output(candidates, _corners); } } + + if (_params->cornerRefinementMethod != CORNER_REFINE_APRILTAG && + _params->cornerRefinementMethod != CORNER_REFINE_SUBPIX) { + // scale to orignal size, this however will lead to inaccurate detections! + _copyVector2Output(candidates, _corners, 1./fxfy); + } + + // if the detection is used on a video the parameter tau_i (eq. 2) can be dynamically updated + // according to section 3.2.7. 
in the paper + // sort contours according to perimeter + if (contours.size() > 0 && _params->cameraMotionSpeed > 0 && _params->useAruco3Detection) { + std::sort(contours.begin(), contours.end(), [](vector a, vector b) {return a.size() < b.size();}); + const float next_frame_tau_i = (1.0 - _params->cameraMotionSpeed) * contours[0].size() / 4.0; + return next_frame_tau_i / std::max(img_pyr_sizes[0].width, img_pyr_sizes[0].height); // normalize new tau_i + } + else { + return 0.0f; + } } /** From b6fbfba2247c6fb58f2aa83db0452ad8ea03e790 Mon Sep 17 00:00:00 2001 From: Steffen Urban Date: Wed, 16 Dec 2020 17:26:36 +0100 Subject: [PATCH 02/12] added example and changed detector params --- modules/aruco/include/opencv2/aruco.hpp | 9 ++++---- modules/aruco/samples/detect_markers.cpp | 25 +++++++++++++++++++---- modules/aruco/samples/detector_params.yml | 8 ++++++++ modules/aruco/src/aruco.cpp | 5 ++--- 4 files changed, 36 insertions(+), 11 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco.hpp b/modules/aruco/include/opencv2/aruco.hpp index 4ff231a3fc..5672a49b4a 100644 --- a/modules/aruco/include/opencv2/aruco.hpp +++ b/modules/aruco/include/opencv2/aruco.hpp @@ -255,12 +255,13 @@ struct CV_EXPORTS_W DetectorParameters { * are searched. For each detected marker, it returns the 2D position of its corner in the image * and its corresponding identifier. * Note that this function does not perform pose estimation. - * @sa estimatePoseSingleMarkers, estimatePoseBoard + * The function returns an estimate of the parameter minMarkerLengthRatioOriginalImg if useAruco3Detection=1. If not it returns 0.0. + * @sa estimatePoseSingleMarkers, estimatePoseBoard * */ -CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr &dictionary, OutputArrayOfArrays corners, - OutputArray ids, const Ptr ¶meters = DetectorParameters::create(), - OutputArrayOfArrays rejectedImgPoints = noArray(), InputArray cameraMatrix= noArray(), InputArray distCoeff= noArray()); +CV_EXPORTS_W float detectMarkers(InputArray image, const Ptr &dictionary, OutputArrayOfArrays corners, + OutputArray ids, const Ptr ¶meters = DetectorParameters::create(), + OutputArrayOfArrays rejectedImgPoints = noArray(), InputArray cameraMatrix= noArray(), InputArray distCoeff= noArray()); diff --git a/modules/aruco/samples/detect_markers.cpp b/modules/aruco/samples/detect_markers.cpp index dac27d1e0c..7207f55ed1 100644 --- a/modules/aruco/samples/detect_markers.cpp +++ b/modules/aruco/samples/detect_markers.cpp @@ -59,7 +59,8 @@ const char* keys = "{dp | | File of marker detector parameters }" "{r | | show rejected candidates too }" "{refine | | Corner refinement: CORNER_REFINE_NONE=0, CORNER_REFINE_SUBPIX=1," - "CORNER_REFINE_CONTOUR=2, CORNER_REFINE_APRILTAG=3}"; + "CORNER_REFINE_CONTOUR=2, CORNER_REFINE_APRILTAG=3}" + "{ar3vid | | Adapt the paramater tau_i if aruco3 functionality is used. 
}"; } /** @@ -101,6 +102,12 @@ static bool readDetectorParameters(string filename, Ptr> params->maxErroneousBitsInBorderRate; fs["minOtsuStdDev"] >> params->minOtsuStdDev; fs["errorCorrectionRate"] >> params->errorCorrectionRate; + // new aruco functionality + fs["useAruco3Detection"] >> params->useAruco3Detection; + fs["minSideLengthCanonicalImg"] >> params->minSideLengthCanonicalImg; + fs["minMarkerLengthRatioOriginalImg"] >> params->minMarkerLengthRatioOriginalImg; + fs["cameraMotionSpeed"] >> params->cameraMotionSpeed; + fs["useGlobalThreshold"] >> params->useGlobalThreshold; return true; } @@ -121,6 +128,7 @@ int main(int argc, char *argv[]) { bool showRejected = parser.has("r"); bool estimatePose = parser.has("c"); float markerLength = parser.get("l"); + bool useAruco3DynamicUpdates = parser.has("ar3vid"); Ptr detectorParams = aruco::DetectorParameters::create(); if(parser.has("dp")) { @@ -165,7 +173,7 @@ int main(int argc, char *argv[]) { int waitTime; if(!video.empty()) { inputVideo.open(video); - waitTime = 0; + waitTime = 1; } else { inputVideo.open(camId); waitTime = 10; @@ -173,7 +181,8 @@ int main(int argc, char *argv[]) { double totalTime = 0; int totalIterations = 0; - + float new_marker_length_ratio = 0.0; + size_t total_nr_detected_corners = 0; while(inputVideo.grab()) { Mat image, imageCopy; inputVideo.retrieve(image); @@ -185,7 +194,13 @@ int main(int argc, char *argv[]) { vector< Vec3d > rvecs, tvecs; // detect markers and estimate pose - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + if (useAruco3DynamicUpdates) { + // if new aruco3 features are used, we can also set the new min + // marker length ratio dymamically from the last frame + detectorParams->minMarkerLengthRatioOriginalImg = new_marker_length_ratio; + } + new_marker_length_ratio = aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + total_nr_detected_corners += ids.size(); if(estimatePose && ids.size() > 0) aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); @@ -218,5 +233,7 @@ int main(int argc, char *argv[]) { if(key == 27) break; } + cout<<"Total number detected corners: "< vector< vector< vector< Point2f > > > candidatesArrays((size_t) nScales); vector< vector< vector< Point > > > contoursArrays((size_t) nScales); - double otsu_treshold = 0.0; // extract with global theshold) if (params->useGlobalThreshold && params->foundMarkerInLastFrames > 2 && params->useAruco3Detection) { @@ -1305,7 +1304,7 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, Outpu return next_frame_tau_i / std::max(img_pyr_sizes[0].width, img_pyr_sizes[0].height); // normalize new tau_i } else { - return 0.0f; + return -1.0f; } } From c13af3c619a1a483fce84c9c9dc5cd0f465c1791 Mon Sep 17 00:00:00 2001 From: Steffen Urban Date: Wed, 16 Dec 2020 18:13:26 +0100 Subject: [PATCH 03/12] fixed a bug --- modules/aruco/samples/detect_markers.cpp | 37 ++++++++++++----------- modules/aruco/samples/detector_params.yml | 6 ++-- modules/aruco/src/aruco.cpp | 13 ++------ 3 files changed, 25 insertions(+), 31 deletions(-) diff --git a/modules/aruco/samples/detect_markers.cpp b/modules/aruco/samples/detect_markers.cpp index 7207f55ed1..1512502479 100644 --- a/modules/aruco/samples/detect_markers.cpp +++ b/modules/aruco/samples/detect_markers.cpp @@ -198,6 +198,9 @@ int main(int argc, char *argv[]) { // if new aruco3 features are used, we can also set the new min // marker length ratio dymamically from the last frame 
detectorParams->minMarkerLengthRatioOriginalImg = new_marker_length_ratio; + if(totalIterations % 30 == 0) { + cout<<"Current tau_i= "< 0) { - aruco::drawDetectedMarkers(imageCopy, corners, ids); - - if(estimatePose) { - for(unsigned int i = 0; i < ids.size(); i++) - aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i], - markerLength * 0.5f); - } - } - - if(showRejected && rejected.size() > 0) - aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255)); - - imshow("out", imageCopy); - char key = (char)waitKey(waitTime); - if(key == 27) break; +// image.copyTo(imageCopy); +// if(ids.size() > 0) { +// aruco::drawDetectedMarkers(imageCopy, corners, ids); + +// if(estimatePose) { +// for(unsigned int i = 0; i < ids.size(); i++) +// aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i], +// markerLength * 0.5f); +// } +// } + + //if(showRejected && rejected.size() > 0) + // aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255)); + + //imshow("out", imageCopy); + //char key = (char)waitKey(waitTime); + //if(key == 27) break; } cout<<"Total number detected corners: "< tau_c from the paper +minMarkerLengthRatioOriginalImg: 0.02 # range [0,0.2] --> tau_i from the paper +cameraMotionSpeed: 0.1 # range [0,1) --> tau_s from the paper useGlobalThreshold: 0 diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index ab4d68b430..a37367c053 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -578,7 +578,6 @@ static uint8_t _identifyOneCandidate(const Ptr& dictionary, InputArr uint8_t typ=1; // get bits - //auto start = high_resolution_clock::now(); // scale corners to the correct size to search on the corresponding image pyramid vector scaled_corners(4); for (int i=0; i < 4; ++i) { @@ -590,8 +589,6 @@ static uint8_t _identifyOneCandidate(const Ptr& dictionary, InputArr _extractBits(_image, scaled_corners, dictionary->markerSize, params->markerBorderBits, params->perspectiveRemovePixelPerCell, params->perspectiveRemoveIgnoredMarginPerCell, params->minOtsuStdDev); - //auto stop = high_resolution_clock::now(); - //std::cout << "time extract bits microseconds: "<(stop - start).count() << std::endl; // analyze border bits int maximumErrorsInBorder = @@ -687,7 +684,6 @@ static unsigned int _findOptPyrImageForCanonicalImg( const double factor = (double)resized_seg_image.width / img_pyr_sizes[i].width; double perimeter_scaled = cur_perimeter * factor; const double new_dist = std::abs(perimeter_scaled - min_perimeter); - //std::cout<<"new_dist: "< idsTmp(ncandidates, -1); vector< int > rotated(ncandidates, 0); vector< uint8_t > validCandidates(ncandidates, 0); @@ -731,9 +724,7 @@ static void _identifyCandidates(InputArray _image, const int end = range.end; vector< vector< Point2f > >& candidates = params->detectInvertedMarker ? _candidatesSet[1] : _candidatesSet[0]; - //std::cout<<"candidates.size "< >& cont = params->detectInvertedMarker ? _contoursSet[1] : _contoursSet[0]; - //std::cout<<"cont.size "< 0) @@ -1114,6 +1104,7 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, Outpu //// Step 0.1: resize image with equation (1) from paper [1] const float fxfy = (float)_params->minSideLengthCanonicalImg / tau_i_dot; const cv::Size seg_img_size = cv::Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows)); + const int image_area = seg_img_size.width * seg_img_size.height; /// Step 1: create image pyramid. Section 3.4. 
in [1] @@ -1304,7 +1295,7 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, Outpu return next_frame_tau_i / std::max(img_pyr_sizes[0].width, img_pyr_sizes[0].height); // normalize new tau_i } else { - return -1.0f; + return 0.0f; } } From 182082397b86175a3497a166cade155fea23e796 Mon Sep 17 00:00:00 2001 From: Steffen Urban Date: Wed, 16 Dec 2020 18:19:40 +0100 Subject: [PATCH 04/12] uncomment imshow in example --- modules/aruco/samples/detect_markers.cpp | 34 ++++++++++++------------ 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/modules/aruco/samples/detect_markers.cpp b/modules/aruco/samples/detect_markers.cpp index 1512502479..b5da886e6f 100644 --- a/modules/aruco/samples/detect_markers.cpp +++ b/modules/aruco/samples/detect_markers.cpp @@ -217,23 +217,23 @@ int main(int argc, char *argv[]) { } // draw results -// image.copyTo(imageCopy); -// if(ids.size() > 0) { -// aruco::drawDetectedMarkers(imageCopy, corners, ids); - -// if(estimatePose) { -// for(unsigned int i = 0; i < ids.size(); i++) -// aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i], -// markerLength * 0.5f); -// } -// } - - //if(showRejected && rejected.size() > 0) - // aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255)); - - //imshow("out", imageCopy); - //char key = (char)waitKey(waitTime); - //if(key == 27) break; + image.copyTo(imageCopy); + if(ids.size() > 0) { + aruco::drawDetectedMarkers(imageCopy, corners, ids); + + if(estimatePose) { + for(unsigned int i = 0; i < ids.size(); i++) + aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i], + markerLength * 0.5f); + } + } + + if(showRejected && rejected.size() > 0) + aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255)); + + imshow("out", imageCopy); + char key = (char)waitKey(waitTime); + if(key == 27) break; } cout<<"Total number detected corners: "< Date: Tue, 29 Dec 2020 15:39:37 +0100 Subject: [PATCH 05/12] fixed bug after merge --- modules/aruco/src/aruco.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index a72a2979fc..e6e4198805 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -728,7 +728,7 @@ static void _identifyCandidates(InputArray _image, int currId; // implements equation (4) - const int perimeter_in_seg_img = cont[i].size(); + const int perimeter_in_seg_img = _contours[i].size(); int n = _findOptPyrImageForCanonicalImg(_image_pyr_sizes, _image.size(), perimeter_in_seg_img, min_perimeter); const Mat& pyr_img = _image_pyr[n]; From bdf139ad35b1174de5e7ba5882f02e9228b81268 Mon Sep 17 00:00:00 2001 From: Steffen Urban Date: Wed, 10 Feb 2021 11:19:02 +0100 Subject: [PATCH 06/12] fixed candidate identification if aruco3=false --- modules/aruco/src/aruco.cpp | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index e6e4198805..62b7b91097 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -88,7 +88,7 @@ DetectorParameters::DetectorParameters() aprilTagMinWhiteBlackDiff(5), aprilTagDeglitch(0), detectInvertedMarker(false), - useAruco3Detection(true), + useAruco3Detection(false), minSideLengthCanonicalImg(16), minMarkerLengthRatioOriginalImg(0.02), cameraMotionSpeed(1.0), @@ -728,12 +728,15 @@ static void _identifyCandidates(InputArray _image, int currId; // implements equation (4) - const int perimeter_in_seg_img = 
_contours[i].size(); - int n = _findOptPyrImageForCanonicalImg(_image_pyr_sizes, _image.size(), perimeter_in_seg_img, min_perimeter); - const Mat& pyr_img = _image_pyr[n]; - - double scale = (double)_image_pyr_sizes[n].width / _image.cols(); - validCandidates[i] = _identifyOneCandidate(_dictionary, pyr_img, candidates[i], currId, params, rotated[i], scale); + if (params->useAruco3Detection) { + const int perimeter_in_seg_img = _contours[i].size(); + int n = _findOptPyrImageForCanonicalImg(_image_pyr_sizes, _image.size(), perimeter_in_seg_img, min_perimeter); + const Mat& pyr_img = _image_pyr[n]; + double scale = (double)_image_pyr_sizes[n].width / _image.cols(); + validCandidates[i] = _identifyOneCandidate(_dictionary, pyr_img, candidates[i], currId, params, rotated[i], scale); + } else { + validCandidates[i] = _identifyOneCandidate(_dictionary, _image, candidates[i], currId, params, rotated[i]); + } if(validCandidates[i] > 0) idsTmp[i] = currId; From c7fdd76bce938f683978f99420d3b3e96cd345bf Mon Sep 17 00:00:00 2001 From: Steffen Urban Date: Wed, 10 Feb 2021 11:43:54 +0100 Subject: [PATCH 07/12] added test and fixed a bug --- modules/aruco/src/aruco.cpp | 3 ++- modules/aruco/test/test_arucodetection.cpp | 12 ++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index 62b7b91097..effe86ba73 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -723,13 +723,14 @@ static void _identifyCandidates(InputArray _image, const int end = range.end; vector< vector< Point2f > >& candidates = params->detectInvertedMarker ? _candidatesSet[1] : _candidatesSet[0]; + vector< vector< Point > >& contourS = params->detectInvertedMarker ? _contoursSet[1] : _contoursSet[0]; for(int i = begin; i < end; i++) { int currId; // implements equation (4) if (params->useAruco3Detection) { - const int perimeter_in_seg_img = _contours[i].size(); + const int perimeter_in_seg_img = contourS[i].size(); int n = _findOptPyrImageForCanonicalImg(_image_pyr_sizes, _image.size(), perimeter_in_seg_img, min_perimeter); const Mat& pyr_img = _image_pyr[n]; double scale = (double)_image_pyr_sizes[n].width / _image.cols(); diff --git a/modules/aruco/test/test_arucodetection.cpp b/modules/aruco/test/test_arucodetection.cpp index e56fdf8125..cb42d9d73e 100644 --- a/modules/aruco/test/test_arucodetection.cpp +++ b/modules/aruco/test/test_arucodetection.cpp @@ -258,6 +258,7 @@ class CV_ArucoDetectionPerspective : public cvtest::BaseTest { enum checkWithParameter{ USE_APRILTAG=1, /// Detect marker candidates :: using AprilTag DETECT_INVERTED_MARKER, /// Check if there is a white marker + USE_ARUCO3 /// Check if aruco3 should be used }; protected: @@ -306,6 +307,11 @@ void CV_ArucoDetectionPerspective::run(int tryWith) { params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_APRILTAG; } + if (CV_ArucoDetectionPerspective::USE_ARUCO3 == tryWith) { + params->useAruco3Detection = true; + params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX; + } + // detect markers vector< vector< Point2f > > corners; vector< int > ids; @@ -523,6 +529,7 @@ void CV_ArucoBitCorrection::run(int) { typedef CV_ArucoDetectionPerspective CV_AprilTagDetectionPerspective; typedef CV_ArucoDetectionPerspective CV_InvertedArucoDetectionPerspective; +typedef CV_ArucoDetectionPerspective CV_Aruco3DetectionPerspective; TEST(CV_InvertedArucoDetectionPerspective, algorithmic) { CV_InvertedArucoDetectionPerspective test; @@ -534,6 +541,11 @@ 
TEST(CV_AprilTagDetectionPerspective, algorithmic) { test.safe_run(CV_ArucoDetectionPerspective::USE_APRILTAG); } +TEST(CV_Aruco3DetectionPerspective, algorithmic) { + CV_Aruco3DetectionPerspective test; + test.safe_run(CV_ArucoDetectionPerspective::USE_ARUCO3); +} + TEST(CV_ArucoDetectionSimple, algorithmic) { CV_ArucoDetectionSimple test; test.safe_run(); From 927653e0da6adb67736b1d2481957278bb96e448 Mon Sep 17 00:00:00 2001 From: Steffen Urban Date: Wed, 10 Feb 2021 14:57:04 +0100 Subject: [PATCH 08/12] bugfix --- modules/aruco/src/aruco.cpp | 42 ++++++++++++------- modules/aruco/test/test_arucodetection.cpp | 3 ++ modules/aruco/test/test_boarddetection.cpp | 44 ++++++++++++++++---- modules/aruco/test/test_charucodetection.cpp | 4 +- 4 files changed, 67 insertions(+), 26 deletions(-) diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index effe86ba73..513daa799f 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -89,11 +89,12 @@ DetectorParameters::DetectorParameters() aprilTagDeglitch(0), detectInvertedMarker(false), useAruco3Detection(false), - minSideLengthCanonicalImg(16), - minMarkerLengthRatioOriginalImg(0.02), + minSideLengthCanonicalImg(32), + minMarkerLengthRatioOriginalImg(0.0), cameraMotionSpeed(1.0), useGlobalThreshold(true), foundGlobalThreshold(false), + otsuGlobalThreshold(0.0), foundMarkerInLastFrames(0) {} @@ -576,7 +577,7 @@ static uint8_t _identifyOneCandidate(const Ptr& dictionary, InputArr // get bits // scale corners to the correct size to search on the corresponding image pyramid vector scaled_corners(4); - for (int i=0; i < 4; ++i) { + for (int i = 0; i < 4; ++i) { scaled_corners[i].x = _corners[i].x * scale; scaled_corners[i].y = _corners[i].y * scale; } @@ -676,11 +677,13 @@ static unsigned int _findOptPyrImageForCanonicalImg( unsigned int h = 0; double dist = std::numeric_limits::max(); - for (size_t i=0; i < img_pyr_sizes.size(); ++i) { + for (size_t i = 0; i < img_pyr_sizes.size(); ++i) { const double factor = (double)resized_seg_image.width / img_pyr_sizes[i].width; - double perimeter_scaled = cur_perimeter * factor; - const double new_dist = std::abs(perimeter_scaled - min_perimeter); - if (new_dist < dist) { + const double perimeter_scaled = cur_perimeter * factor; + // instead of std::abs() favor the larger pyramid level by checking if the distance is postive + // will slow down the algorithm but find more corners in the end + const double new_dist = perimeter_scaled - min_perimeter; + if (new_dist < dist && new_dist > 0.0) { dist = new_dist; h = i; } @@ -730,10 +733,10 @@ static void _identifyCandidates(InputArray _image, // implements equation (4) if (params->useAruco3Detection) { - const int perimeter_in_seg_img = contourS[i].size(); - int n = _findOptPyrImageForCanonicalImg(_image_pyr_sizes, _image.size(), perimeter_in_seg_img, min_perimeter); + const size_t perimeter_in_seg_img = contourS[i].size(); + const int n = _findOptPyrImageForCanonicalImg(_image_pyr_sizes, _image.size(), perimeter_in_seg_img, min_perimeter); const Mat& pyr_img = _image_pyr[n]; - double scale = (double)_image_pyr_sizes[n].width / _image.cols(); + const double scale = (double)_image_pyr_sizes[n].width / _image.cols(); validCandidates[i] = _identifyOneCandidate(_dictionary, pyr_img, candidates[i], currId, params, rotated[i], scale); } else { validCandidates[i] = _identifyOneCandidate(_dictionary, _image, candidates[i], currId, params, rotated[i]); @@ -1096,6 +1099,9 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, 
Outpu _params->foundGlobalThreshold = false; _params->minMarkerLengthRatioOriginalImg = 0.0; _params->minSideLengthCanonicalImg = 0; + } else { + // always turn on corner refinement in case of Aruco3, due to upsampling + _params->cornerRefinementMethod = CORNER_REFINE_SUBPIX; } /// Step 0: equation (2) from paper [1] @@ -1103,7 +1109,10 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, Outpu std::max(grey.cols, grey.rows) * _params->minMarkerLengthRatioOriginalImg; //// Step 0.1: resize image with equation (1) from paper [1] - const float fxfy = (float)_params->minSideLengthCanonicalImg / tau_i_dot; + float fxfy = (float)_params->minSideLengthCanonicalImg / tau_i_dot; + if (!_params->useAruco3Detection) { + fxfy = 1.0; + } const cv::Size seg_img_size = cv::Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows)); const int image_area = seg_img_size.width * seg_img_size.height; @@ -1140,7 +1149,9 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, Outpu // resize to segmentation image // in this reduces size the contours will be detected - cv::resize(grey, grey, seg_img_size); + if (grey.size() != seg_img_size) { + cv::resize(grey, grey, seg_img_size); + } } else { grey_pyramid.push_back(grey); @@ -1214,7 +1225,7 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, Outpu _params->cornerRefinementMaxIterations > 0 && _params->cornerRefinementMinAccuracy > 0); if (_params->useAruco3Detection) { - // if Aruco3 featue is selected we use + // if Aruco3 feature is selected we use const float scale_init = (float)grey_pyramid[closest_pyr_image_idx].cols / grey.cols; const float scale_pyr = (float)grey_pyramid[0].cols / grey_pyramid[1].cols; @@ -1235,9 +1246,10 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, Outpu for (int p = 0; p < 4; ++p) { _corners.getMat(i).ptr(0)[p] *= scale_pyr; } - + // use larger win size for larger images + const int subpix_win_size = std::max(grey_pyramid[n].cols, grey_pyramid[n].rows) > 1080 ? 
5 : 3; cornerSubPix(grey_pyramid[n], _corners.getMat(i), - Size(_params->cornerRefinementWinSize, _params->cornerRefinementWinSize), + Size(subpix_win_size, subpix_win_size), Size(-1, -1), TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, _params->cornerRefinementMaxIterations, diff --git a/modules/aruco/test/test_arucodetection.cpp b/modules/aruco/test/test_arucodetection.cpp index cb42d9d73e..799af5bf91 100644 --- a/modules/aruco/test/test_arucodetection.cpp +++ b/modules/aruco/test/test_arucodetection.cpp @@ -319,6 +319,9 @@ void CV_ArucoDetectionPerspective::run(int tryWith) { // check results if(ids.size() != 1 || (ids.size() == 1 && ids[0] != currentId)) { + aruco::detectMarkers(img, dictionary, corners, ids, params); + cv::imshow("img",img); + cv::waitKey(0); if(ids.size() != 1) ts->printf(cvtest::TS::LOG, "Incorrect number of detected markers"); else diff --git a/modules/aruco/test/test_boarddetection.cpp b/modules/aruco/test/test_boarddetection.cpp index 474bc372f1..d6dc9cc9ed 100644 --- a/modules/aruco/test/test_boarddetection.cpp +++ b/modules/aruco/test/test_boarddetection.cpp @@ -160,6 +160,10 @@ class CV_ArucoBoardPose : public cvtest::BaseTest { public: CV_ArucoBoardPose(); + enum checkWithParameter{ + USE_ARUCO3 = 1 /// Check if aruco3 should be used + }; + protected: void run(int); }; @@ -168,7 +172,7 @@ class CV_ArucoBoardPose : public cvtest::BaseTest { CV_ArucoBoardPose::CV_ArucoBoardPose() {} -void CV_ArucoBoardPose::run(int) { +void CV_ArucoBoardPose::run(int run_with) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); @@ -180,9 +184,13 @@ void CV_ArucoBoardPose::run(int) { cameraMatrix.at< double >(0, 2) = imgSize.width / 2; cameraMatrix.at< double >(1, 2) = imgSize.height / 2; Mat distCoeffs(5, 1, CV_64FC1, Scalar::all(0)); - + double max_dist = 0.4; + // aruco3 detection is a bit worse from large distances it seems + if (run_with == checkWithParameter::USE_ARUCO3) { + max_dist = 0.2; + } // for different perspectives - for(double distance = 0.2; distance <= 0.4; distance += 0.2) { + for(double distance = 0.2; distance <= max_dist; distance += 0.2) { for(int yaw = 0; yaw < 360; yaw += 100) { for(int pitch = 30; pitch <= 90; pitch += 50) { for(unsigned int i = 0; i < gridboard->ids.size(); i++) @@ -194,11 +202,13 @@ void CV_ArucoBoardPose::run(int) { Mat img = projectBoard(gridboard, cameraMatrix, deg2rad(pitch), deg2rad(yaw), distance, imgSize, markerBorder); - vector< vector< Point2f > > corners; vector< int > ids; Ptr params = aruco::DetectorParameters::create(); params->minDistanceToBorder = 3; + if (run_with == checkWithParameter::USE_ARUCO3) { + params->useAruco3Detection = true; + } params->markerBorderBits = markerBorder; aruco::detectMarkers(img, dictionary, corners, ids, params); @@ -254,16 +264,18 @@ void CV_ArucoBoardPose::run(int) { class CV_ArucoRefine : public cvtest::BaseTest { public: CV_ArucoRefine(); - + enum checkWithParameter{ + USE_ARUCO3 = 1 /// Check if aruco3 should be used + }; protected: - void run(int); + void run(int run_with); }; CV_ArucoRefine::CV_ArucoRefine() {} -void CV_ArucoRefine::run(int) { +void CV_ArucoRefine::run(int run_with) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); @@ -296,6 +308,9 @@ void CV_ArucoRefine::run(int) { Ptr params = aruco::DetectorParameters::create(); params->minDistanceToBorder = 3; params->cornerRefinementMethod = aruco::CORNER_REFINE_SUBPIX; + if (run_with == checkWithParameter::USE_ARUCO3) { + params->useAruco3Detection = true; + } params->markerBorderBits = markerBorder; 
            aruco::detectMarkers(img, dictionary, corners, ids, params, rejected);
@@ -323,18 +338,29 @@ void CV_ArucoRefine::run(int) {
 }
-
-
 TEST(CV_ArucoBoardPose, accuracy) {
     CV_ArucoBoardPose test;
     test.safe_run();
 }
+typedef CV_ArucoBoardPose CV_Aruco3BoardPose;
+TEST(CV_Aruco3BoardPose, accuracy) {
+    CV_Aruco3BoardPose test;
+    test.safe_run(CV_Aruco3BoardPose::checkWithParameter::USE_ARUCO3);
+}
+
+typedef CV_ArucoRefine CV_Aruco3Refine;
+
 TEST(CV_ArucoRefine, accuracy) {
     CV_ArucoRefine test;
     test.safe_run();
 }
+TEST(CV_Aruco3Refine, accuracy) {
+    CV_Aruco3Refine test;
+    test.safe_run(CV_Aruco3Refine::checkWithParameter::USE_ARUCO3);
+}
+
 TEST(CV_ArucoBoardPose, CheckNegativeZ) {
     double matrixData[9] = { -3.9062571886921410e+02, 0., 4.2350000000000000e+02,
diff --git a/modules/aruco/test/test_charucodetection.cpp b/modules/aruco/test/test_charucodetection.cpp
index e803a031d9..09c49ad67a 100644
--- a/modules/aruco/test/test_charucodetection.cpp
+++ b/modules/aruco/test/test_charucodetection.cpp
@@ -214,7 +214,7 @@ class CV_CharucoDetection : public cvtest::BaseTest {
 CV_CharucoDetection::CV_CharucoDetection() {}
-void CV_CharucoDetection::run(int) {
+void CV_CharucoDetection::run(int try_with) {
     int iter = 0;
     Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1);
@@ -227,7 +227,6 @@ void CV_CharucoDetection::run(int) {
     cameraMatrix.at< double >(1, 2) = imgSize.height / 2;
     Mat distCoeffs(5, 1, CV_64FC1, Scalar::all(0));
-
     // for different perspectives
     for(double distance = 0.2; distance <= 0.4; distance += 0.2) {
         for(int yaw = 0; yaw < 360; yaw += 100) {
@@ -326,6 +325,7 @@ void CV_CharucoPoseEstimation::run(int) {
     cameraMatrix.at< double >(1, 2) = imgSize.height / 2;
     Mat distCoeffs(5, 1, CV_64FC1, Scalar::all(0));
+
     // for different perspectives
     for(double distance = 0.2; distance <= 0.4; distance += 0.2) {
         for(int yaw = 0; yaw < 360; yaw += 100) {

From ede9777a3c052eba07f4ab1e36dbed76fa9db6d5 Mon Sep 17 00:00:00 2001
From: Steffen Urban
Date: Wed, 10 Feb 2021 14:57:29 +0100
Subject: [PATCH 09/12] unused parm

---
 modules/aruco/test/test_charucodetection.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/aruco/test/test_charucodetection.cpp b/modules/aruco/test/test_charucodetection.cpp
index 09c49ad67a..1f71d8229f 100644
--- a/modules/aruco/test/test_charucodetection.cpp
+++ b/modules/aruco/test/test_charucodetection.cpp
@@ -214,7 +214,7 @@ class CV_CharucoDetection : public cvtest::BaseTest {
 CV_CharucoDetection::CV_CharucoDetection() {}
-void CV_CharucoDetection::run(int try_with) {
+void CV_CharucoDetection::run(int) {
     int iter = 0;
     Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1);

From 8e91d26001a15a6f289a8c72174cb8805b6a64fb Mon Sep 17 00:00:00 2001
From: Steffen Urban
Date: Thu, 11 Feb 2021 07:27:26 +0100
Subject: [PATCH 10/12] first fix win compiler warnings

---
 modules/aruco/src/aruco.cpp | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp
index 513daa799f..f2b1b427e8 100644
--- a/modules/aruco/src/aruco.cpp
+++ b/modules/aruco/src/aruco.cpp
@@ -377,7 +377,7 @@ static float _detectInitialCandidates(const Mat &grey, vector< vector< Point2f >
     vector< vector< vector< Point2f > > > candidatesArrays((size_t) nScales);
     vector< vector< vector< Point > > > contoursArrays((size_t) nScales);
-    double otsu_treshold = 0.0;
+    float otsu_treshold = 0.0;
     // extract with global theshold)
     if (params->useGlobalThreshold && params->foundMarkerInLastFrames > 2 && params->useAruco3Detection) {
        Mat thresh;
@@ -391,8 +391,9 @@ static float _detectInitialCandidates(const Mat &grey, vector< vector< Point2f >
         }
         // get lines
-        int el_size = 3;
-        cv::Mat struc_el = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(el_size,el_size), cv::Point(el_size/2.0,el_size/2.0));
+        const int el_size = 3;
+        const int el_size_half = el_size / 2.0;
+        cv::Mat struc_el = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(el_size,el_size), cv::Point(el_size_half,el_size_half));
         cv::Mat eroded_imaged;
         cv::erode(thresh, eroded_imaged, struc_el);
         cv::bitwise_xor(eroded_imaged, thresh, thresh);
@@ -567,7 +568,7 @@ static int _getBorderErrors(const Mat &bits, int markerSize, int borderSize) {
 static uint8_t _identifyOneCandidate(const Ptr<Dictionary>& dictionary, InputArray _image,
                                      const vector< Point2f >& _corners, int& idx,
                                      const Ptr<DetectorParameters>& params, int& rotation,
-                                     const double& scale = 1.0)
+                                     const float& scale = 1.f)
 {
     CV_Assert(_corners.size() == 4);
     CV_Assert(_image.getMat().total() != 0);
@@ -669,13 +670,13 @@ static void correctCornerPosition( vector< Point2f >& _candidate, int rotate){
     std::rotate(_candidate.begin(), _candidate.begin() + 4 - rotate, _candidate.end());
 }
-static unsigned int _findOptPyrImageForCanonicalImg(
+static size_t _findOptPyrImageForCanonicalImg(
         const std::vector<cv::Size>& img_pyr_sizes,
         const cv::Size& resized_seg_image,
         const int& cur_perimeter,
         const int& min_perimeter) {
-    unsigned int h = 0;
+    size_t h = 0;
     double dist = std::numeric_limits<double>::max();
     for (size_t i = 0; i < img_pyr_sizes.size(); ++i) {
         const double factor = (double)resized_seg_image.width / img_pyr_sizes[i].width;
@@ -734,9 +735,9 @@ static void _identifyCandidates(InputArray _image,
         // implements equation (4)
         if (params->useAruco3Detection) {
             const size_t perimeter_in_seg_img = contourS[i].size();
-            const int n = _findOptPyrImageForCanonicalImg(_image_pyr_sizes, _image.size(), perimeter_in_seg_img, min_perimeter);
+            const size_t n = _findOptPyrImageForCanonicalImg(_image_pyr_sizes, _image.size(), perimeter_in_seg_img, min_perimeter);
             const Mat& pyr_img = _image_pyr[n];
-            const double scale = (double)_image_pyr_sizes[n].width / _image.cols();
+            const float scale = (float)_image_pyr_sizes[n].width / _image.cols();
             validCandidates[i] = _identifyOneCandidate(_dictionary, pyr_img, candidates[i], currId, params, rotated[i], scale);
         } else {
             validCandidates[i] = _identifyOneCandidate(_dictionary, _image, candidates[i], currId, params, rotated[i]);
@@ -1304,11 +1305,11 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, Outpu
     // sort contours according to perimeter
     if (contours.size() > 0 && _params->cameraMotionSpeed > 0 && _params->useAruco3Detection) {
         std::sort(contours.begin(), contours.end(), [](vector< Point > a, vector< Point > b) {return a.size() < b.size();});
-        const float next_frame_tau_i = (1.0 - _params->cameraMotionSpeed) * contours[0].size() / 4.0;
+        const float next_frame_tau_i = (1.f - _params->cameraMotionSpeed) * contours[0].size() / 4.f;
         return next_frame_tau_i / std::max(img_pyr_sizes[0].width, img_pyr_sizes[0].height);
         // normalize new tau_i
     } else {
-        return 0.0f;
+        return 0.f;
     }
 }
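
The return value adjusted in the hunk above is the per-frame tau_i update from eq. (6): the perimeter of the smallest contour found in the current frame, damped by cameraMotionSpeed (tau_s) and normalized by the image size. A minimal usage sketch, not part of the patch series and assuming the float-returning detectMarkers() overload introduced here, shows how that value could be fed back into minMarkerLengthRatioOriginalImg between frames of a video:

    // Illustrative sketch only (not part of the patches): feed the tau_i estimate
    // returned by the patched detectMarkers() back into the detector parameters.
    #include <opencv2/aruco.hpp>
    #include <opencv2/videoio.hpp>
    #include <vector>

    int main() {
        cv::VideoCapture cap(0);
        cv::Ptr<cv::aruco::Dictionary> dict =
            cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
        cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
        params->useAruco3Detection = true;
        params->cameraMotionSpeed = 0.8f;               // tau_s from the paper
        params->minMarkerLengthRatioOriginalImg = 0.1f; // initial tau_i

        cv::Mat frame;
        while (cap.read(frame)) {
            std::vector<int> ids;
            std::vector<std::vector<cv::Point2f>> corners;
            // next tau_i = (1 - tau_s) * P(v_s) / 4, normalized by the image size (eq. 6)
            const float next_tau_i = cv::aruco::detectMarkers(frame, dict, corners, ids, params);
            if (next_tau_i > 0.f)
                params->minMarkerLengthRatioOriginalImg = next_tau_i;
        }
        return 0;
    }

If nothing was detected (or cameraMotionSpeed is 0) the function returns 0 and the previous ratio is kept, so the detector falls back to its configured minimum marker size in the next frame.
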
From 234b22320bf58387539e5e273fb5c6b9fa04d2e7 Mon Sep 17 00:00:00 2001
From: Steffen Urban
Date: Thu, 11 Feb 2021 09:24:46 +0100
Subject: [PATCH 11/12] second try eliminate warnings

---
 modules/aruco/src/aruco.cpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp
index f2b1b427e8..36db64408a 100644
--- a/modules/aruco/src/aruco.cpp
+++ b/modules/aruco/src/aruco.cpp
@@ -377,13 +377,13 @@ static float _detectInitialCandidates(const Mat &grey, vector< vector< Point2f >
     vector< vector< vector< Point2f > > > candidatesArrays((size_t) nScales);
     vector< vector< vector< Point > > > contoursArrays((size_t) nScales);
-    float otsu_treshold = 0.0;
+    double otsu_treshold = 0.0;
     // extract with global theshold)
     if (params->useGlobalThreshold && params->foundMarkerInLastFrames > 2 && params->useAruco3Detection) {
         Mat thresh;
         if (params->foundGlobalThreshold) {
-            cv::threshold(grey, thresh, params->otsuGlobalThreshold, 255, cv::THRESH_BINARY_INV);
-            otsu_treshold = params->otsuGlobalThreshold;
+            cv::threshold(grey, thresh, (double)params->otsuGlobalThreshold, 255, cv::THRESH_BINARY_INV);
+            otsu_treshold = (double)params->otsuGlobalThreshold;
         } else {
             // first time get threshold with otsu
             otsu_treshold = cv::threshold(grey, thresh, 0, 255, cv::THRESH_BINARY_INV | cv::THRESH_OTSU);
@@ -392,7 +392,7 @@ static float _detectInitialCandidates(const Mat &grey, vector< vector< Point2f >
         }
         // get lines
         const int el_size = 3;
-        const int el_size_half = el_size / 2.0;
+        const int el_size_half = static_cast<int>(el_size / 2.0);
         cv::Mat struc_el = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(el_size,el_size), cv::Point(el_size_half,el_size_half));
         cv::Mat eroded_imaged;
         cv::erode(thresh, eroded_imaged, struc_el);
@@ -429,7 +429,7 @@ static float _detectInitialCandidates(const Mat &grey, vector< vector< Point2f >
         }
     }
-    return otsu_treshold;
+    return (float)otsu_treshold;
 }
@@ -734,7 +734,7 @@ static void _identifyCandidates(InputArray _image,
         // implements equation (4)
         if (params->useAruco3Detection) {
-            const size_t perimeter_in_seg_img = contourS[i].size();
+            const int perimeter_in_seg_img = (int)contourS[i].size();
             const size_t n = _findOptPyrImageForCanonicalImg(_image_pyr_sizes, _image.size(), perimeter_in_seg_img, min_perimeter);
             const Mat& pyr_img = _image_pyr[n];
             const float scale = (float)_image_pyr_sizes[n].width / _image.cols();
@@ -1106,13 +1106,13 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, Outpu
     }
     /// Step 0: equation (2) from paper [1]
-    const unsigned int tau_i_dot = _params->minSideLengthCanonicalImg +
+    const int tau_i_dot = _params->minSideLengthCanonicalImg +
             std::max(grey.cols, grey.rows) * _params->minMarkerLengthRatioOriginalImg;
     //// Step 0.1: resize image with equation (1) from paper [1]
     float fxfy = (float)_params->minSideLengthCanonicalImg / tau_i_dot;
     if (!_params->useAruco3Detection) {
-        fxfy = 1.0;
+        fxfy = 1.f;
     }
     const cv::Size seg_img_size = cv::Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows));

From 8a9e7c5002bdbc33eae227cd324ae93e2a28e97c Mon Sep 17 00:00:00 2001
From: Steffen Urban
Date: Thu, 11 Feb 2021 10:09:27 +0100
Subject: [PATCH 12/12] one last warning

---
 modules/aruco/src/aruco.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp
index 36db64408a..4a1dd98de2 100644
--- a/modules/aruco/src/aruco.cpp
+++ b/modules/aruco/src/aruco.cpp
@@ -1107,7 +1107,7 @@ float detectMarkers(InputArray _image, const Ptr &_dictionary, Outpu
     /// Step 0: equation (2) from paper [1]
     const int tau_i_dot = _params->minSideLengthCanonicalImg +
-            std::max(grey.cols, grey.rows) * _params->minMarkerLengthRatioOriginalImg;
+            (int)((float)std::max(grey.cols, grey.rows) * _params->minMarkerLengthRatioOriginalImg);
     //// Step 0.1: resize image with equation (1) from paper [1]
     float fxfy = (float)_params->minSideLengthCanonicalImg / tau_i_dot;
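
Patches 11 and 12 only tighten the integer and float conversions around eq. (2), so a quick standalone check of the formula helps confirm the intended magnitudes. The values below are illustrative assumptions (tau_c = 16, tau_i = 0.1, FullHD input), not numbers taken from the patches:

    // Standalone numeric check of equations (1) and (2), independent of the patch.
    #include <algorithm>
    #include <cstdio>

    int main() {
        const int   img_w = 1920, img_h = 1080;
        const int   tau_c = 16;    // minSideLengthCanonicalImg
        const float tau_i = 0.1f;  // minMarkerLengthRatioOriginalImg

        // eq. (2): tau_dot_i = tau_c + max(I_w, I_h) * tau_i
        const int tau_i_dot = tau_c + (int)((float)std::max(img_w, img_h) * tau_i);

        // eq. (1): resize factor applied to the image used for segmentation
        const float fxfy = (float)tau_c / (float)tau_i_dot;

        // With these values: tau_i_dot = 16 + 192 = 208 and fxfy ~= 0.077,
        // i.e. the 1920x1080 frame is segmented at roughly 148x83 pixels.
        std::printf("tau_i_dot = %d, fxfy = %.3f, seg size = %dx%d\n",
                    tau_i_dot, fxfy, (int)(fxfy * img_w + 0.5f), (int)(fxfy * img_h + 0.5f));
        return 0;
    }

Shrinking the segmentation image by that factor, while reading the marker bits from a suitable pyramid level, is where the speed-up of the Aruco3 strategy comes from.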