From f6a39c5d01a7b2d2bb223a2a67beb9736fce7d93 Mon Sep 17 00:00:00 2001 From: Alexander Panov Date: Mon, 25 Apr 2022 22:05:28 +0300 Subject: [PATCH 01/45] Merge pull request #3229 from AleksandrPanov:add_Dictionary_bindings * add Dictionary bindings * add python tests --- .../include/opencv2/aruco/dictionary.hpp | 4 ++-- modules/aruco/misc/python/test/test_aruco.py | 20 +++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco/dictionary.hpp b/modules/aruco/include/opencv2/aruco/dictionary.hpp index 40174162e4b..ca9d0b4a0fa 100644 --- a/modules/aruco/include/opencv2/aruco/dictionary.hpp +++ b/modules/aruco/include/opencv2/aruco/dictionary.hpp @@ -117,13 +117,13 @@ class CV_EXPORTS_W Dictionary { * @brief Given a matrix of bits. Returns whether if marker is identified or not. * It returns by reference the correct id (if any) and the correct rotation */ - bool identify(const Mat &onlyBits, int &idx, int &rotation, double maxCorrectionRate) const; + CV_WRAP bool identify(const Mat &onlyBits, CV_OUT int &idx, CV_OUT int &rotation, double maxCorrectionRate) const; /** * @brief Returns the distance of the input bits to the specific id. 
If allRotations is true, * the four posible bits rotation are considered */ - int getDistanceToId(InputArray bits, int id, bool allRotations = true) const; + CV_WRAP int getDistanceToId(InputArray bits, int id, bool allRotations = true) const; /** diff --git a/modules/aruco/misc/python/test/test_aruco.py b/modules/aruco/misc/python/test/test_aruco.py index 9fb9675aa3d..6c76fb5ed92 100644 --- a/modules/aruco/misc/python/test/test_aruco.py +++ b/modules/aruco/misc/python/test/test_aruco.py @@ -64,6 +64,26 @@ def test_write_read_dict(self): if os.path.exists(filename): os.remove(filename) + def test_identify(self): + aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50) + expected_idx = 9 + expected_rotation = 2 + bit_marker = np.array([[0, 1, 1, 0], [1, 0, 1, 0], [1, 1, 1, 1], [0, 0, 1, 1]], dtype=np.uint8) + + check, idx, rotation = aruco_dict.identify(bit_marker, 0) + + self.assertTrue(check, True) + self.assertEqual(idx, expected_idx) + self.assertEqual(rotation, expected_rotation) + + def test_getDistanceToId(self): + aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50) + idx = 7 + rotation = 3 + bit_marker = np.array([[0, 1, 0, 1], [0, 1, 1, 1], [1, 1, 0, 0], [0, 1, 0, 0]], dtype=np.uint8) + dist = aruco_dict.getDistanceToId(bit_marker, idx) + + self.assertEqual(dist, 0) if __name__ == '__main__': NewOpenCVTests.bootstrap() From 4c766202995220946e33e2f66338d060dc9e0910 Mon Sep 17 00:00:00 2001 From: huangziqing <270704881@qq.com> Date: Fri, 29 Apr 2022 23:13:53 +0800 Subject: [PATCH 02/45] Extend the interface of the ORB class --- .../include/opencv2/cudafeatures2d.hpp | 30 +++++++++++++++++-- .../misc/python/test/test_cudafeatures2d.py | 12 ++++++++ modules/cudafeatures2d/src/orb.cpp | 4 +-- 3 files changed, 41 insertions(+), 5 deletions(-) diff --git a/modules/cudafeatures2d/include/opencv2/cudafeatures2d.hpp b/modules/cudafeatures2d/include/opencv2/cudafeatures2d.hpp index da9974e2416..311b0dc28b8 100644 --- 
a/modules/cudafeatures2d/include/opencv2/cudafeatures2d.hpp +++ b/modules/cudafeatures2d/include/opencv2/cudafeatures2d.hpp @@ -471,12 +471,36 @@ class CV_EXPORTS_W ORB : public Feature2DAsync int fastThreshold=20, bool blurForDescriptor=false); - //! if true, image will be blurred before descriptors calculation - CV_WRAP virtual void setBlurForDescriptor(bool blurForDescriptor) = 0; - CV_WRAP virtual bool getBlurForDescriptor() const = 0; + CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0; + CV_WRAP virtual int getMaxFeatures() const = 0; + + CV_WRAP virtual void setScaleFactor(double scaleFactor) = 0; + CV_WRAP virtual double getScaleFactor() const = 0; + + CV_WRAP virtual void setNLevels(int nlevels) = 0; + CV_WRAP virtual int getNLevels() const = 0; + + CV_WRAP virtual void setEdgeThreshold(int edgeThreshold) = 0; + CV_WRAP virtual int getEdgeThreshold() const = 0; + + CV_WRAP virtual void setFirstLevel(int firstLevel) = 0; + CV_WRAP virtual int getFirstLevel() const = 0; + + CV_WRAP virtual void setWTA_K(int wta_k) = 0; + CV_WRAP virtual int getWTA_K() const = 0; + + CV_WRAP virtual void setScoreType(int scoreType) = 0; + CV_WRAP virtual int getScoreType() const = 0; + + CV_WRAP virtual void setPatchSize(int patchSize) = 0; + CV_WRAP virtual int getPatchSize() const = 0; CV_WRAP virtual void setFastThreshold(int fastThreshold) = 0; CV_WRAP virtual int getFastThreshold() const = 0; + + //! if true, image will be blurred before descriptors calculation + CV_WRAP virtual void setBlurForDescriptor(bool blurForDescriptor) = 0; + CV_WRAP virtual bool getBlurForDescriptor() const = 0; }; //! 
@} diff --git a/modules/cudafeatures2d/misc/python/test/test_cudafeatures2d.py b/modules/cudafeatures2d/misc/python/test/test_cudafeatures2d.py index 9c17da79674..3105d5f02df 100644 --- a/modules/cudafeatures2d/misc/python/test/test_cudafeatures2d.py +++ b/modules/cudafeatures2d/misc/python/test/test_cudafeatures2d.py @@ -27,6 +27,18 @@ def test_cudafeatures2d(self): _kps = fast.detectAsync(cuMat1) orb = cv.cuda_ORB.create() + + orb.setMaxFeatures(500) + orb.setScaleFactor(1.2) + orb.setNLevels(8) + orb.setEdgeThreshold(31) + orb.setFirstLevel(0) + orb.setWTA_K(2) + orb.setScoreType(cv.ORB_HARRIS_SCORE) + orb.setPatchSize(31) + orb.setFastThreshold(20) + orb.setBlurForDescriptor(True) + _kps1, descs1 = orb.detectAndComputeAsync(cuMat1, None) _kps2, descs2 = orb.detectAndComputeAsync(cuMat2, None) diff --git a/modules/cudafeatures2d/src/orb.cpp b/modules/cudafeatures2d/src/orb.cpp index 75cdd7efa88..3edf981f339 100644 --- a/modules/cudafeatures2d/src/orb.cpp +++ b/modules/cudafeatures2d/src/orb.cpp @@ -373,13 +373,13 @@ namespace virtual void setFirstLevel(int firstLevel) { firstLevel_ = firstLevel; } virtual int getFirstLevel() const { return firstLevel_; } - virtual void setWTA_K(int wta_k) { WTA_K_ = wta_k; } + virtual void setWTA_K(int wta_k) { CV_Assert( wta_k == 2 || wta_k == 3 || wta_k == 4 ); WTA_K_ = wta_k; } virtual int getWTA_K() const { return WTA_K_; } virtual void setScoreType(int scoreType) { scoreType_ = scoreType; } virtual int getScoreType() const { return scoreType_; } - virtual void setPatchSize(int patchSize) { patchSize_ = patchSize; } + virtual void setPatchSize(int patchSize) { CV_Assert( patchSize >= 2 ); patchSize_ = patchSize; } virtual int getPatchSize() const { return patchSize_; } virtual void setFastThreshold(int fastThreshold) { fastThreshold_ = fastThreshold; } From a7e3630f30d2c1c0c98339ae629842cc6ac22ac5 Mon Sep 17 00:00:00 2001 From: Andrey Senyaev Date: Wed, 11 May 2022 11:37:15 +0300 Subject: [PATCH 03/45] Workflow for Github 
Actions to build and test OpenCV on Linux for 3.4 --- .github/workflows/PR-3.4-U20.yaml | 214 ++++++++++++++++++++++++++++++ 1 file changed, 214 insertions(+) create mode 100644 .github/workflows/PR-3.4-U20.yaml diff --git a/.github/workflows/PR-3.4-U20.yaml b/.github/workflows/PR-3.4-U20.yaml new file mode 100644 index 00000000000..d97db46a3f2 --- /dev/null +++ b/.github/workflows/PR-3.4-U20.yaml @@ -0,0 +1,214 @@ +name: PR:3.4 U20 + +on: + pull_request: + branches: + - 3.4 + +env: + EXTRA_CMAKE_OPTIONS: '-DBUILD_DOCS=ON -DPYTHON_DEFAULT_EXECUTABLE=/usr/bin/python3 -DBUILD_EXAMPLES=ON -DOPENCV_ENABLE_NONFREE=ON -DENABLE_CCACHE=OFF' + OPENCV_TEST_DATA_PATH: '/opencv_extra/testdata' + OPENCV_CONTRIB_DOCKER_WORKDIR: '/__w/opencv_contrib/opencv_contrib' + PR_AUTHOR: ${{ github.event.pull_request.user.login }} + PR_AUTHOR_FORK: ${{ github.event.pull_request.head.repo.full_name }} + SOURCE_BRANCH_NAME: ${{ github.head_ref }} + TARGET_BRANCH_NAME: ${{ github.base_ref }} + ANT_HOME: '/usr/share/ant' + PYTHONPATH: /opencv-contrib-build/python_loader:$PYTHONPATH + GTEST_FILTER_STRING: '-tracking_GOTURN.GOTURN/*' + +jobs: + BuildAndTest: + runs-on: ubuntu-20.04 + defaults: + run: + shell: bash + container: + image: quay.io/asenyaev/opencv-ubuntu:20.04 + steps: + - name: PR info + run: | + echo "PR Author: ${{ env.PR_AUTHOR }}" + echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}" + echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}" + - name: Clean + run: find . 
-mindepth 1 -delete + - name: Fetch opencv_contrib + uses: actions/checkout@v3 + with: + repository: opencv/opencv_contrib + ref: ${{ env.TARGET_BRANCH_NAME }} + fetch-depth: 0 + - name: Merge opencv_contrib with ${{ env.SOURCE_BRANCH_NAME }} branch + run: | + cd ${{ env.OPENCV_CONTRIB_DOCKER_WORKDIR }} + git config --global --add safe.directory ${{ env.OPENCV_CONTRIB_DOCKER_WORKDIR }} + git config user.email "opencv.ci" + git config user.name "opencv.ci" + git pull -v "https://github.com/${{ env.PR_AUTHOR_FORK }}" "${{ env.SOURCE_BRANCH_NAME }}" + - name: Clone opencv + run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} https://github.com/opencv/opencv.git /opencv + - name: Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch + run: | + OPENCV_FORK=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv" "${{ env.SOURCE_BRANCH_NAME }}") || true + if [[ ! -z "$OPENCV_FORK" ]]; then + echo "Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch" + cd /opencv + git config user.email "opencv.ci" + git config user.name "opencv.ci" + git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv" "${{ env.SOURCE_BRANCH_NAME }}" + else + echo "No merge since ${{ env.PR_AUTHOR }}/opencv does not have branch ${{ env.SOURCE_BRANCH_NAME }}" + fi + - name: Clone opencv_extra + run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} https://github.com/opencv/opencv_extra.git /opencv_extra + - name: Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch + run: | + OPENCV_EXTRA_FORK=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}") || true + if [[ ! 
-z "$OPENCV_EXTRA_FORK" ]]; then + echo "Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch" + cd /opencv_extra + git config user.email "opencv.ci" + git config user.name "opencv.ci" + git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}" + else + echo "No merge since ${{ env.PR_AUTHOR }}/opencv_extra does not have branch ${{ env.SOURCE_BRANCH_NAME }}" + fi + - name: Configure OpenCV + run: | + cd /opencv-contrib-build + cmake -G Ninja ${{ env.EXTRA_CMAKE_OPTIONS }} -DOPENCV_EXTRA_MODULES_PATH=${{ env.OPENCV_CONTRIB_DOCKER_WORKDIR }}/modules /opencv + - name: Build OpenCV + run: | + cd /opencv-contrib-build + ninja + - name: Accuracy:aruco + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_aruco --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:bgsegm + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_bgsegm --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:bioinspired + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_bioinspired --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:calib3d + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_calib3d --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:core + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_core --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:dnn + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_dnn --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:face + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_face --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:features2d + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_features2d --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:flann + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_flann --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:fuzzy + run: cd 
/opencv-contrib-build && xvfb-run -a bin/opencv_test_fuzzy --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:hdf + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_hdf --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:highgui + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_highgui --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:img_hash + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_img_hash --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:imgcodecs + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_imgcodecs --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:imgproc + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_imgproc --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:line_descriptor + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_line_descriptor --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:ml + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_ml --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:objdetect + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_objdetect --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:optflow + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_optflow --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:phase_unwrapping + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_phase_unwrapping --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:photo + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_photo --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:reg + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_reg --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:rgbd + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_rgbd --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: 
Accuracy:sfm + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_sfm --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:shape + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_shape --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:stereo + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_stereo --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:stitching + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_stitching --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:structured_light + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_structured_light --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:superres + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_superres --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:text + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_text --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:tracking + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_tracking --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:video + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_video --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:videoio + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_videoio --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:videostab + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_videostab --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:xfeatures2d + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_xfeatures2d --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:ximgproc + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_ximgproc --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:xphoto + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_xphoto --gtest_filter=${{ 
env.GTEST_FILTER_STRING }} + - name: Performance:bioinspired + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_bioinspired --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:calib3d + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_calib3d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:core + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_core --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:dnn + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_dnn --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:features2d + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_features2d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:imgcodecs + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_imgcodecs --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:imgproc + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_imgproc --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:line_descriptor + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_line_descriptor --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 
--gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:objdetect + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_objdetect --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:optflow + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_optflow --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:photo + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_photo --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:reg + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_reg --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:stereo + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_stereo --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:stitching + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_stitching --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:superres + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_superres --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:tracking + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_tracking --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 
--gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:video + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_video --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:videoio + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_videoio --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:xfeatures2d + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_xfeatures2d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:ximgproc + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_ximgproc --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:xphoto + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_xphoto --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Python3 + run: | + cd /opencv/modules/python/test + python3 ./test.py --repo ../../../ -v + - name: Java + run: cd /opencv-contrib-build && xvfb-run -a python3 /opencv/modules/ts/misc/run.py . -a -t java + - name: Save Unit Test Results + uses: actions/upload-artifact@v3 + if: always() + with: + name: junit-html + path: /opencv-contrib-build/java_test/testResults/junit-noframes.html + - name: Pylint + run: cd /opencv-contrib-build && cmake --build . 
--config release --target check_pylint -- -j4 From 0a5cef2cf7511c09169bbf77919999fcc858d73c Mon Sep 17 00:00:00 2001 From: Andrey Senyaev Date: Thu, 28 Apr 2022 19:14:12 +0300 Subject: [PATCH 04/45] Workflow for Github Actions to build and test OpenCV on Linux for 4.x --- .github/workflows/PR-4.x-U20.yaml | 32 +++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/.github/workflows/PR-4.x-U20.yaml b/.github/workflows/PR-4.x-U20.yaml index cf809ca8851..2c4348ef49f 100644 --- a/.github/workflows/PR-4.x-U20.yaml +++ b/.github/workflows/PR-4.x-U20.yaml @@ -1,7 +1,9 @@ name: PR:4.x U20 -# TODO: enable pipeline after 4.x update -on: workflow_dispatch +on: + pull_request: + branches: + - 4.x env: EXTRA_CMAKE_OPTIONS: '-DBUILD_DOCS=ON -DPYTHON_DEFAULT_EXECUTABLE=/usr/bin/python3 -DBUILD_EXAMPLES=ON -DOPENCV_ENABLE_NONFREE=ON -DENABLE_CCACHE=OFF' @@ -82,6 +84,8 @@ jobs: ninja - name: Accuracy:aruco run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_aruco --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:barcode + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_barcode --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:bgsegm run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_bgsegm --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:bioinspired @@ -92,6 +96,8 @@ jobs: run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_core --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:dnn run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_dnn --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:dnn_superres + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_dnn_superres --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:face run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_face --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:features2d @@ -100,6 +106,8 @@ jobs: run: cd 
/opencv-contrib-build && xvfb-run -a bin/opencv_test_flann --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:fuzzy run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_fuzzy --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:gapi + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_gapi --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:hdf run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_hdf --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:highgui @@ -110,8 +118,12 @@ jobs: run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_imgcodecs --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:imgproc run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_imgproc --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:intensity_transform + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_intensity_transform --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:line_descriptor run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_line_descriptor --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:mcc + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_mcc --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:ml run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_ml --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:objdetect @@ -122,10 +134,16 @@ jobs: run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_phase_unwrapping --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:photo run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_photo --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:quality + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_quality --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:rapid + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_rapid --gtest_filter=${{ 
env.GTEST_FILTER_STRING }} - name: Accuracy:reg run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_reg --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:rgbd run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_rgbd --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:saliency + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_saliency --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:sfm run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_sfm --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:shape @@ -148,12 +166,16 @@ jobs: run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_videoio --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:videostab run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_videostab --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Accuracy:wechat_qrcode + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_wechat_qrcode --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:xfeatures2d run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_xfeatures2d --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:ximgproc run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_ximgproc --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Accuracy:xphoto run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_xphoto --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:aruco + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_aruco --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Performance:bioinspired run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_bioinspired --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Performance:calib3d @@ -162,8 
+184,12 @@ jobs: run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_core --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Performance:dnn run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_dnn --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:dnn_superres + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_dnn_superres --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Performance:features2d run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_features2d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:gapi + run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_gapi --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Performance:imgcodecs run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_imgcodecs --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Performance:imgproc @@ -178,6 +204,8 @@ jobs: run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_photo --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Performance:reg run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_reg --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} + - name: Performance:rgbd + 
run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_rgbd --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Performance:stereo run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_stereo --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - name: Performance:stitching From ac6a5d44d69a45d4c51d320dbfe65802529de448 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sun, 15 May 2022 16:21:48 +0000 Subject: [PATCH 05/45] build: GCC12 warnings --- modules/surface_matching/src/pose_3d.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/surface_matching/src/pose_3d.cpp b/modules/surface_matching/src/pose_3d.cpp index 165ec197481..89a8205f4b0 100644 --- a/modules/surface_matching/src/pose_3d.cpp +++ b/modules/surface_matching/src/pose_3d.cpp @@ -273,7 +273,6 @@ int PoseCluster3D::readPoseCluster(FILE* f) status = fread(&id, sizeof(int), 1, f); status = fread(&numVotes, sizeof(int), 1, f); status = fread(&numPoses, sizeof(int), 1, f); - fclose(f); poseList.clear(); poseList.resize(numPoses); @@ -283,6 +282,7 @@ int PoseCluster3D::readPoseCluster(FILE* f) poseList[i]->readPose(f); } + fclose(f); return 0; } From 176d81cc7f7a8d2a5557aca8a4ef36af6a1e8cc3 Mon Sep 17 00:00:00 2001 From: Andrey Senyaev Date: Fri, 20 May 2022 19:48:00 +0300 Subject: [PATCH 06/45] Move workflows to a dedicated repository for 4.x branch --- .github/workflows/PR-4.x-U20.yaml | 240 ------------------------------ .github/workflows/PR-4.x.yaml | 16 ++ 2 files changed, 16 insertions(+), 240 deletions(-) delete mode 100644 .github/workflows/PR-4.x-U20.yaml create mode 100644 .github/workflows/PR-4.x.yaml diff --git a/.github/workflows/PR-4.x-U20.yaml b/.github/workflows/PR-4.x-U20.yaml deleted file mode 100644 index 2c4348ef49f..00000000000 --- 
a/.github/workflows/PR-4.x-U20.yaml +++ /dev/null @@ -1,240 +0,0 @@ -name: PR:4.x U20 - -on: - pull_request: - branches: - - 4.x - -env: - EXTRA_CMAKE_OPTIONS: '-DBUILD_DOCS=ON -DPYTHON_DEFAULT_EXECUTABLE=/usr/bin/python3 -DBUILD_EXAMPLES=ON -DOPENCV_ENABLE_NONFREE=ON -DENABLE_CCACHE=OFF' - OPENCV_TEST_DATA_PATH: '/opencv_extra/testdata' - OPENCV_CONTRIB_DOCKER_WORKDIR: '/__w/opencv_contrib/opencv_contrib' - PR_AUTHOR: ${{ github.event.pull_request.user.login }} - PR_AUTHOR_FORK: ${{ github.event.pull_request.head.repo.full_name }} - SOURCE_BRANCH_NAME: ${{ github.head_ref }} - TARGET_BRANCH_NAME: ${{ github.base_ref }} - ANT_HOME: '/usr/share/ant' - PYTHONPATH: /opencv-contrib-build/python_loader:$PYTHONPATH - GTEST_FILTER_STRING: '-tracking_GOTURN.GOTURN/*' - -jobs: - BuildAndTest: - runs-on: ubuntu-20.04 - defaults: - run: - shell: bash - container: - image: quay.io/asenyaev/opencv-ubuntu:20.04 - steps: - - name: PR info - run: | - echo "PR Author: ${{ env.PR_AUTHOR }}" - echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}" - echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}" - - name: Clean - run: find . 
-mindepth 1 -delete - - name: Fetch opencv_contrib - uses: actions/checkout@v3 - with: - repository: opencv/opencv_contrib - ref: ${{ env.TARGET_BRANCH_NAME }} - fetch-depth: 0 - - name: Merge opencv_contrib with ${{ env.SOURCE_BRANCH_NAME }} branch - run: | - cd ${{ env.OPENCV_CONTRIB_DOCKER_WORKDIR }} - git config --global --add safe.directory ${{ env.OPENCV_CONTRIB_DOCKER_WORKDIR }} - git config user.email "opencv.ci" - git config user.name "opencv.ci" - git pull -v "https://github.com/${{ env.PR_AUTHOR_FORK }}" "${{ env.SOURCE_BRANCH_NAME }}" - - name: Clone opencv - run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} https://github.com/opencv/opencv.git /opencv - - name: Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch - run: | - OPENCV_FORK=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv" "${{ env.SOURCE_BRANCH_NAME }}") || true - if [[ ! -z "$OPENCV_FORK" ]]; then - echo "Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch" - cd /opencv - git config user.email "opencv.ci" - git config user.name "opencv.ci" - git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv" "${{ env.SOURCE_BRANCH_NAME }}" - else - echo "No merge since ${{ env.PR_AUTHOR }}/opencv does not have branch ${{ env.SOURCE_BRANCH_NAME }}" - fi - - name: Clone opencv_extra - run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} https://github.com/opencv/opencv_extra.git /opencv_extra - - name: Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch - run: | - OPENCV_EXTRA_FORK=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}") || true - if [[ ! 
-z "$OPENCV_EXTRA_FORK" ]]; then - echo "Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch" - cd /opencv_extra - git config user.email "opencv.ci" - git config user.name "opencv.ci" - git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}" - else - echo "No merge since ${{ env.PR_AUTHOR }}/opencv_extra does not have branch ${{ env.SOURCE_BRANCH_NAME }}" - fi - - name: Configure OpenCV - run: | - cd /opencv-contrib-build - cmake -G Ninja ${{ env.EXTRA_CMAKE_OPTIONS }} -DOPENCV_EXTRA_MODULES_PATH=${{ env.OPENCV_CONTRIB_DOCKER_WORKDIR }}/modules /opencv - - name: Build OpenCV - run: | - cd /opencv-contrib-build - ninja - - name: Accuracy:aruco - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_aruco --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:barcode - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_barcode --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:bgsegm - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_bgsegm --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:bioinspired - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_bioinspired --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:calib3d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_calib3d --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:core - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_core --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:dnn - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_dnn --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:dnn_superres - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_dnn_superres --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:face - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_face --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:features2d - 
run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_features2d --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:flann - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_flann --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:fuzzy - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_fuzzy --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:gapi - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_gapi --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:hdf - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_hdf --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:highgui - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_highgui --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:img_hash - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_img_hash --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:imgcodecs - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_imgcodecs --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:imgproc - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_imgproc --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:intensity_transform - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_intensity_transform --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:line_descriptor - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_line_descriptor --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:mcc - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_mcc --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:ml - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_ml --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:objdetect - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_objdetect --gtest_filter=${{ env.GTEST_FILTER_STRING }} 
- - name: Accuracy:optflow - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_optflow --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:phase_unwrapping - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_phase_unwrapping --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:photo - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_photo --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:quality - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_quality --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:rapid - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_rapid --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:reg - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_reg --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:rgbd - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_rgbd --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:saliency - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_saliency --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:sfm - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_sfm --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:shape - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_shape --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:stereo - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_stereo --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:stitching - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_stitching --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:structured_light - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_structured_light --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:superres - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_superres --gtest_filter=${{ 
env.GTEST_FILTER_STRING }} - - name: Accuracy:text - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_text --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:tracking - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_tracking --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:video - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_video --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:videoio - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_videoio --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:videostab - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_videostab --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:wechat_qrcode - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_wechat_qrcode --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:xfeatures2d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_xfeatures2d --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:ximgproc - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_ximgproc --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:xphoto - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_xphoto --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:aruco - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_aruco --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:bioinspired - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_bioinspired --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:calib3d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_calib3d --perf_impl=plain --perf_min_samples=1 
--perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:core - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_core --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:dnn - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_dnn --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:dnn_superres - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_dnn_superres --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:features2d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_features2d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:gapi - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_gapi --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:imgcodecs - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_imgcodecs --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:imgproc - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_imgproc --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:line_descriptor - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_line_descriptor --perf_impl=plain --perf_min_samples=1 
--perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:objdetect - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_objdetect --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:optflow - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_optflow --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:photo - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_photo --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:reg - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_reg --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:rgbd - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_rgbd --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:stereo - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_stereo --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:stitching - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_stitching --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:superres - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_superres --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 
--perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:tracking - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_tracking --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:video - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_video --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:videoio - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_videoio --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:xfeatures2d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_xfeatures2d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:ximgproc - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_ximgproc --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:xphoto - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_xphoto --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Python3 - run: | - cd /opencv/modules/python/test - python3 ./test.py --repo ../../../ -v - - name: Java - run: cd /opencv-contrib-build && xvfb-run -a python3 /opencv/modules/ts/misc/run.py . 
-a -t java - - name: Save Unit Test Results - uses: actions/upload-artifact@v3 - if: always() - with: - name: junit-html - path: /opencv-contrib-build/java_test/testResults/junit-noframes.html - - name: Pylint - run: cd /opencv-contrib-build && cmake --build . --config release --target check_pylint -- -j4 diff --git a/.github/workflows/PR-4.x.yaml b/.github/workflows/PR-4.x.yaml new file mode 100644 index 00000000000..a1441e004d9 --- /dev/null +++ b/.github/workflows/PR-4.x.yaml @@ -0,0 +1,16 @@ +name: PR:4.x + +on: + pull_request: + branches: + - 4.x + +jobs: + ARM64: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-ARM64.yaml@main + + U20: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-U20.yaml@main + + W10: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-W10.yaml@main \ No newline at end of file From 39530eaed92ab2ed8cbbc89ca08d9d2ef88fd8ec Mon Sep 17 00:00:00 2001 From: Andrey Senyaev Date: Fri, 20 May 2022 19:48:11 +0300 Subject: [PATCH 07/45] Move workflows to a dedicated repository for 3.4 branch --- .github/workflows/PR-3.4-U20.yaml | 214 ------------------------------ .github/workflows/PR-3.4.yaml | 16 +++ 2 files changed, 16 insertions(+), 214 deletions(-) delete mode 100644 .github/workflows/PR-3.4-U20.yaml create mode 100644 .github/workflows/PR-3.4.yaml diff --git a/.github/workflows/PR-3.4-U20.yaml b/.github/workflows/PR-3.4-U20.yaml deleted file mode 100644 index d97db46a3f2..00000000000 --- a/.github/workflows/PR-3.4-U20.yaml +++ /dev/null @@ -1,214 +0,0 @@ -name: PR:3.4 U20 - -on: - pull_request: - branches: - - 3.4 - -env: - EXTRA_CMAKE_OPTIONS: '-DBUILD_DOCS=ON -DPYTHON_DEFAULT_EXECUTABLE=/usr/bin/python3 -DBUILD_EXAMPLES=ON -DOPENCV_ENABLE_NONFREE=ON -DENABLE_CCACHE=OFF' - OPENCV_TEST_DATA_PATH: '/opencv_extra/testdata' - OPENCV_CONTRIB_DOCKER_WORKDIR: '/__w/opencv_contrib/opencv_contrib' - PR_AUTHOR: ${{ github.event.pull_request.user.login }} - PR_AUTHOR_FORK: ${{ 
github.event.pull_request.head.repo.full_name }} - SOURCE_BRANCH_NAME: ${{ github.head_ref }} - TARGET_BRANCH_NAME: ${{ github.base_ref }} - ANT_HOME: '/usr/share/ant' - PYTHONPATH: /opencv-contrib-build/python_loader:$PYTHONPATH - GTEST_FILTER_STRING: '-tracking_GOTURN.GOTURN/*' - -jobs: - BuildAndTest: - runs-on: ubuntu-20.04 - defaults: - run: - shell: bash - container: - image: quay.io/asenyaev/opencv-ubuntu:20.04 - steps: - - name: PR info - run: | - echo "PR Author: ${{ env.PR_AUTHOR }}" - echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}" - echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}" - - name: Clean - run: find . -mindepth 1 -delete - - name: Fetch opencv_contrib - uses: actions/checkout@v3 - with: - repository: opencv/opencv_contrib - ref: ${{ env.TARGET_BRANCH_NAME }} - fetch-depth: 0 - - name: Merge opencv_contrib with ${{ env.SOURCE_BRANCH_NAME }} branch - run: | - cd ${{ env.OPENCV_CONTRIB_DOCKER_WORKDIR }} - git config --global --add safe.directory ${{ env.OPENCV_CONTRIB_DOCKER_WORKDIR }} - git config user.email "opencv.ci" - git config user.name "opencv.ci" - git pull -v "https://github.com/${{ env.PR_AUTHOR_FORK }}" "${{ env.SOURCE_BRANCH_NAME }}" - - name: Clone opencv - run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} https://github.com/opencv/opencv.git /opencv - - name: Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch - run: | - OPENCV_FORK=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv" "${{ env.SOURCE_BRANCH_NAME }}") || true - if [[ ! 
-z "$OPENCV_FORK" ]]; then - echo "Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch" - cd /opencv - git config user.email "opencv.ci" - git config user.name "opencv.ci" - git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv" "${{ env.SOURCE_BRANCH_NAME }}" - else - echo "No merge since ${{ env.PR_AUTHOR }}/opencv does not have branch ${{ env.SOURCE_BRANCH_NAME }}" - fi - - name: Clone opencv_extra - run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} https://github.com/opencv/opencv_extra.git /opencv_extra - - name: Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch - run: | - OPENCV_EXTRA_FORK=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}") || true - if [[ ! -z "$OPENCV_EXTRA_FORK" ]]; then - echo "Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch" - cd /opencv_extra - git config user.email "opencv.ci" - git config user.name "opencv.ci" - git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}" - else - echo "No merge since ${{ env.PR_AUTHOR }}/opencv_extra does not have branch ${{ env.SOURCE_BRANCH_NAME }}" - fi - - name: Configure OpenCV - run: | - cd /opencv-contrib-build - cmake -G Ninja ${{ env.EXTRA_CMAKE_OPTIONS }} -DOPENCV_EXTRA_MODULES_PATH=${{ env.OPENCV_CONTRIB_DOCKER_WORKDIR }}/modules /opencv - - name: Build OpenCV - run: | - cd /opencv-contrib-build - ninja - - name: Accuracy:aruco - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_aruco --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:bgsegm - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_bgsegm --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:bioinspired - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_bioinspired --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:calib3d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_calib3d 
--gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:core - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_core --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:dnn - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_dnn --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:face - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_face --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:features2d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_features2d --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:flann - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_flann --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:fuzzy - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_fuzzy --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:hdf - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_hdf --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:highgui - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_highgui --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:img_hash - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_img_hash --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:imgcodecs - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_imgcodecs --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:imgproc - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_imgproc --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:line_descriptor - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_line_descriptor --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:ml - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_ml --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:objdetect - run: cd /opencv-contrib-build && xvfb-run -a 
bin/opencv_test_objdetect --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:optflow - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_optflow --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:phase_unwrapping - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_phase_unwrapping --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:photo - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_photo --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:reg - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_reg --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:rgbd - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_rgbd --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:sfm - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_sfm --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:shape - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_shape --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:stereo - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_stereo --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:stitching - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_stitching --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:structured_light - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_structured_light --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:superres - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_superres --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:text - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_text --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:tracking - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_tracking --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:video - run: cd 
/opencv-contrib-build && xvfb-run -a bin/opencv_test_video --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:videoio - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_videoio --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:videostab - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_videostab --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:xfeatures2d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_xfeatures2d --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:ximgproc - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_ximgproc --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Accuracy:xphoto - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_test_xphoto --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:bioinspired - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_bioinspired --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:calib3d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_calib3d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:core - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_core --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:dnn - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_dnn --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:features2d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_features2d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 
--perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:imgcodecs - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_imgcodecs --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:imgproc - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_imgproc --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:line_descriptor - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_line_descriptor --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:objdetect - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_objdetect --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:optflow - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_optflow --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:photo - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_photo --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:reg - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_reg --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:stereo - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_stereo --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 
--perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:stitching - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_stitching --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:superres - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_superres --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:tracking - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_tracking --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:video - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_video --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:videoio - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_videoio --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:xfeatures2d - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_xfeatures2d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:ximgproc - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_ximgproc --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Performance:xphoto - run: cd /opencv-contrib-build && xvfb-run -a bin/opencv_perf_xphoto --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 
--perf_verify_sanity --skip_unstable=1 --gtest_filter=${{ env.GTEST_FILTER_STRING }} - - name: Python3 - run: | - cd /opencv/modules/python/test - python3 ./test.py --repo ../../../ -v - - name: Java - run: cd /opencv-contrib-build && xvfb-run -a python3 /opencv/modules/ts/misc/run.py . -a -t java - - name: Save Unit Test Results - uses: actions/upload-artifact@v3 - if: always() - with: - name: junit-html - path: /opencv-contrib-build/java_test/testResults/junit-noframes.html - - name: Pylint - run: cd /opencv-contrib-build && cmake --build . --config release --target check_pylint -- -j4 diff --git a/.github/workflows/PR-3.4.yaml b/.github/workflows/PR-3.4.yaml new file mode 100644 index 00000000000..3d6d61bb48f --- /dev/null +++ b/.github/workflows/PR-3.4.yaml @@ -0,0 +1,16 @@ +name: PR:3.4 + +on: + pull_request: + branches: + - 3.4 + +jobs: + ARM64: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-ARM64.yaml@main + + U20: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-U20.yaml@main + + W10: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-W10.yaml@main \ No newline at end of file From 3ae17ac161c8f3ae5d20db43bd5e63564fca2cb8 Mon Sep 17 00:00:00 2001 From: Simon Traub <51950538+TraubSimon@users.noreply.github.com> Date: Wed, 25 May 2022 09:51:17 +0200 Subject: [PATCH 08/45] Update video_reader.cpp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tried to build opencv with cmake and got that error: openCV/opencv_contrib/modules/cudacodec/src/video_reader.cpp:99:14: error: extra qualification ‘{anonymous}::VideoReaderImpl::’ on member ‘set’ [-fpermissive] 99 | void VideoReaderImpl::set(const ColorFormat _colorFormat) CV_OVERRIDE; | ^~~~~~~~~~~~~~~ Would propose do delete VideoReaderImpl:: in line 99 --- modules/cudacodec/src/video_reader.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/cudacodec/src/video_reader.cpp 
b/modules/cudacodec/src/video_reader.cpp index b096d2a0f7e..3db9f1bbb15 100644 --- a/modules/cudacodec/src/video_reader.cpp +++ b/modules/cudacodec/src/video_reader.cpp @@ -96,7 +96,7 @@ namespace bool set(const VideoReaderProps propertyId, const double propertyVal) CV_OVERRIDE; - void VideoReaderImpl::set(const ColorFormat _colorFormat) CV_OVERRIDE; + void set(const ColorFormat _colorFormat) CV_OVERRIDE; bool get(const VideoReaderProps propertyId, double& propertyVal) const CV_OVERRIDE; From a590a59055c7a4efe5b92fe11b2d45c2fed75ff2 Mon Sep 17 00:00:00 2001 From: Suleyman TURKMEN Date: Wed, 25 May 2022 08:12:37 +0300 Subject: [PATCH 09/45] update python samples --- modules/stereo/samples/sample_quasi_dense.py | 2 +- modules/tracking/samples/multitracker.py | 12 ++++++------ modules/tracking/samples/tracker.py | 4 ++-- modules/wechat_qrcode/samples/qrcode.py | 2 +- modules/ximgproc/samples/dericheSample.py | 1 - modules/ximgproc/samples/radon_transform_demo.py | 2 +- samples/python2/seeds.py | 7 ++----- 7 files changed, 13 insertions(+), 17 deletions(-) diff --git a/modules/stereo/samples/sample_quasi_dense.py b/modules/stereo/samples/sample_quasi_dense.py index bd11e1d2898..0f6de2db697 100644 --- a/modules/stereo/samples/sample_quasi_dense.py +++ b/modules/stereo/samples/sample_quasi_dense.py @@ -4,7 +4,7 @@ left_img = cv.imread(cv.samples.findFile("aloeL.jpg"), cv.IMREAD_COLOR) right_img = cv.imread(cv.samples.findFile("aloeR.jpg"), cv.IMREAD_COLOR) -frame_size = leftImg.shape[0:2]; +frame_size = left_img.shape[0:2]; stereo = cv.stereo.QuasiDenseStereo_create(frame_size[::-1]) stereo.process(left_img, right_img) diff --git a/modules/tracking/samples/multitracker.py b/modules/tracking/samples/multitracker.py index 96ed1f1305a..1fe6bf6a61f 100644 --- a/modules/tracking/samples/multitracker.py +++ b/modules/tracking/samples/multitracker.py @@ -10,7 +10,7 @@ cv.namedWindow("tracking") camera = cv.VideoCapture(sys.argv[1]) -tracker = cv.MultiTracker_create() +tracker = 
cv.legacy.MultiTracker_create() init_once = False ok, image=camera.read() @@ -25,17 +25,17 @@ while camera.isOpened(): ok, image=camera.read() if not ok: - print 'no image to read' + print('no image to read') break if not init_once: - ok = tracker.add(cv.TrackerMIL_create(), image, bbox1) - ok = tracker.add(cv.TrackerMIL_create(), image, bbox2) - ok = tracker.add(cv.TrackerMIL_create(), image, bbox3) + ok = tracker.add(cv.legacy.TrackerMIL_create(), image, bbox1) + ok = tracker.add(cv.legacy.TrackerMIL_create(), image, bbox2) + ok = tracker.add(cv.legacy.TrackerMIL_create(), image, bbox3) init_once = True ok, boxes = tracker.update(image) - print ok, boxes + print(ok, boxes) for newbox in boxes: p1 = (int(newbox[0]), int(newbox[1])) diff --git a/modules/tracking/samples/tracker.py b/modules/tracking/samples/tracker.py index 891571ec69a..8041d409163 100644 --- a/modules/tracking/samples/tracker.py +++ b/modules/tracking/samples/tracker.py @@ -19,7 +19,7 @@ while camera.isOpened(): ok, image=camera.read() if not ok: - print 'no image to read' + print('no image to read') break if not init_once: @@ -27,7 +27,7 @@ init_once = True ok, newbox = tracker.update(image) - print ok, newbox + print(ok, newbox) if ok: p1 = (int(newbox[0]), int(newbox[1])) diff --git a/modules/wechat_qrcode/samples/qrcode.py b/modules/wechat_qrcode/samples/qrcode.py index fd79607efcf..7713734f993 100644 --- a/modules/wechat_qrcode/samples/qrcode.py +++ b/modules/wechat_qrcode/samples/qrcode.py @@ -37,7 +37,7 @@ cap = cv2.VideoCapture(camIdx) while True: res, img = cap.read() - if img.empty(): + if img is None: break res, points = detector.detectAndDecode(img) for t in res: diff --git a/modules/ximgproc/samples/dericheSample.py b/modules/ximgproc/samples/dericheSample.py index 6468b07c86f..917db05a3f6 100644 --- a/modules/ximgproc/samples/dericheSample.py +++ b/modules/ximgproc/samples/dericheSample.py @@ -28,7 +28,6 @@ def DericheFilter(self): self.module = np.sqrt(dx2+dy2) 
cv.normalize(src=self.module,dst=self.module,norm_type=cv.NORM_MINMAX) def SlideBarDeriche(self): - cv.destroyWindow(self.filename) cv.namedWindow(self.filename) AddSlider("alpha",self.filename,1,400,self.alpha,self.UpdateAlpha) AddSlider("omega",self.filename,1,1000,self.omega,self.UpdateOmega) diff --git a/modules/ximgproc/samples/radon_transform_demo.py b/modules/ximgproc/samples/radon_transform_demo.py index f8ef5663b2a..0ce7386b063 100644 --- a/modules/ximgproc/samples/radon_transform_demo.py +++ b/modules/ximgproc/samples/radon_transform_demo.py @@ -7,7 +7,7 @@ if __name__ == "__main__": src = cv.imread("peilin_plane.png", cv.IMREAD_GRAYSCALE) - radon = cv.ximgproc.RadonTransform(src) + radon = cv.ximgproc.RadonTransform(src).astype(np.float32) cv.imshow("src image", src) cv.imshow("Radon transform", radon) cv.waitKey() diff --git a/samples/python2/seeds.py b/samples/python2/seeds.py index 5507226d575..002f61cd29c 100755 --- a/samples/python2/seeds.py +++ b/samples/python2/seeds.py @@ -12,15 +12,12 @@ import numpy as np import cv2 as cv -# relative module -import video - # built-in module import sys if __name__ == '__main__': - print __doc__ + print(__doc__) try: fn = sys.argv[1] @@ -41,7 +38,7 @@ def nothing(*arg): num_levels = 4 num_histogram_bins = 5 - cap = video.create_capture(fn) + cap = cv.VideoCapture(fn) while True: flag, img = cap.read() converted_img = cv.cvtColor(img, cv.COLOR_BGR2HSV) From 9c4738b527bdcaa522442eaff12d454de1c3d3ee Mon Sep 17 00:00:00 2001 From: cudawarped <12133430+cudawarped@users.noreply.github.com> Date: Fri, 27 May 2022 09:36:55 +0100 Subject: [PATCH 10/45] Merge pull request #3248 from cudawarped:videoreader_decode_all_to_nv12 Force VideoReader to decode all YUV video formats to NV12 * Force decoding of all supported YUV inputs to NV12 and log warning to indicate this is taking place. Add YUV output. * Update to include missing CUDA stream argument to raw frame copy. * Fix copy paste oversight. 
--- .../cudacodec/include/opencv2/cudacodec.hpp | 9 ++++-- modules/cudacodec/src/video_decoder.cpp | 31 ++++++++++++++++--- modules/cudacodec/src/video_reader.cpp | 13 +++++--- modules/cudacodec/test/test_video.cpp | 4 ++- 4 files changed, 45 insertions(+), 12 deletions(-) diff --git a/modules/cudacodec/include/opencv2/cudacodec.hpp b/modules/cudacodec/include/opencv2/cudacodec.hpp index 552010c8bba..24f939af4b3 100644 --- a/modules/cudacodec/include/opencv2/cudacodec.hpp +++ b/modules/cudacodec/include/opencv2/cudacodec.hpp @@ -326,12 +326,13 @@ enum class VideoReaderProps { #endif }; -/** @brief ColorFormat for the frame returned by the decoder. +/** @brief ColorFormat for the frame returned by nextFrame()/retrieve(). */ enum class ColorFormat { BGRA = 1, BGR = 2, GRAY = 3, + YUV = 4, #ifndef CV_DOXYGEN PROP_NOT_SUPPORTED #endif @@ -394,7 +395,11 @@ class CV_EXPORTS_W VideoReader */ CV_WRAP virtual bool set(const VideoReaderProps propertyId, const double propertyVal) = 0; - CV_WRAP virtual void set(const ColorFormat _colorFormat) = 0; + /** @brief Set the desired ColorFormat for the frame returned by nextFrame()/retrieve(). + + @param colorFormat Value of the ColorFormat. 
+ */ + CV_WRAP virtual void set(const ColorFormat colorFormat) = 0; /** @brief Returns the specified VideoReader property diff --git a/modules/cudacodec/src/video_decoder.cpp b/modules/cudacodec/src/video_decoder.cpp index e5e9353e4fa..c05d0fd7305 100644 --- a/modules/cudacodec/src/video_decoder.cpp +++ b/modules/cudacodec/src/video_decoder.cpp @@ -45,14 +45,36 @@ #ifdef HAVE_NVCUVID +static const char* GetVideoChromaFormatString(cudaVideoChromaFormat eChromaFormat) { + static struct { + cudaVideoChromaFormat eChromaFormat; + const char* name; + } aChromaFormatName[] = { + { cudaVideoChromaFormat_Monochrome, "YUV 400 (Monochrome)" }, + { cudaVideoChromaFormat_420, "YUV 420" }, + { cudaVideoChromaFormat_422, "YUV 422" }, + { cudaVideoChromaFormat_444, "YUV 444" }, + }; + + if (eChromaFormat >= 0 && eChromaFormat < sizeof(aChromaFormatName) / sizeof(aChromaFormatName[0])) { + return aChromaFormatName[eChromaFormat].name; + } + return "Unknown"; +} + void cv::cudacodec::detail::VideoDecoder::create(const FormatInfo& videoFormat) { - if (videoFormat.nBitDepthMinus8 > 0 || videoFormat.chromaFormat == YUV444) - CV_Error(Error::StsUnsupportedFormat, "NV12 output currently supported for 8 bit YUV420, YUV422 and Monochrome inputs."); - videoFormat_ = videoFormat; const cudaVideoCodec _codec = static_cast(videoFormat.codec); const cudaVideoChromaFormat _chromaFormat = static_cast(videoFormat.chromaFormat); + if (videoFormat.nBitDepthMinus8 > 0) { + std::ostringstream warning; + warning << "NV12 (8 bit luma, 4 bit chroma) is currently the only supported decoder output format. Video input is " << videoFormat.nBitDepthMinus8 + 8 << " bit " \ + << std::string(GetVideoChromaFormatString(_chromaFormat)) << ". Truncating luma to 8 bits"; + if (videoFormat.chromaFormat != YUV420) + warning << " and chroma to 4 bits"; + CV_LOG_WARNING(NULL, warning.str()); + } const cudaVideoCreateFlags videoCreateFlags = (_codec == cudaVideoCodec_JPEG || _codec == cudaVideoCodec_MPEG2) ? 
cudaVideoCreate_PreferCUDA : cudaVideoCreate_PreferCUVID; @@ -98,7 +120,7 @@ void cv::cudacodec::detail::VideoDecoder::create(const FormatInfo& videoFormat) cuSafeCall(cuCtxPushCurrent(ctx_)); cuSafeCall(cuvidGetDecoderCaps(&decodeCaps)); cuSafeCall(cuCtxPopCurrent(NULL)); - if (!decodeCaps.bIsSupported) + if (!(decodeCaps.bIsSupported && (decodeCaps.nOutputFormatMask & (1 << cudaVideoSurfaceFormat_NV12)))) CV_Error(Error::StsUnsupportedFormat, "Video source is not supported by hardware video decoder"); CV_Assert(videoFormat.ulWidth >= decodeCaps.nMinWidth && @@ -115,6 +137,7 @@ void cv::cudacodec::detail::VideoDecoder::create(const FormatInfo& videoFormat) createInfo_.ulHeight = videoFormat.ulHeight; createInfo_.ulNumDecodeSurfaces = videoFormat.ulNumDecodeSurfaces; createInfo_.ChromaFormat = _chromaFormat; + createInfo_.bitDepthMinus8 = videoFormat.nBitDepthMinus8; createInfo_.OutputFormat = cudaVideoSurfaceFormat_NV12; createInfo_.DeinterlaceMode = static_cast(videoFormat.deinterlaceMode); createInfo_.ulTargetWidth = videoFormat.width; diff --git a/modules/cudacodec/src/video_reader.cpp b/modules/cudacodec/src/video_reader.cpp index 3db9f1bbb15..5a21357189f 100644 --- a/modules/cudacodec/src/video_reader.cpp +++ b/modules/cudacodec/src/video_reader.cpp @@ -56,10 +56,10 @@ Ptr cv::cudacodec::createVideoReader(const Ptr&, co void nv12ToBgra(const GpuMat& decodedFrame, GpuMat& outFrame, int width, int height, cudaStream_t stream); void videoDecPostProcessFrame(const GpuMat& decodedFrame, GpuMat& outFrame, int width, int height, const ColorFormat colorFormat, - cudaStream_t stream) + Stream stream) { if (colorFormat == ColorFormat::BGRA) { - nv12ToBgra(decodedFrame, outFrame, width, height, stream); + nv12ToBgra(decodedFrame, outFrame, width, height, StreamAccessor::getStream(stream)); } else if (colorFormat == ColorFormat::BGR) { outFrame.create(height, width, CV_8UC3); @@ -67,12 +67,15 @@ void videoDecPostProcessFrame(const GpuMat& decodedFrame, GpuMat& outFrame, 
int NppiSize oSizeROI = { width,height }; NppStreamContext nppStreamCtx; nppSafeCall(nppGetStreamContext(&nppStreamCtx)); - nppStreamCtx.hStream = stream; + nppStreamCtx.hStream = StreamAccessor::getStream(stream); nppSafeCall(nppiNV12ToBGR_8u_P2C3R_Ctx(pSrc, decodedFrame.step, outFrame.data, outFrame.step, oSizeROI, nppStreamCtx)); } else if (colorFormat == ColorFormat::GRAY) { outFrame.create(height, width, CV_8UC1); - cudaMemcpy2DAsync(outFrame.ptr(), outFrame.step, decodedFrame.ptr(), decodedFrame.step, width, height, cudaMemcpyDeviceToDevice, stream); + cudaMemcpy2DAsync(outFrame.ptr(), outFrame.step, decodedFrame.ptr(), decodedFrame.step, width, height, cudaMemcpyDeviceToDevice, StreamAccessor::getStream(stream)); + } + else if (colorFormat == ColorFormat::YUV) { + decodedFrame.copyTo(outFrame, stream); } } @@ -217,7 +220,7 @@ namespace // perform post processing on the CUDA surface (performs colors space conversion and post processing) // comment this out if we include the line of code seen above - videoDecPostProcessFrame(decodedFrame, frame, videoDecoder_->targetWidth(), videoDecoder_->targetHeight(), colorFormat, StreamAccessor::getStream(stream)); + videoDecPostProcessFrame(decodedFrame, frame, videoDecoder_->targetWidth(), videoDecoder_->targetHeight(), colorFormat, stream); // unmap video frame // unmapFrame() synchronizes with the VideoDecode API (ensures the frame has finished decoding) diff --git a/modules/cudacodec/test/test_video.cpp b/modules/cudacodec/test/test_video.cpp index 980f393302a..0a9a3c5362c 100644 --- a/modules/cudacodec/test/test_video.cpp +++ b/modules/cudacodec/test/test_video.cpp @@ -187,6 +187,7 @@ CUDA_TEST_P(Video, Reader) {cudacodec::ColorFormat::GRAY,1}, {cudacodec::ColorFormat::BGR,3}, {cudacodec::ColorFormat::BGRA,4}, + {cudacodec::ColorFormat::YUV,1} }; std::string inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "../" + GET_PARAM(1); @@ -201,7 +202,8 @@ CUDA_TEST_P(Video, Reader) 
ASSERT_TRUE(reader->nextFrame(frame)); if(!fmt.valid) fmt = reader->format(); - ASSERT_TRUE(frame.cols == fmt.width && frame.rows == fmt.height); + const int height = formatToChannels.first == cudacodec::ColorFormat::YUV ? 1.5 * fmt.height : fmt.height; + ASSERT_TRUE(frame.cols == fmt.width && frame.rows == height); ASSERT_FALSE(frame.empty()); ASSERT_TRUE(frame.channels() == formatToChannels.second); } From 63cab1b0eedc486c6ea86e00a13043cc7ee931f3 Mon Sep 17 00:00:00 2001 From: Vincent Rabaud Date: Mon, 30 May 2022 21:13:25 +0200 Subject: [PATCH 11/45] Merge pull request #3243 from vrabaud:3.4_ceres * Use ceres::Manifolds instead of ceres::LocalParameterization. The latter is deprecated. * Fix typo. * Fix version check. * And more version fixes. --- .../libmv/simple_pipeline/bundle.cc | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc b/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc index 1a1568da831..1d19978937b 100644 --- a/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc +++ b/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc @@ -24,6 +24,7 @@ #include "ceres/ceres.h" #include "ceres/rotation.h" +#include "ceres/version.h" #include "libmv/base/vector.h" #include "libmv/logging/logging.h" #include "libmv/multiview/fundamental.h" @@ -485,7 +486,11 @@ void EuclideanBundleCommonIntrinsics( PackCamerasRotationAndTranslation(tracks, *reconstruction); // Parameterization used to restrict camera motion for modal solvers. 
+#if CERES_VERSION_MAJOR >= 3 || (CERES_VERSION_MAJOR >= 2 && CERES_VERSION_MINOR >= 1) + ceres::SubsetManifold *constant_translation_manifold = NULL; +#else ceres::SubsetParameterization *constant_translation_parameterization = NULL; +#endif if (bundle_constraints & BUNDLE_NO_TRANSLATION) { std::vector constant_translation; @@ -494,8 +499,13 @@ void EuclideanBundleCommonIntrinsics( constant_translation.push_back(4); constant_translation.push_back(5); +#if CERES_VERSION_MAJOR >= 3 || (CERES_VERSION_MAJOR >= 2 && CERES_VERSION_MINOR >= 1) + constant_translation_manifold = + new ceres::SubsetManifold(6, constant_translation); +#else constant_translation_parameterization = new ceres::SubsetParameterization(6, constant_translation); +#endif } // Add residual blocks to the problem. @@ -538,8 +548,13 @@ void EuclideanBundleCommonIntrinsics( } if (bundle_constraints & BUNDLE_NO_TRANSLATION) { +#if CERES_VERSION_MAJOR >= 3 || (CERES_VERSION_MAJOR >= 2 && CERES_VERSION_MINOR >= 1) + problem.SetParameterization(current_camera_R_t, + constant_translation_manifold); +#else problem.SetParameterization(current_camera_R_t, constant_translation_parameterization); +#endif } zero_weight_tracks_flags[marker.track] = false; @@ -586,10 +601,17 @@ void EuclideanBundleCommonIntrinsics( // Always set K3 constant, it's not used at the moment. constant_intrinsics.push_back(OFFSET_K3); +#if CERES_VERSION_MAJOR >= 3 || (CERES_VERSION_MAJOR >= 2 && CERES_VERSION_MINOR >= 1) + ceres::SubsetManifold *subset_manifold = + new ceres::SubsetManifold(OFFSET_MAX, constant_intrinsics); + + problem.SetManifold(ceres_intrinsics, subset_manifold); +#else ceres::SubsetParameterization *subset_parameterization = new ceres::SubsetParameterization(OFFSET_MAX, constant_intrinsics); problem.SetParameterization(ceres_intrinsics, subset_parameterization); +#endif } // Configure the solver. 
From 97a855fcfc798ef31d62c22746576ddd3aa3757c Mon Sep 17 00:00:00 2001 From: Namgoo Lee Date: Wed, 1 Jun 2022 15:05:20 +0900 Subject: [PATCH 12/45] remove const from functions returning by value --- .../include/opencv2/bioinspired/retina.hpp | 6 +++--- .../bioinspired/transientareassegmentationmodule.hpp | 2 +- modules/bioinspired/src/retina.cpp | 12 ++++++------ modules/bioinspired/src/retina_ocl.cpp | 6 +++--- modules/bioinspired/src/retina_ocl.hpp | 6 +++--- .../src/transientareassegmentationmodule.cpp | 6 +++--- modules/rgbd/include/opencv2/rgbd/colored_kinfu.hpp | 2 +- modules/rgbd/include/opencv2/rgbd/dynafu.hpp | 2 +- modules/rgbd/include/opencv2/rgbd/kinfu.hpp | 2 +- modules/rgbd/include/opencv2/rgbd/large_kinfu.hpp | 2 +- modules/rgbd/src/colored_kinfu.cpp | 4 ++-- modules/rgbd/src/dynafu.cpp | 4 ++-- modules/rgbd/src/kinfu.cpp | 4 ++-- modules/rgbd/src/large_kinfu.cpp | 4 ++-- modules/xfeatures2d/src/msd_pyramid.hpp | 2 +- 15 files changed, 32 insertions(+), 32 deletions(-) diff --git a/modules/bioinspired/include/opencv2/bioinspired/retina.hpp b/modules/bioinspired/include/opencv2/bioinspired/retina.hpp index 91c81484ffc..8e6eda93cae 100644 --- a/modules/bioinspired/include/opencv2/bioinspired/retina.hpp +++ b/modules/bioinspired/include/opencv2/bioinspired/retina.hpp @@ -251,7 +251,7 @@ class CV_EXPORTS_W Retina : public Algorithm { /** @brief Outputs a string showing the used parameters setup @return a string which contains formated parameters information */ - CV_WRAP virtual const String printSetup()=0; + CV_WRAP virtual String printSetup()=0; /** @brief Write xml/yml formated parameters information @param fs the filename of the xml file that will be open and writen with formatted parameters @@ -389,9 +389,9 @@ class CV_EXPORTS_W Retina : public Algorithm { CV_WRAP virtual void getMagnoRAW(OutputArray retinaOutput_magno)=0; /** @overload */ - CV_WRAP virtual const Mat getMagnoRAW() const=0; + CV_WRAP virtual Mat getMagnoRAW() const=0; /** @overload */ 
- CV_WRAP virtual const Mat getParvoRAW() const=0; + CV_WRAP virtual Mat getParvoRAW() const=0; /** @brief Activate color saturation as the final step of the color demultiplexing process -\> this saturation is a sigmoide function applied to each channel of the demultiplexed image. diff --git a/modules/bioinspired/include/opencv2/bioinspired/transientareassegmentationmodule.hpp b/modules/bioinspired/include/opencv2/bioinspired/transientareassegmentationmodule.hpp index d5f5b2fd1e5..86799c26097 100644 --- a/modules/bioinspired/include/opencv2/bioinspired/transientareassegmentationmodule.hpp +++ b/modules/bioinspired/include/opencv2/bioinspired/transientareassegmentationmodule.hpp @@ -161,7 +161,7 @@ class CV_EXPORTS_W TransientAreasSegmentationModule: public Algorithm /** @brief parameters setup display method @return a string which contains formatted parameters information */ - CV_WRAP virtual const String printSetup()=0; + CV_WRAP virtual String printSetup()=0; /** @brief write xml/yml formated parameters information @param fs : the filename of the xml file that will be open and writen with formatted parameters information diff --git a/modules/bioinspired/src/retina.cpp b/modules/bioinspired/src/retina.cpp index fa6e582cef7..15962f61acc 100644 --- a/modules/bioinspired/src/retina.cpp +++ b/modules/bioinspired/src/retina.cpp @@ -148,7 +148,7 @@ class RetinaImpl CV_FINAL : public Retina * parameters setup display method * @return a string which contains formatted parameters information */ - const String printSetup() CV_OVERRIDE; + String printSetup() CV_OVERRIDE; /** * write xml/yml formated parameters information @@ -233,8 +233,8 @@ class RetinaImpl CV_FINAL : public Retina void getMagnoRAW(OutputArray retinaOutput_magno) CV_OVERRIDE; // original API level data accessors : get buffers addresses from a Mat header, similar to getParvoRAW and getMagnoRAW... 
- const Mat getMagnoRAW() const CV_OVERRIDE; - const Mat getParvoRAW() const CV_OVERRIDE; + Mat getMagnoRAW() const CV_OVERRIDE; + Mat getParvoRAW() const CV_OVERRIDE; /** * activate color saturation as the final step of the color demultiplexing process @@ -445,7 +445,7 @@ void RetinaImpl::setup(RetinaParameters newConfiguration) } -const String RetinaImpl::printSetup() +String RetinaImpl::printSetup() { std::stringstream outmessage; @@ -692,14 +692,14 @@ void RetinaImpl::getParvoRAW(OutputArray parvoOutputBufferCopy) } // original API level data accessors : get buffers addresses... -const Mat RetinaImpl::getMagnoRAW() const { +Mat RetinaImpl::getMagnoRAW() const { CV_Assert(!_wasOCLRunCalled); // create a cv::Mat header for the valarray return Mat((int)_retinaFilter->getMovingContours().size(),1, CV_32F, (void*)get_data(_retinaFilter->getMovingContours())); } -const Mat RetinaImpl::getParvoRAW() const { +Mat RetinaImpl::getParvoRAW() const { CV_Assert(!_wasOCLRunCalled); if (_retinaFilter->getColorMode()) // check if color mode is enabled { diff --git a/modules/bioinspired/src/retina_ocl.cpp b/modules/bioinspired/src/retina_ocl.cpp index a161b36a4f4..5bbda144c71 100644 --- a/modules/bioinspired/src/retina_ocl.cpp +++ b/modules/bioinspired/src/retina_ocl.cpp @@ -203,7 +203,7 @@ void RetinaOCLImpl::setup(cv::bioinspired::RetinaParameters newConfiguration) setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency, _retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k); } -const String RetinaOCLImpl::printSetup() +String RetinaOCLImpl::printSetup() { std::stringstream outmessage; @@ -448,8 +448,8 @@ void RetinaOCLImpl::getMagnoRAW(OutputArray retinaOutput_magno) // 
unimplemented interfaces: void RetinaOCLImpl::applyFastToneMapping(InputArray /*inputImage*/, OutputArray /*outputToneMappedImage*/) { NOT_IMPLEMENTED; } -const Mat RetinaOCLImpl::getMagnoRAW() const { NOT_IMPLEMENTED; } -const Mat RetinaOCLImpl::getParvoRAW() const { NOT_IMPLEMENTED; } +Mat RetinaOCLImpl::getMagnoRAW() const { NOT_IMPLEMENTED; } +Mat RetinaOCLImpl::getParvoRAW() const { NOT_IMPLEMENTED; } /////////////////////////////////////// ///////// BasicRetinaFilter /////////// diff --git a/modules/bioinspired/src/retina_ocl.hpp b/modules/bioinspired/src/retina_ocl.hpp index e02dfc4c41f..e68948b50f8 100644 --- a/modules/bioinspired/src/retina_ocl.hpp +++ b/modules/bioinspired/src/retina_ocl.hpp @@ -643,7 +643,7 @@ class RetinaOCLImpl CV_FINAL : public Retina RetinaParameters getParameters() CV_OVERRIDE; - const String printSetup() CV_OVERRIDE; + String printSetup() CV_OVERRIDE; virtual void write(String fs) const CV_OVERRIDE; virtual void write(FileStorage& fs) const CV_OVERRIDE; @@ -663,8 +663,8 @@ class RetinaOCLImpl CV_FINAL : public Retina void applyFastToneMapping(InputArray /*inputImage*/, OutputArray /*outputToneMappedImage*/) CV_OVERRIDE; void getParvoRAW(OutputArray /*retinaOutput_parvo*/) CV_OVERRIDE; void getMagnoRAW(OutputArray /*retinaOutput_magno*/) CV_OVERRIDE; - const Mat getMagnoRAW() const CV_OVERRIDE; - const Mat getParvoRAW() const CV_OVERRIDE; + Mat getMagnoRAW() const CV_OVERRIDE; + Mat getParvoRAW() const CV_OVERRIDE; protected: RetinaParameters _retinaParameters; diff --git a/modules/bioinspired/src/transientareassegmentationmodule.cpp b/modules/bioinspired/src/transientareassegmentationmodule.cpp index fd8672ed590..0f36d6dd47d 100644 --- a/modules/bioinspired/src/transientareassegmentationmodule.cpp +++ b/modules/bioinspired/src/transientareassegmentationmodule.cpp @@ -142,7 +142,7 @@ class TransientAreasSegmentationModuleImpl : protected BasicRetinaFilter * parameters setup display method * @return a string which contains formatted 
parameters information */ - const String printSetup(); + String printSetup(); /** * write xml/yml formated parameters information @@ -232,7 +232,7 @@ class TransientAreasSegmentationModuleImpl_: public TransientAreasSegmentationM inline virtual void setup(String segmentationParameterFile, const bool applyDefaultSetupOnFailure) CV_OVERRIDE { _segmTool.setup(segmentationParameterFile, applyDefaultSetupOnFailure); } inline virtual void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure) CV_OVERRIDE { _segmTool.setup(fs, applyDefaultSetupOnFailure); } inline virtual void setup(SegmentationParameters newParameters) CV_OVERRIDE { _segmTool.setup(newParameters); } - inline virtual const String printSetup() CV_OVERRIDE { return _segmTool.printSetup(); } + inline virtual String printSetup() CV_OVERRIDE { return _segmTool.printSetup(); } inline virtual struct SegmentationParameters getParameters() CV_OVERRIDE { return _segmTool.getParameters(); } inline virtual void write( String fs ) const CV_OVERRIDE { _segmTool.write(fs); } inline virtual void run(InputArray inputToSegment, const int channelIndex) CV_OVERRIDE { _segmTool.run(inputToSegment, channelIndex); } @@ -368,7 +368,7 @@ void TransientAreasSegmentationModuleImpl::setup(cv::bioinspired::SegmentationPa } -const String TransientAreasSegmentationModuleImpl::printSetup() +String TransientAreasSegmentationModuleImpl::printSetup() { std::stringstream outmessage; diff --git a/modules/rgbd/include/opencv2/rgbd/colored_kinfu.hpp b/modules/rgbd/include/opencv2/rgbd/colored_kinfu.hpp index 68b23124c2b..3aac8592846 100644 --- a/modules/rgbd/include/opencv2/rgbd/colored_kinfu.hpp +++ b/modules/rgbd/include/opencv2/rgbd/colored_kinfu.hpp @@ -255,7 +255,7 @@ class CV_EXPORTS_W ColoredKinFu CV_WRAP virtual void reset() = 0; /** @brief Get current pose in voxel space */ - virtual const Affine3f getPose() const = 0; + virtual Affine3f getPose() const = 0; /** @brief Process next depth frame @param depth input Mat of 
depth frame diff --git a/modules/rgbd/include/opencv2/rgbd/dynafu.hpp b/modules/rgbd/include/opencv2/rgbd/dynafu.hpp index 55fe36d7e12..32875ad5ac7 100644 --- a/modules/rgbd/include/opencv2/rgbd/dynafu.hpp +++ b/modules/rgbd/include/opencv2/rgbd/dynafu.hpp @@ -95,7 +95,7 @@ class CV_EXPORTS_W DynaFu CV_WRAP virtual void reset() = 0; /** @brief Get current pose in voxel space */ - virtual const Affine3f getPose() const = 0; + virtual Affine3f getPose() const = 0; /** @brief Process next depth frame diff --git a/modules/rgbd/include/opencv2/rgbd/kinfu.hpp b/modules/rgbd/include/opencv2/rgbd/kinfu.hpp index ebc15f9bb81..696b7265587 100644 --- a/modules/rgbd/include/opencv2/rgbd/kinfu.hpp +++ b/modules/rgbd/include/opencv2/rgbd/kinfu.hpp @@ -251,7 +251,7 @@ class CV_EXPORTS_W KinFu CV_WRAP virtual void reset() = 0; /** @brief Get current pose in voxel space */ - virtual const Affine3f getPose() const = 0; + virtual Affine3f getPose() const = 0; /** @brief Process next depth frame diff --git a/modules/rgbd/include/opencv2/rgbd/large_kinfu.hpp b/modules/rgbd/include/opencv2/rgbd/large_kinfu.hpp index a7a0b7adc83..31839cfca90 100644 --- a/modules/rgbd/include/opencv2/rgbd/large_kinfu.hpp +++ b/modules/rgbd/include/opencv2/rgbd/large_kinfu.hpp @@ -136,7 +136,7 @@ class CV_EXPORTS_W LargeKinfu CV_WRAP virtual void reset() = 0; - virtual const Affine3f getPose() const = 0; + virtual Affine3f getPose() const = 0; CV_WRAP virtual bool update(InputArray depth) = 0; }; diff --git a/modules/rgbd/src/colored_kinfu.cpp b/modules/rgbd/src/colored_kinfu.cpp index 0aaa58cc797..449acd3ccf2 100644 --- a/modules/rgbd/src/colored_kinfu.cpp +++ b/modules/rgbd/src/colored_kinfu.cpp @@ -150,7 +150,7 @@ class ColoredKinFuImpl : public ColoredKinFu void reset() CV_OVERRIDE; - const Affine3f getPose() const CV_OVERRIDE; + Affine3f getPose() const CV_OVERRIDE; bool update(InputArray depth, InputArray rgb) CV_OVERRIDE; @@ -200,7 +200,7 @@ const Params& ColoredKinFuImpl::getParams() const } 
template< typename MatType > -const Affine3f ColoredKinFuImpl::getPose() const +Affine3f ColoredKinFuImpl::getPose() const { return pose; } diff --git a/modules/rgbd/src/dynafu.cpp b/modules/rgbd/src/dynafu.cpp index 002be7bd6d7..3cef71f3e0e 100644 --- a/modules/rgbd/src/dynafu.cpp +++ b/modules/rgbd/src/dynafu.cpp @@ -100,7 +100,7 @@ class DynaFuImpl : public DynaFu void reset() CV_OVERRIDE; - const Affine3f getPose() const CV_OVERRIDE; + Affine3f getPose() const CV_OVERRIDE; bool update(InputArray depth) CV_OVERRIDE; @@ -257,7 +257,7 @@ const Params& DynaFuImpl::getParams() const } template< typename T > -const Affine3f DynaFuImpl::getPose() const +Affine3f DynaFuImpl::getPose() const { return pose; } diff --git a/modules/rgbd/src/kinfu.cpp b/modules/rgbd/src/kinfu.cpp index 4ba73aabc71..14dbdc7aef1 100644 --- a/modules/rgbd/src/kinfu.cpp +++ b/modules/rgbd/src/kinfu.cpp @@ -138,7 +138,7 @@ class KinFuImpl : public KinFu void reset() CV_OVERRIDE; - const Affine3f getPose() const CV_OVERRIDE; + Affine3f getPose() const CV_OVERRIDE; bool update(InputArray depth) CV_OVERRIDE; @@ -187,7 +187,7 @@ const Params& KinFuImpl::getParams() const } template< typename MatType > -const Affine3f KinFuImpl::getPose() const +Affine3f KinFuImpl::getPose() const { return pose; } diff --git a/modules/rgbd/src/large_kinfu.cpp b/modules/rgbd/src/large_kinfu.cpp index aae042cb0ab..4fe7635434b 100644 --- a/modules/rgbd/src/large_kinfu.cpp +++ b/modules/rgbd/src/large_kinfu.cpp @@ -121,7 +121,7 @@ class LargeKinfuImpl : public LargeKinfu void reset() CV_OVERRIDE; - const Affine3f getPose() const CV_OVERRIDE; + Affine3f getPose() const CV_OVERRIDE; bool update(InputArray depth) CV_OVERRIDE; @@ -170,7 +170,7 @@ const Params& LargeKinfuImpl::getParams() const } template -const Affine3f LargeKinfuImpl::getPose() const +Affine3f LargeKinfuImpl::getPose() const { return pose; } diff --git a/modules/xfeatures2d/src/msd_pyramid.hpp b/modules/xfeatures2d/src/msd_pyramid.hpp index 
9fc3243a320..9b1d9037f18 100644 --- a/modules/xfeatures2d/src/msd_pyramid.hpp +++ b/modules/xfeatures2d/src/msd_pyramid.hpp @@ -60,7 +60,7 @@ class MSDImagePyramid } ~MSDImagePyramid() {}; - const std::vector getImPyr() const + std::vector getImPyr() const { return m_imPyr; }; From 0eda296f4038ae058d76e2d828da8d0e7991cf85 Mon Sep 17 00:00:00 2001 From: Alexander Panov Date: Thu, 2 Jun 2022 11:50:08 +0300 Subject: [PATCH 13/45] Merge pull request #3256 from AleksandrPanov:fix_aruco_axes_docs fix axes and docs * fix axes docs, tutorial and add estimateParameters, change estimateParameters in test * update docs, add singlemarkersaxes2.jpg * fix docs --- modules/aruco/include/opencv2/aruco.hpp | 68 ++++++++++++++++-- modules/aruco/src/aruco.cpp | 43 ++++++----- modules/aruco/test/test_charucodetection.cpp | 6 +- .../images/singlemarkersaxes2.jpg | Bin 0 -> 79898 bytes .../aruco_detection/aruco_detection.markdown | 5 +- 5 files changed, 96 insertions(+), 26 deletions(-) create mode 100644 modules/aruco/tutorials/aruco_board_detection/images/singlemarkersaxes2.jpg diff --git a/modules/aruco/include/opencv2/aruco.hpp b/modules/aruco/include/opencv2/aruco.hpp index 91d57cef27e..e9c9e57a081 100644 --- a/modules/aruco/include/opencv2/aruco.hpp +++ b/modules/aruco/include/opencv2/aruco.hpp @@ -40,6 +40,7 @@ the use of this software, even if advised of the possibility of such damage. #define __OPENCV_ARUCO_HPP__ #include +#include #include #include "opencv2/aruco/dictionary.hpp" @@ -219,7 +220,55 @@ struct CV_EXPORTS_W DetectorParameters { CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr &dictionary, OutputArrayOfArrays corners, OutputArray ids, const Ptr ¶meters = DetectorParameters::create(), OutputArrayOfArrays rejectedImgPoints = noArray()); +/** @brief + * rvec/tvec define the right handed coordinate system of the marker. + * PatternPos defines center this system and axes direction. 
+ * Axis X (red color) - first coordinate, axis Y (green color) - second coordinate, + * axis Z (blue color) - third coordinate. + * @sa estimatePoseSingleMarkers(), @ref tutorial_aruco_detection + */ +enum PatternPos { + /** @brief The marker coordinate system is centered on the middle of the marker. + * The coordinates of the four corners (CCW order) of the marker in its own coordinate system are: + * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), + * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0). + * + * These pattern points define this coordinate system: + * ![Image with axes drawn](images/singlemarkersaxes.jpg) + */ + CCW_center, + /** @brief The marker coordinate system is centered on the top-left corner of the marker. + * The coordinates of the four corners (CW order) of the marker in its own coordinate system are: + * (0, 0, 0), (markerLength, 0, 0), + * (markerLength, markerLength, 0), (0, markerLength, 0). + * + * These pattern points define this coordinate system: + * ![Image with axes drawn](images/singlemarkersaxes2.jpg) + */ + CW_top_left_corner +}; + +/** @brief + * Pose estimation parameters + * @param pattern Defines center this system and axes direction (default PatternPos::CCW_center). + * @param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided + * rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further + * optimizes them (default false). + * @param solvePnPMethod Method for solving a PnP problem: see @ref calib3d_solvePnP_flags (default SOLVEPNP_ITERATIVE). 
+ * @sa PatternPos, solvePnP(), @ref tutorial_aruco_detection + */ +struct CV_EXPORTS_W EstimateParameters { + CV_PROP_RW PatternPos pattern; + CV_PROP_RW bool useExtrinsicGuess; + CV_PROP_RW SolvePnPMethod solvePnPMethod; + + EstimateParameters(): pattern(CCW_center), useExtrinsicGuess(false), + solvePnPMethod(SOLVEPNP_ITERATIVE) {} + CV_WRAP static Ptr create() { + return makePtr(); + } +}; /** @@ -240,21 +289,28 @@ CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr &diction * @param tvecs array of output translation vectors (e.g. std::vector). * Each element in tvecs corresponds to the specific marker in imgPoints. * @param _objPoints array of object points of all the marker corners + * @param estimateParameters set the origin of coordinate system and the coordinates of the four corners of the marker + * (default estimateParameters.pattern = PatternPos::CCW_center, estimateParameters.useExtrinsicGuess = false, + * estimateParameters.solvePnPMethod = SOLVEPNP_ITERATIVE). * * This function receives the detected markers and returns their pose estimation respect to * the camera individually. So for each marker, one rotation and translation vector is returned. * The returned transformation is the one that transforms points from each marker coordinate system * to the camera coordinate system. - * The marker corrdinate system is centered on the middle of the marker, with the Z axis - * perpendicular to the marker plane. - * The coordinates of the four corners of the marker in its own coordinate system are: - * (0, 0, 0), (markerLength, 0, 0), - * (markerLength, markerLength, 0), (0, markerLength, 0) + * The marker coordinate system is centered on the middle (by default) or on the top-left corner of the marker, + * with the Z axis perpendicular to the marker plane. 
+ * estimateParameters defines the coordinates of the four corners of the marker in its own coordinate system (by default) are: + * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), + * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0) * @sa use cv::drawFrameAxes to get world coordinate system axis for object points + * @sa @ref tutorial_aruco_detection + * @sa EstimateParameters + * @sa PatternPos */ CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength, InputArray cameraMatrix, InputArray distCoeffs, - OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray()); + OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray(), + Ptr estimateParameters = EstimateParameters::create()); diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index c4e895b908e..9385d434206 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -810,19 +810,31 @@ static void _identifyCandidates(InputArray _image, vector< vector< vector< Point /** - * @brief Return object points for the system centered in a single marker, given the marker length + * @brief Return object points for the system centered in a middle (by default) or in a top left corner of single + * marker, given the marker length */ -static void _getSingleMarkerObjectPoints(float markerLength, OutputArray _objPoints) { +static void _getSingleMarkerObjectPoints(float markerLength, OutputArray _objPoints, + EstimateParameters estimateParameters) { CV_Assert(markerLength > 0); _objPoints.create(4, 1, CV_32FC3); Mat objPoints = _objPoints.getMat(); // set coordinate system in the top-left corner of the marker, with Z pointing out - objPoints.ptr< Vec3f >(0)[0] = Vec3f(0.f, 0.f, 0); - objPoints.ptr< Vec3f >(0)[1] = Vec3f(markerLength, 0.f, 0); - objPoints.ptr< Vec3f >(0)[2] = Vec3f(markerLength, markerLength, 0); - objPoints.ptr< Vec3f >(0)[3] = Vec3f(0.f, markerLength, 0); + if 
(estimateParameters.pattern == CW_top_left_corner) { + objPoints.ptr(0)[0] = Vec3f(0.f, 0.f, 0); + objPoints.ptr(0)[1] = Vec3f(markerLength, 0.f, 0); + objPoints.ptr(0)[2] = Vec3f(markerLength, markerLength, 0); + objPoints.ptr(0)[3] = Vec3f(0.f, markerLength, 0); + } + else if (estimateParameters.pattern == CCW_center) { + objPoints.ptr(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0); + objPoints.ptr(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0); + objPoints.ptr(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0); + objPoints.ptr(0)[3] = Vec3f(-markerLength/2.f, -markerLength/2.f, 0); + } + else + CV_Error(Error::StsBadArg, "Unknown estimateParameters pattern"); } @@ -1221,17 +1233,17 @@ class SinglePoseEstimationParallel : public ParallelLoopBody { public: SinglePoseEstimationParallel(Mat& _markerObjPoints, InputArrayOfArrays _corners, InputArray _cameraMatrix, InputArray _distCoeffs, - Mat& _rvecs, Mat& _tvecs) + Mat& _rvecs, Mat& _tvecs, EstimateParameters _estimateParameters) : markerObjPoints(_markerObjPoints), corners(_corners), cameraMatrix(_cameraMatrix), - distCoeffs(_distCoeffs), rvecs(_rvecs), tvecs(_tvecs) {} + distCoeffs(_distCoeffs), rvecs(_rvecs), tvecs(_tvecs), estimateParameters(_estimateParameters) {} void operator()(const Range &range) const CV_OVERRIDE { const int begin = range.start; const int end = range.end; for(int i = begin; i < end; i++) { - solvePnP(markerObjPoints, corners.getMat(i), cameraMatrix, distCoeffs, - rvecs.at(i), tvecs.at(i)); + solvePnP(markerObjPoints, corners.getMat(i), cameraMatrix, distCoeffs, rvecs.at(i), + tvecs.at(i), estimateParameters.useExtrinsicGuess, estimateParameters.solvePnPMethod); } } @@ -1242,21 +1254,20 @@ class SinglePoseEstimationParallel : public ParallelLoopBody { InputArrayOfArrays corners; InputArray cameraMatrix, distCoeffs; Mat& rvecs, tvecs; + EstimateParameters estimateParameters; }; - -/** - */ void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength, 
InputArray _cameraMatrix, InputArray _distCoeffs, - OutputArray _rvecs, OutputArray _tvecs, OutputArray _objPoints) { + OutputArray _rvecs, OutputArray _tvecs, OutputArray _objPoints, + Ptr estimateParameters) { CV_Assert(markerLength > 0); Mat markerObjPoints; - _getSingleMarkerObjectPoints(markerLength, markerObjPoints); + _getSingleMarkerObjectPoints(markerLength, markerObjPoints, *estimateParameters); int nMarkers = (int)_corners.total(); _rvecs.create(nMarkers, 1, CV_64FC3); _tvecs.create(nMarkers, 1, CV_64FC3); @@ -1272,7 +1283,7 @@ void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength, // this is the parallel call for the previous commented loop (result is equivalent) parallel_for_(Range(0, nMarkers), SinglePoseEstimationParallel(markerObjPoints, _corners, _cameraMatrix, - _distCoeffs, rvecs, tvecs)); + _distCoeffs, rvecs, tvecs, *estimateParameters)); if(_objPoints.needed()){ markerObjPoints.convertTo(_objPoints, -1); } diff --git a/modules/aruco/test/test_charucodetection.cpp b/modules/aruco/test/test_charucodetection.cpp index de474009a5d..905d5eb7674 100644 --- a/modules/aruco/test/test_charucodetection.cpp +++ b/modules/aruco/test/test_charucodetection.cpp @@ -439,10 +439,12 @@ void CV_CharucoDiamondDetection::run(int) { } } + Ptr estimateParameters = aruco::EstimateParameters::create(); + estimateParameters->pattern = aruco::CW_top_left_corner; // estimate diamond pose vector< Vec3d > estimatedRvec, estimatedTvec; - aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, cameraMatrix, - distCoeffs, estimatedRvec, estimatedTvec); + aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, cameraMatrix, distCoeffs, estimatedRvec, + estimatedTvec, noArray(), estimateParameters); // check result vector< Point2f > projectedDiamondCornersPose; diff --git a/modules/aruco/tutorials/aruco_board_detection/images/singlemarkersaxes2.jpg b/modules/aruco/tutorials/aruco_board_detection/images/singlemarkersaxes2.jpg new file mode 
100644 index 0000000000000000000000000000000000000000..dc8edee15d9515c00e3c15cca0f1940501f8b7f9 GIT binary patch literal 79898 zcmbTdcQjmI_&z#}D2W~tVi*aD9yJ(ki0ILKCtCF0J5e)63qlw~CtCCpCCU&{V)PPa zFwu=(20x$gcddK>xqsYy_gQDJbJp2=pXXWcS$jY4yWev=bGreXP^1og2?*W1L?h_Fb{ZEtsPt|P~fQlGz89xk&_Y8nfg$Ja&}KY9Lwjh%y2=#{XDC`3&5jhwuKqLQ+}Ti?L^+uXt)9vz>Yo}FJ@{>KXs0Q_HCclG}xFRD9U z`2X2}=s#X~_`Y{1kc!~mQ+`5f8BHQf_lM5}!ii~KC+F98lduSC9nik>_;ddetI*1G z?0?k$huQx-ViEtpnEhYG{%>A$08${{o$-KF04U(HYQtdd*Y3!&=eAB&(QE1X6Q=s1 zia`?2g6dG$kup`U!7kRTt1Eq2qu`_UX&+&W0H1P^hdT`T=D1hi%icF6Z^b`gb&5Cn57RefZ(Xhwo2xxU0Ph7)*Dj2CgG$O?Ecv9yRsi)W#4xcx?uKh2n$TM=#%bg-015X`y$Wv=DE z{`VJivrH6ME;uVyF8R<0#_Ekq(N|%_7mva0&={%yuh%ghL<{H8pV~q4SjOUE6>|X#rl@R(WY3?&odoh0ohDE#nt;B0v`=bLvXIM zB~z|-I{z{OD5?2gYYI`Zy4ufdr^R0tdops~?56WOAlOm1UQmI(DBU(puW!Pi&MNVo z+)-+E@lZ{jrR3rku&uYb2!g-xtawS`I#n?eu(tN3iJLby73t(#URO}MpzdJnETo_I z8DANT+7Fm`en4!qTIg#_2aS4-0UTvdR(@b|sFm=+2*Le+9{SyIy6o6(_q{5Xloa#S zNkiIlsWd1eC%j6Fa$z5kW8;DVK2h{RskNIDz~cCFGR>Mrr7hj8qZ6&6e3b~W4w(3E z88e~l%gU4u7tOP>@}%iP=MPj9W501(uI8wX3*qSEkIvHn% zVljX6xAEuO+CILy`G7pSV`Slt;fBnhv7AG%H{wGnZ z`V_*)j7YHh&tj(tz(w0i0J`&HHS0TDMjZ&C$TLkpB6hM724(?1^^n=b84PR@hLjZX zH<6II$y#eZ_2_W&e@6~E>FVd`v7mF`k-UD7SIjDUL?1H@d$f5XbWxxO`0gSRp?^8y z_^U$!!X_Rq$$tx&-pGCUd;Tj&lSG|-VVG3QaUPL9%+nbSl{)hKD{ztln%~aiUbzLt#WQ>6avzL5@cu_AW=C`7 zSXkHW_Xb<>Mq>2yBl6}!06a%s}us_**^56DKlYNzVL2JeRg&r=;E0H9ibg6-F zGAG&BPN^7axiFl5uk9K=O}BgeZP8A3{6z{0ra{TB!IoblKmv#C zqrX?mr#n#PKH;1{W7D!HmtdofQ=~?#SIc4g%IhIK`Q`4gW;Nn|nX39vOAX=oD2SFo zfq|MF0=yaAJT(amhk>WZqf-sKc8&`f#FA4Ogu^X>1n2pDsw8w}31ojz9b4b*^r~gQ ze#%~d2-a^KWo1G0$?6b+$3ie=9veb_4j+}p%70whRRIVLqJVrmPT!y2vJHKe?@8%4PjwI!aETh-{chQk8dh(=htxQkS!PznZAdJuH^m=9@EJX$9}r zWm+#Oy^&WJMEr50w16B3HOXU7O1zN;yRl2`gmji7EjPh4Ni1~Gpcj<<|!al23 zUM;5&3^-nBv7^%_p8ihWqfK5CYE(7wWG%L1&rfvC`}Azsz#Rh+OAsWd)?C~AxU&-fz;Mej!C~EGf~WBj5KKJr zf``oDX<5AR!oB@zXTGQAKjx=Clm3%(N)m8`rg3P(0#3^Ahp7*tF2`8N0c;HbX$_7M zvD0GJ_qMN_=~=EC+%+hDK^AA%9B(+gUP4YbRk;dCxWqvt)n7?8(lsbpZJUP!Weg0` 
zKXOe>mz&IdD{w5`9JX-GqPMXQO2X@*2pE6)`=?vW_Vg3mJ^#x@^Zs(-#0&@s9sh4F zLh{PT9WLb%UP)7Hz3h!Vt~Pz|C-Fng%$Ydd;$agM^GeT|HnyiH_X}gix0Fe8MWp#u zzOY<&0q-J`tpj=;>~JwFQM&e0h_xT{mZHR{L^xXH7Epn%Rv*pUW%|XPxOhB>ERq#Q zz(lw$EP^>N!ci?h9iHg;_@tPslgvKB0IWP8hIMKw(jKzyHle%8QvGa4mfOpHO=3T% zj0CVVUXfBbTyCSmabEkR?|mOXR6jJXxTmocV&a}_0!%DBtxoG84&rM*U&r>If6=Zx z^2#Axv3vJVS;facj(_X}@HfkK8spY;u79rgAEXWDRPRJxiGx4f0?3?~j2bk*ndde* zxajAK$${HO3mskuxcm*AE?fA=UqJZ>*AtE?IlytXeC#=G^cl1xwJ3i<7Jj7o-2)*C5Eto+{l(`EWkjpmc=sK<|6fY|UCMGNA?=9`9pb^q8s@Ru(`DJnHu z4mL&c+!Rb+a>=oW38WCU7Z+^Mn?jgD11DZV+$%1%QtMeiKMfQ0WV zy4a*{0qRB#&ugi_j!^H-CI$EGx$9($rN~GI9-(f`LGX1*a=0v^o)b0y7TMD{ z-t<0e_ru7r9{RzrGQX1a8x{Ngc8~MFl60?83*-LgY<|!FUcpWPa@h&}8Afd8D(H}% z?IGr-;GhflPEz7!&2~Cj4t$rbLHlwrZSCPq=t(EJ{XR#A<8MB^F4K0rnVU~!71YLn zm%D=x8f;B_u{de(3bDol(tw(ey5f3^xOvO_E_gNmtcmK=uay7fxG*2i1fkdoa@yBC zbJb3&5bx9N*HTn#BN7N;bXF1M{E?&dxblM9>=Y+tTie9TD(fE&FyhxgH8*18$Ymse z0}^P-e!tB(B>tsl3?I*ZzrJ}rK(PoRpbC%R6GTo`FV35onf-2zjbL$!|26yz;Sabo zf^wL>uaSpj)F%=Fnz@l%K%mRATZshBJu(S^uGD}{)Y^kSs;sy@zyl$F&<`MkQ1$=38G`j#t)p4EGPp$m#B3lmJx;;( ztzOaa_Zh`T&q8+s_nKllcFaaMJsZ3;Gmq&*YvoAqvtO3t5k8$}9T&^+W=!g^7S%Tk zHPnku8Huvz>VV|UOH@jrKfa)1ISYd?=W^3;b+r$x>8rdF>-WP1b1vwS^uPnKveE(x zCi7_{zj*P?A%nwOlZ5BQjcvKaM^=y6-|>vPbxq6^y+>+UGySpS&aV7?&E0f6l3NP` z(t5^CVVurV0MaF z;G{w<%aGeazG?5b_e%m13QmE6A-aq^87YPm!X$1*1xaBBq_u-5Yr*1w0`Z>X4lS)y zY}`o|1o|^w1pK>^w*aHM2jbnQ*>x`#zLGT$(yc{ipOpuE;`KTb^k<^i-!?ou-}FWog&ySM4Y)5^_?!U zs3m%#hfjbU3`zqRI!U2ORJDPj&^^E#eZ7NL~4rN8<3+Ed&SesnkoNVc47E~f@4N$oGW-yr1p zn6_v|oy^>?t?!9`RDE-I%a1nr+?-^$zc1C^X6m5CY2jjge4Etqb(#b(_=6Pwxu?28 zdvQyHc>KvOAAMYHgZhhG^U zfi)TuY(#0F9@L>o|WEW;aC(<87mxqi*g2vu9(c!0awdG6PcPN#F2X9$Emd%p*9^&M+ zFT)zBxQwv(aTDDrq=en<#zOL0SoV2cmNG8-v{XEd1F2#Y<(Y8Wy785Sc@iN2Ioa-$l}u_T*x3?13pHwgW=rHNVFjyyS1z z9}d|XyrsR>BFj4HQ=DX!eAgbmmf!?DCjS=jB8;n^9x4D1t-d%b&$@fftZ=Bk9=;*@ zOvYp^8&sOmQM!WK9nr%W`sA>4?!l^;6wMfG)?X{kNW#5{HBt(6QeqF_(rT()ZD2eb zv~KNZzN?Pj{^6;sPIT_4OzegVi@Eqh072Gvjxst#!Hk@l8oX#seS6#>( 
z4i!UFtDOr72MY)%p6UclCxi@zl3~shvE=5lV&Ax(g~|+t4i}l06A|WLIl68E_UG%4 zTYd#_c0Kljxzc9cW02K_PPe0Hz%Z}77nh3lYZnvTqTuGfhKdCRUN|~yBH0%v5>lMy zJSG1e4(Y%5-gB0&&c3#WlcgJj!|J*SwcC*lZliEq{>$RuE0UnIrFIQ^ko%sy>Y0&B z*^_(lQ6?)hDa2%Q=+`=t%Nhj}yW^_aUGflX0#98wR9ZM4x@56j%VLw&zp-eg-+hEX zb^eFNmJ~Qm*3v_a%B>C}VdCapZzlU_>!zBTtSBGh+a;M;?3 z5}q&2Af^wYoTrRdKLg`z!XijOyk?nI=7t9qa@jo-p}vuqQ`oFvHN!E9Dfzix5yCu= z01ULqOAHxp3rl~43`HJRg=~>jCXOUT-VDFb8Sy1V6l(L`oQhsf`lgZ%A{~+w3C>E5 zC5=n>r#|=kbWy~wcf5S_{rrWYVd#X3{Ms$R%R}%oe$#ZG!3VQcWbHRHTOGtwD%0fl z=qeVvvz)tbsHdz`*q-_4L&nP&6vIqlp9-?2)a-R%iZ2r0BMyYbdlefV3L$<{$Gh^j zOE7?R_fvT{(f=mx3I#XXjNb&2ORj36X+gUUQoBw{9C{Ztj>c1lx+5R(%C&uiJKI?1T5^0{{v5fQSW;l%L>#Bc zaN;Kq{AviuZd+Hi7bcuGzyhoRn@L8gxcpWmp0R|TZVvV-y~!YH51QHRqT3!VIV_EM zI$(DpL;qmusEpNzV2{NO`TKDs3U{PQkH?_0-h7Ilt^}xT^rgwsA;s0f)71?IuxN&G zV3_>i;JM*b(71>mJyw7a1h?Qy6f0*?h4d7yRtQh4Qx z2&c0ad?|uiQ(c_{6j*EMOUA=4)V-NB^{ZpfG%5|xq zYEIP2bm3Vg|4BAn@60YB@8P$3QqCM+LMG0d0#4EE1`7i~)}c`xExA0W(FE!1H3DL{ z0Ogda2li)l7!Ngh3Kr2pMo}~@OYyNF_j~tr@J~e|M8^+K#xdHQ!Wt)f*+MFhJ`b1j$OeM(*IPn!#W_moGTP&$<>Y zcw9mWIyBo!7(&AAoMHtE-4s}IO_DGE0Haue49wi_>%q(Xk<)NB$$#l~e(1ms-Rxa@ z3^)Udq500>_vel85OELp@L+!cb^$x%U5>AM;Ox4jSjk4Jl3lxoW)vJ?-1YqQ?tN9-H&8Ui`pq9f#X1XxKwtik&CwPz)qJ{QsZU5~SwLuivN zq=R6A6Hx|X&CI9fYPW#v0NwlJyWGBbQ5rvp&OL(Pt99vTu_?yWzSB=}A3WaQ#BEx@ z`oI}&P%f7XKc+k19)&|F>arVT=LQ{L+uzuw!0_4WdMtcz>P@4P@nh(8wPyd}ZRZ}v zJ0hRn0`vqt2PBw?(bWB$qfgT6B9y6vsh)PnNQiubXZJf~ElsEQ_l&|H>nBYyu(- zNT(}re*S$F`Y2@VNyyi&2r`c6ou3Yr4l~|aNwyDC9$n8JGJM4vOSDjghlR(EYNJ31 zvDJ=mw}1%8b2SxLa=(vA5uX(!f9D;Z?8X3&bPYm2LbIpytO4;O9*t27o|}uEFUL6C z5$Y?iqc>qse97iZ4!`{(`&J@p-(~GSd|w6vtS*A+gpb|=%sw?fpV1|-CXb2t0Py## zK7;w|_lAO|&2tnm!_+uZ*Qs%qBs>0ts_OM-1iLA`m2htC({6#rQqM?+|x2S?dHlh+u)G_WK-beB=!R{NclUO)IHtgZtFJV0Vn;MoYBk;u2=yN zLUOl|j^n8)HF?1iKC#trOiJ>=4LZ@{g&z&y1WZSK3fWC(uu(wPBcv0G9n)@HI=?9l zwS6f&LgKTz-cR&1HgKdShG&(pzG)_cf1z=rc1T%^DWTiY|NA6ViVRcn}lk-PRf3`V9~L?)1{q^6VX1 zoHhO44nFW-m4Z?9-pHJiR%GhJY3Bzh`A!I<-;17}EGMF&rY0SI>-88d%yr4Tk&uG% 
zR##p}3sT&V^20JwFFG_EuozmzJ3GevIiA1!OtNhu!~(4E;Gd1vxoLycSqI1`{JpTGQhEw;ANTmx^|d%PcL6I1dm zt6O-NYXThLpb9L`Tn{)413u+P9;U)}H%-*zLO>(1Wtr^AG5xWWc?EXy6|Ih&Nm-8>wP-|;Q?;$t{Q7G|?@fdN^pY z@%>pXMGg~;i1#Fcv&@K3Njfi+b(aLXm z-^rQ0&^>AiV1h8pNtPrJ5HZ8NHuCh@O8W@~LVxD8+c8`s_`64!-L>AUJ6`>%8SEwy zUf;fSSFfvwkIyobjuV)ERbp^wX;csSp(?=7PbZsGuutwLjVY1jG?;}kM^FrhAKn5? z#t6p5_VCOT+Cg{G)k?{wA$%zYmZ$1OcZjyNHHr;Q&};}?_8B;b-}bH>YgjM%OKuVH zNBV~ZVhf4i!2Y|5ck+ukSD;^`Ac&ABu{FRO-P9CN`atQ?18dTW==IZ#sMaLN3COqj zTUnqspKAN-(tBy_34FX*Ud|JgyQa7n{*|5&yg`>Uo#4}SKP2^ z_MNz>H=s*IyY(L*@uxhmNGx+6s2mD?c-WLiRIOHk+o)&hj5fp<qVtjLDQ1m0P~Tx|0eP*2p~JH_J!t$*5{ZjUQ!4CZlXp$ZQR zle!CN<0>3<>m4!hVG=6UFmO*3NB>2nZ;5RP*OJyb2yRnqZNv{x%N5Rw35H` z5`02CtO-6lT6o=LDFR&{gmT*XTKqriU(m(BdOq~HI?FDPK8x#XS#6_04R>LP-{xjw z@8n1C`YUm^ltKZJ3*FO1Gj-R>caDD{2Qkr===N#vQZJNHF?$qlEBRNKb(qq2_!6U| z${Xy9j}M%b*F@$)~k{lG!w(n2k@6Y%;m(d5(YvFMw$I<2Pzf-eo$Wcpvy%5 z8d#JSBXNR@3E@_fERV-FGc@Fdb?%9-m101e z4}H&$S%FLO09XQw`ZH^77c?fodXOW-$IaL7y-Q>X%cfTU`pYN61lG#|VS(;ynlvsU z8ng8GTA=0*fU2<I4l1aoOGg>Kd5+%NeXZ{51UE+}Zo_*}Lc%YXINY_6&t5q5PvDtz}ALdZe-lV+*n-58oe@x~)M_ z-hK$>)W}@-hhlKYg}MyEfwtK#;Fs)z1ZMurtA%i8nZdz3Rt}Ad8iiE52xyveBmodE zh*bOZ@agN*jdamCW_EM~Cu3Ja3$3W`KT128Z(ee2`C~~1C)6}+k~DBrnFinF?B6|z z4tZpC=usANV06EvI?+jyL8kojo!ZZ>6lL!c1z;*A_9Unh15{^Zb?8Y(MbScEpH8Yt znL#fX%eoDclSeQ56w7N2m`aDkp1TxW&*(q9qqHHO8Ar z*Iw;T>8JeJ;fY4c%-tiQqtl0Fv~Gn?+Y6}G*ANh5f=YNS;`u0WsxY2OdQP6mEIM1- zGaGmlI_eN)nMAaR>MXgSLD@Pl=G0+aOgtCRbDvchTR?VU_C{}kk(zbzHV@*>-A%J` zJt@x&nSpY}ce4<-?Q6&E^T<^$72;ggw$H@OFcb4Yco}$Aem@VGUzwn-8~Gabe4;9x z>;5`zKpPW&qK6_lA!z$b9;cL{iZYhI^7u0J({92>p3VfWPy@Kf6s9vcYw)i?9bu+; zl#NdSyEDPR|4Vq)^hR-8c3$#1%~Z z4bnef66GORRsAw{yboG6?b&*Be12cd+0qX?*j0g|e2BzuSfXxVR2)05iQ&oK%&m7W{*wZJ-O7cy#kBKSGai zPT*U~#GUh(N!u(txE4N_yhg;lymtIRmD-t(Q5`hKuTsY5t9_1D_0_ex@F$=a9=A^C zS)vo&R~5Jg;B9V?VQ`5?Ra!09_Zrb`L}h&n%x1pc+bUc+MY3+{>Th-I+{I%CZpxyg zm#IdD`kgCN0>E1K=o5MAj zK$4$jpL+BNg#Lu)-d2uOJ>Ku_PO>1jRYg*x=6k7aXnypoyy7uIJA)O9Ug zwxR?85>Z;zrWhh|)tMUA8v>qi-oJ}djBr&v*!Cp@9Vq6H%dwQFF-QJ$RGiHAu^y#R 
zE3O3J3v#|rm2l3pSNALkqHS);67flpfChpbS!&ng$?34!$ypb^NU_el5Jyag09oK^ z$V5S-LRZSh+_rW(*x^a~IC*7TQu)@@_<;UHyQweS>Dz_YkCQ*g9_=+1u46Xw=l>hA0To+ zdm46r3%DWCykheE!^#&V)%?fS&HZI^$WY`K?HTV%{4bsNN~WgJ_RWNVK}WmmP*aKo z=i=H)9W(o;15=j7wB;OAHA%kE`dWm5AUS()EAJjP!&n2TPluF1sgcZyR)bdlJL!Wt4@K)L${BhV) z*>D9JZ-bjLP345$M2rGZG}gN zb8i~dEK^;E!*(Uxo>UET_8Skfu4SifACH=Mp?;_EI|h9kxgR}xKF*%4Dr7-B8xA{R zTL5lrJt%u5Vo%BYOJ)&K5?=|*;4I;@^zh|3AN|LvpvA)1mYkZ?SqwD-lvJj=u57Os zE^m5v$n35+VMb~1)Md8{qtwl|7M4J4_}J)gGR0i|M~3%P+U|#+ENiZ@&85s1VY7rC zkrUjklR|OzYoCyq*&+NECuiREHu$$m6S|lyc zA8xm1EalVi9MDpidyP~j&)E<2mdg@Zsj>0LF4|`I*=C)8w?$!hThH?SN zu=|@?Qw8h!T(4YNvQk{B7)rbY8y#<^GuG_0p69VANlv2bVPR_oG81qJ>oS$eepm>8 zkPJ4eAvg9_+ZQh4T2PrC^ZPY^fZ{t%wSJcDhsoW{UKLA?W?vs(;m=yleHD)r-SYSo z77#2imDcMg7i4S|ELC?pbW)r3b8B_Oy4ka3HC~If@L@xfKoT8nk8ae+OJ|B+Xy*ji z7w2|4zoBmwqF`4xgjHE#>^ya;tJ)ItkdhiWy^Kp|O9k!nCV3)>2uQCTCp)Omo3yYL z;k({Ys;!5MODg_+{!gZdY&F*9UmpOF(ooPwhnIS62g_u=V*+n)2uIr_0X%hH&ZA$h zDcQrL5KRyv1B{?h<67*%`||1B{OhPU#r}&Xf74S$H5Wg$w6w26OJDhHk%C~NLP~EZ zeRSd@GPhnI_w+_|q8-W>ZQT+-&Pu*7D39v}oicm4&%XF?_sgon%%uR8BLwmNy{ z^Q)bzaJvYO{3JJUlb5b$Jm0KX+`0u&<7E7r2y%DnHTa#ZY*`osU=+oH`c*jg)`PV5 z&KRj@;79fc_8p7|WA&}|2#achLg3S&WaKYt*`(eGfar6b0(l{nds`FQ%1AUGRl!q~eThYS=)4&aLU8kHR)((y4p~lnZb)m!L3Mb4m%!p| z#PfW*H^V9GrcB(hr+&*QoU-?E((_MICO%t)T)Wn-jR$=dtnpH#HZIC!?F4ZGgkn}e zx_y)Lc&Z{tkuubW-jUV}0VAJ_^rCnfrn9JUNtXRoRPr8YyWj=?ZXCHj@Oj(GGT7DV z;^9v-k@K>0htpQJXbJbq2RZU9T67OJk|KW3vPLN0Ct^JYJlQ@Dh1~+cJ11BBBr@eM z%A~SfR406W*t@AoGwIUqOaDZ(O^_Ft8*m`)dAIkUK@uRYj}Ip<1t6HC(0~WRL|D>r zRHrts%;^0eicEy!GOy-5&anqaai!4>zQ?H{(X^7n4<~3)DPy~C54Pu)C z8^K4i`iXsuCPJQ~dVRrN6)w8*xz{8yBNeG8{f|pf%djZ^u|TFTr3|`75($EH(!cZ- zxR6eUtsSUKl9PM(JuJ`QqA6R5HPLAdQtE3t2_}z@vO^?r#U1~e^x5$wDO|WV)X&+#kyE^cjveAH4Z4WS zLvh8zx69=mX#=Bg>&tSz`+_Cy8CX>|lFY&Uj3k$O>Ta%@SBe)tmrwk4U1WgU@&f!3 z{sWT#(6wwoo{G4yNN;tjV~aHSVK+KmqcE!<`&)oxoKDUSZkqdYMt(3Abj;H)KH+f=7yVS5dy*H!>cdfLgn1Aa zI(kxcGxjztz^+qyr4aktj!Tqo=SjX4UxM*Q%d&^S3>)sa?1Fl&6%o!rwL z!OcHa;x5r(;>My*g?=}Arv(FQ&3 
zGY8@Q3~oZAvx?u-$T2?PE)fv{=FZWdD`~=Bm$5ka|3MMl0%UZ)&c1ZBE9Aie@B^`W z)EiF|koVPyIx%Lx&gd`xI$|SAn1luIZA4l=5sYV1_SGOA=nQfTkV@05B`c}j;o*aN zOW$CH#yu#4u23SB;c?pV=96zr4;@qAw+1r2Wys8ydoP%n4L7)qqPNAX% zi|Sf;p~t^`1M{KY6xVgKDbS9MI{nm^MOQ2JVMJw~y<@(8t^E*nneN3TA;3A$0&fOD zLm{It(bHwKIe+KGKsnc}Qk6-7RSFoR?8u=Sg0kJErtu_eQb7-e>A#;D{RB_P2F-$7=~d#Y+1IXCFfPv)ZIy-2x(GDB_;oIS~ds zDU3{9&TpL;e1aS1C=FoVGx1U%E+@e=4{q*b53YVfzk4t3Y-8v9QvLM%5RVQPhcm(@ zj4n+wd@|lALmJ~XX*}qPVY{$L@(E$D6`3PvZx-&P=F%Lnx7Ds2O8@>*YhnyeyjZ3@ zwkqvIlfGwt>2Di7a;itq8EC_}l^|Wnl%Tr=vIrv_-}ToW*H@t=jz-Xkdr{+aQ4{TE zQ~j6kzWnNDoKvpqa%Ro6k~WoLuBn#XpwZ)B`DC2(_t*BpblI7X9~XaFkI+$>Zqa>R zhVP$t=G*ipjdn|lPA3tX2Nb8U@eqldF)=R$*Dc_~_8TlFwgHbZG2V+Jc3N@NXO^SI zE>nxI_ud={j2iIP((xp8c?s6RY4(rT1{*0WJ>^qsIrYqMZp*o_pmMRE^;1Sv>hFX% zP9WG*EeecX40JgQN+I=Veu=lw6`-=(mnl2Doq%q0yjLHgpaVD18d;xQG+57UAIr1u z$4~LA{s^Xp+9%G=qobP+c#c&)k*y+84GO4AoUCUke@mujtRYXsN68AIK%Wn3-Fh12 zy=&x8j*ufi4m^~2CeXZTUmX5^VQe_~tZcxJ6i5&svj3ANJg#ET>1*ok<9x%mebPRs zS54Xcr+T>+1`?nDMaLEDwS3?{1B-v@^?m3Uj45W*oF}wf^n{lxfw#|`T8h&T-TyJL zd|__Z{)u|8>ATsI#zr{w)o(My>qB~5Z^O@oHga?(plem4g!bo>tD7Evb;>Fkv5M)t zWpW#|tjoKL*&wFCWwdCu{3i^jBVzERZu}M?xaO$!!QmC*oVxrKjrBPBDh(~tp+YKN zdf_3f=g+HYUCFF_q;4|wdY}j>IT@dEHEU_VJQVCSX-aok@X7Rv_2M%ZQn80)Hw}ra z=24fAKl02C^_e|Fov`PMr(BLebRxtke>sjNv>QM);WOKcr&}d%n#qxg%Dk)&WxW@< z{m=t7*IE7kz`3Jk`CJ<=;Flaid+uCB=T@e|266Qh>@&ylrN5Gr;#_qfM&}84YdR594_n)4NnR(o`ykkLMZu z^+$23-On%sVvy({X7Ays^0h{w4!4@<{*AjvV(#vm_lMA8Y0J&*pA>2lInhSgG;^I$r6z9v z?KE~h{zzs!SikG49C10@O?GeX_4~W^*RmU!=x~_Jc+(yFowwKU3A#75&Na%+mi0@5 zxcU{S3$e1y?m6PY3E_BEC&!`6V>&wOZxd%|bu(H2PTr?Jj~1OzkH+>Xmho8|LdbBM zFP7bpMR9)B-`e$W0R+j{h*uP^aI~wgOn#;dRNqmikEgc#0sN-@Yl-hdpB$-EhIX@c z@cRWf2Ju}N)H@C&Hz_!;LP=F@kqEq$Kc@&ueWsr&MUU&Qew_|`Nhsqu)sku4!++q7 z1TZ!K6D^;fUr&?R6yw{CP|$*Go5sX-zEeMEXZwKL1b1Ch#H|_*^}I-{>{U+Oi~7u! 
z4DY4p|Jw|H_%F1+BqHt0Ds-o9^`i~!$la<~QNJ20wx@kUS9oybD4+ZL(KAVH^JgV+ zDe$?-Dg(FF^RY+k%NogVQ|g`K6(uMY=~>`v_t7{^*7)jB74QY=L7wHOi-y7Bd} zdTs@HSH^~j;>6=+?N%R1n!m|{o_q|p9X_}5H}jm(>(50mMPCjAWzHjuM_)W%Yi_0# zz#vkraz~<0%DGhL_un)`mGA8=p>OOyJ1%_>S3Err?5h=gc^~NyeCi(AZNrVXIRJz= zYxn zKANtS-hS~m#rP8d-xnYH^oEv}YqHEnq}Hoct8gK&(sLt`(N6_Aw$++&aLynfa3&+}7P%0LS}NtwnliDzlZ&>tB5Xv-yk6Qp~o;uN*7%jOjz` z8WWhRv0s#YV!Vukv`stIUPr;o*RBLBgEd@e3;up=1G4%Mw)37^ox7sJMCTPo6FAG2 zp&yc+bzdWpAB z8aCYm0GeEs4=jhW(e9Al=?2lEC*k#Q8dB1b=539q5UL95Ar$zgbMr0rx0Eo0mnNj? zy9;Yq)BH2WpJchBy0nP6D@Ip|)T>|Rd>bfH%hYrJ-S^JKt_6U|2{@@3`T2_VusuoM z4LRmrTF>|b-OnU49>iwblA$%p-1x@CZCq4!DePT-pxfJl#8uZb8l&te`V~ z?991sJl&wk&F#eVH*kA<@PoT?Z$-jqm)py<{!PY_K%e4p5d!_F>b@p^RJ|F zl;^x@=b2aqCDcpdJyYn2psC0>U8Lw}86a>P*;!wLqjBvErOcZYT8d4A^zlgt@>H2`x^!%Bju3HCU}p_BNAWXZAd?ByFX7VOm5)1*a+ z=*#p2zI17IcdoS)z=7lw!=P;c;vE=UY{{b(U#Dw4EH93hcUcXw|M){Y{i>;z>tec} zReeH7ROT13F40^IqyEkP{8fe&{kw1=kyok4vE#0-Asu<|2OV&fpIX8d^UeH|1>r*@ zKf9HU=$A>7l|QCqo(_>m+IQyuw%i|(IvK)wo%38--3)cmrLI2iherOT!h)lBk(b`1 z(R=sO7QegPH>c#6#?SDnVRD`uy0$NX5PNL6nB=g7<)V@ zJ@BteW^N&hg&E$i-v_1$y>mO^A$Uz@tQ~%g>6kN>mXn==jbtRU-^$h{O0H0J2SBhcK-1mm7#T&iQ%!-`HT-z>- zAy+psv&dE7o*(j<9lcyip?oM^gkv9EUGuPoC@C1StJnMU@ac2dpuh>)!)Ss)O8@E+ zG8Mf*6z#UT$skuGhIN1jIOdGhgxxnLQ#Wf!w);Nm;+$LBhn9E0I1tq3Zg489&L8<( zH6(j1fAF_}(jsJS#Zkb&s*9E5l4>}JDTFgf6(uX~*P{s+>Y^sosRbh$h4T+W5QK^`t(wpJZ#uNdM4ZFEIhYY@Br;r@ZsF6B%%8^6A zu8nro&m|h~+{dzynwBfIHf{lmd*G<#v=c zDczEKU>E(5qHf4OkU=PuAdE6ei6tALCazD4-dX>3cJwNpf^ub))_cv~#4lyVjrCLZ zWz%|Y(Q65PsWHO=9RKJK%c`~_(_g2_fBm)l=dGJswL!7Sb44f0;BxU~0UPBc{hA`% zCW$S$yN|=~fBJn|6rWmHvaLfC3>&acGa82{XPBjA-1Y~=_1EQEH1qv9Z~spbC@T08 zrc%7_v2ImK*}Pv-+#Ks5ft&hKoEPDl_7` zAc9tsf;X>}ujR|Zd5%sbfMO5N+|W%kU2F_8z4c{=_o}%lb?MZl^4+hH*x_3Mz6Aif zsC)A2pPA_7dVNiz?Dtgbv(ouXm*@qzJvF}pahVgyj^of#dA$z7G?)5??Qf+Qmcap~6s;qNd*Z%7^~fb0R?RwW9G$J~{hYFSLamK1Vuu?Or5w4EN7*FG= z+}84NgdB-x*Dzb=ofLM#s#@m=wI$5^^1e7wvee{I`SG-rl1`6;bL$U{teh{+%U_8MFy@sv#=Vx-(BUDmaTVh9ju6;pE_gLiG%sj2Z1lEeChj`Xl`8wpg8Dfj_oR* 
zcXu+6oiFa}1NVQ;*czy7Ry=(Fh)-aqpvS%M-A7gW%Zy5%H1ETEzu@R;8*`P09rf$Kbu`d$Wb1zmCHILPR0r3(G19C8*S&1AO`S;$ z^PVkUE(&Xcj`8;1b-_i%GC!uN$x`xo;Nd-mA$Km|0S($}n4bDQ}mSiHnn z-!e&9W@70H=Z7%i+KOLvYJ3Mg3@m4Y2c63WSfW~_+0}pdpR-Nx&c#Q5{zYa@(bjc$@4hJZwh>oW2rC2y_Qf_{YK}k#vi<_Kv3JxkmxRdK%y80#wpgc z^^738rY<*yC5*M%C=Kg4zqsgtrJwel9N@nrMCFFPAI#|3ut&&^n-vZDHfT<3w{_wIQ?eOQF50 zqS0^bQcZ-dr=1fzKU(v9Wu~yEO7Jh9;jb_Y5gEe0dJ@Fh_YX8^HRLSC$fY760_?mc z^^_5BE3zV5YQDDmhYNye)ihY`BST(`&*WN!aD_cRK7A$h6dY{$=4m14PLdJ(#1BP3 z7O#@8k$+US^Bj12TZ72#<><0~qEdso14>r+9*<4s`4_W5VjEb(4_7CR0xL|NX(CMuWY>F zW8pO$9*a2h#h>{6H0^ zk43OCMQ)>^cg9uu55h*ImX0khas3w74rcX)@TAg?8U2U6S@#(HM>D3^ldH z9`K$ooI}{1p!V$1+I#D-(_bUZDZs7A@?CmCTF&g)vUNl-7h2Fn;1-g2QOdAU@zOtT zuyOT0GrYm^VtW9F+d=2Mi^nF8cKrT#WN~F-2`s@9K-EmUvnB(4yKV4*rJT?LP|G=8 z+{m*zC;JIE-)@NOV0>srG6`0Lx$m?_Ycb@h99h2KCB;?U|126uw%S?f<>QlFjL z*uTpE&&ku<2SchQhZp7SZ*~r_Gosxs21g&crw?@ewi4P#RmqzD54*(9E3>YwlXu#W zQyV9NkHW1GCalq)Iu^Q*7f?Sl8ScLX-9o=O)U^zr$a!@d+q^y_BE)s}2))=(J)SAb ze%+YD2NS}zV_T1uwYblOZhT*R#I+tsHr@6-PE(eNte(M^1&ZVDaiM!bJ9ub%Iw1Pc zxe*w58Q<*}XXkq(Qa)KicC{c$v|W-zP{geXX8I+ij-A)vq$S48Sp}*Bu-lMQ^())4 z5PeU@P`ab-T6dIyOI*YQWB+1E(#&x!3?gU&UhmD4Gsdft~hN z=%^X_bRnH{KZ2V$RZ6H1vK!CT)($>p6&x0EsdCzSe~vc{3jLiI4w-L(Mh1&CAxomY zg!P6pVfwKgNxBO-@h#gZOaBjV6l)v|UWeQLwxWi7T9xcMQvw?0H+$Q;)M@mGzm0}q z4L--=D6_4~#Ln)3e;^7!CX2(Oy;>UwF`gHn`X8l87`d*gtJX{Q8@4V4)+{vEx@&9N z?#A9OMESPrryHZwD-UZPY$^UKC|zbM`>ub`RfQqN7ib%ID&1>)p?Pv45WLvp6IVxz zB#3tL{PG@HVv%p>q=t{D6BHsE$zB~rwt%Nw?!SfXJ|`?B`v= zI4T6!+By8;Y^?J#aVpS_93zirRs)%nP=L0jw>ntSwaTfRp_@b(=d*%L zw!Y0;{|Z)m;cxg+U}`sWX5pRw)ohtUO6{fc4mt{IJKD)6JD_B=ebq^#XDN7}bhT#w zA-N^*{W0So90IoDHu9^p`3V`_Ju7R4w9Is`rb~f+|I{eg`7g@y zCX1?*=WcwX%KmW_4{nFtdEfKSrLk-vD~3patN<)!L2LEbviW_!D6@? 
zuq7Igd6i!he&WUO>4N4RS1{|eknqK-G=ry0jVagbTC#}|v-`5G{LJYBT?409rvPrW zv)BBfwIIUP{)KY6k#l>afh^PO1{{T z2O9ZdOY@W*4Bnb1?g@pn2Iu0WeGy{jPMr2iE&AT36|I|2g=_>m#gZ?jKR3IcS<=33 zo~%+jpKllB%n{5I5X_gaYs}{LYO zrsg6vVXpE@_hl~~|AC%#e=16n=hXeS8_VI(;=PkgLcNls!A4JYBUxcxA+AUbPLAtPMoxAu3sbAlVYq*y8~d5AvR!}%B0#;>%=eS9h$daJ-*fkuLbZG* zc;Y{{jp`{G^F>fK*5|2P$r_@b<;0(`!740`NpD8){J37&vFT82@CX!G%VfRg7P|J zTQa4s7EAegYfXc8@|fEe+Q_R!W7;S^%Iq6My@`&$x_W)0s)`nTLfx%~uff2vP^FGa z?t)PTiYT*U?2-5FHEm(Wx;f7gd)W-soV$4=|53?`SntDx9fxgWdWvaL&za}CV;G*X zXV%Dr_gCCbXXk6sC9h}0zv-yfw3~>rDYZe$X%BJ}K7fM$R7MwBh}e`ZRx(}K1+!I@ zTJUzNmj(a4Y(0%yqeMpcu1k4qvp?SAYkzNrd$QvkRk_^2(=2;D>RDnV5{1f7@m&L9uTZD4Z8-8@yAcjK z-0g$1GQTVsIZ~VMh7PL-?>0~auSDaZAK`Jza>!|RK3FL2GdX`Bd@#Q| zz;d;8kBuf+YIJtu4*k^?)*f(fCAu6GH`|syo+MEf-*s^~xtVhsh>Kd*Ln8VM(0N9k z2=fpT@h?t;&(!>EuE4kcdhqPCyV;J@Gca+TLf_?OYW3)L-ldQ%HK(&a+CT`UqWM*MU!7n0r~(psAt1GC7m3}Adrai{X_ci2aX5uPzagSWEA#c&CF4@YZN;_ zx8T@#?y&eEO7TRw%dNYr%zw4E*kw_F*Q~apu0E`X@aVhhZcn~}to~mexV-$Ow5WcH zM;_GFjab{&Rl)jpMpiJBcPg*GbYQhlx9;<(-JwRe4oPRC;JfS)g%3$}^I!@ME|TyQ zNF{F4wiZZc`@DElrPq3?XvvrCH)rXw+MMMhHc*MT?xW`*5f!ix)|0j;J#^CCr_{Ce zJjr8fYLk5Y7rBSJG`C!w zNq~VmTrhM*s&BoxFEov8h@_1Bm}NX_)mItC2-xNc2=a@6to81lxb-!FY=*|4RO41T z{sPEqW|VyQKTw>v1o?js6#qad^BD->NseRS#8N7Cx%FcJmB#jk`%vJtJe}|`0sNEg z4+_2ke?@=)91+JQ}y<2WQm(K(z?7jenrNp#k>pY_JdYbM-&7?n4fmm-4nN zBdQ!+Nx}7_=)*4H{J&TfE{S~Eq*M!=Zx_YVLi#?Xtr53>vM%lp{|5^4(0z&gy2Zx~ z>+%m_9B@^*_b_DhZT)VqX;KS3J`qaZroJI5rT<4%_-oW6xSFa7d$3Kr(LsjJM-^A6#*R*BaX7>xC6BV-cY9LH? 
zlS@Y1@b2W8mgxA4PTKW@oc_7~orFeZ_js>nJBgmS2`-HoZ?yzDbxSTIyzndfqZcTT z9yV|g^>Z{OvgIGBSDCf0wYdqE?!eTa(GVOs-Ih#R=cYm(V)x-A`k~ZHL`?|6!gXhL zp#KDQKZF1-{fWA6{eb8(?(fw8Kp1&1sojPshR#eIEldz4t(YjgW!F2`gaFHwbGP}c zG$w7%%}^L?OO{xOb-gOS)yD|$1wkGmD=kHg(bG8*$R>xTjGWFo13w!%Ue6zMvhu5^ z!z1Y#sdHA$mxlN}OBoqKqP2HN9d-*jLb2Q}FNniv-69VrXR1DP5qOW0%&$O51K-)x zr?ZFE8yN0?L=+C5Nx?Ug8T-#*_zr8i_JD0DGVr}#5b4d_h1d{LT% zi1iBI)|ltq3gS5ypH(U-y?Bg}F-)rAN%hR{nhwW}`EmC}+L8teQuq%;(-Bg0 zUwmxzLaOmXL*rt%6@TeSdmB?whROW{L3RvYkex|LE{LhJpZ zy_q+4SPo;4pXDw-5L{pODTMLEpmXO$PJ<00aYTSYUA%sEhR_5_oIFjQDv2oc+R>y^ z-E+H$)41ay{U6`bUDd0E5i^sAYAbh1A7$W4^vZJ;4c1)V7lUVoj;ydAb+}YB1b&cc zAVC@tLARU&RZv6Nfh(-ndD}k)CDxvWJ!MG+++@Y~nL~=|Iuk|3%HHo;MYiRAU#Kmq z;$q=&7M3^Hi!TEgIZ}u6oqUk$+D!}I9B;fbOa&`l(gQVZwPc$e==wS7AG+Mn*Dk)GAhs5(Zjs;o+gm&kB zBF5oZrt}W-QD&x=3SloX`B6>h^^>>F7{LSl29G&%u&coHx*K<+hmmgzElzk4t2}|a zfNi#$2MhA$`Qyk&I}bK=qH6dDIy#WP%KE(x;eYl$6}`XKq{EJ3MqIFdFkLq1&2*tO zP-T2gU>RPrY4x2=bh=}@sHH6I_W-%p&CBhDXytC;kspUDaP-;N<-q%L6OyO%IbVOC z3Tv~Nue>WB9JdrimSnK5T|Rg&UjI4|$bht>_^r8l8YBDqKk#p9!qHO=r1l2RVEzy! 
zGYA*;fS->3y}N|wB^YHXk7EBX?S$}>A>HF?zfLJjZm`{Xzkid)&vu1e&%==e8Jl4{ zp5)!4wlb8)dMnu3(p42_3YD`kv)h=r(kd0-8eZ=QvW5jK=U!KgG9vfF2Ub7^V6t{D%E|=VEvifna54*hK+{bV_;g0No_QQ^dG+HJQ&{ZaKZ%EI?3#>Qr>IOk4OeDC#~X%P zj}8~0{r8S(gT_8StqL$g2W=?{U@b(hW?njeRe!QI@|P}bi~4>(v5RB7nh`$3zQk*I z-{>Wp5DN*%J~@++RXoqoTItu=uR2raUw$4xZn~|%7$exxayBi7OG-SeKiUZR)8XE zVF>N?-iDcoz(sc*kp3%Za$51?{ZtH5{CvI5xxQXPTkrMA;;*DmiCPhTSYHAiZu-S> z-nYMnriqbt!b&HfMHsW)&We}Z-MZg(9UxeVP7w=QgF=;5^+_3LYRfblv^}SW7G2>$ zAf~?^XYPXAl@)>S34I6paxySDagtHUg*rYsH0M4Py zO1tsbmEdQI#1BjDnmBpr?`G&ls;WARr$d71>UpBok|LSsF=;8`Hs#He)A=kd@oPZp znth(MhCV$?y9fjrRhvQ9Q6zHG@%Dsa`Rj%8E>4G`hIO*d(9`fj{zj+X6zlV_6l=OyXPvtgy& zE+@W?#AOYR6g3q~Ii4~Z=i$FEo*!2yOTCGsxi8-}ByhN}o`ge33hS$1mWKXnT30^e za;Q9KvHSP{ei%$Jj}fQIsr&S^K8idHUKb>45@n4y@1y-Bizi9t{P_$aX~*S~8z55W$vsJT)X!YH;=>T5GYP`$ z5t|SSbM~XdyTo)gOi-Hbh8$~@D}&phQewdPd_=?eZp^JU?@zcWD!F?yflUOh?t*Ij z+t?w4TuTLlbtN03oNfmaiHKid93P7c44!8tEnV!;d4D2eObNZZrZoFBtcE6>I$D8TWZwUaWOJ2*^`+>B$%5B%Wj0J9%=3 zGZ3J=bHeFNFq5k)w>EWo4JkTSt=_L1@yl^I^xI9KfA;j?$yHmoltR@w@jAlcF+0(*Qr>(}Ck&&mHg8 zdwW?b|E*k;XwvlmCiaw@#zBhifWJQ=SZYA9F<<_$tFZU27z6H1c$hCEQ04oFP;)n1 zPik4@$F8(7St`JK(uJ5gzk|sgKnSFI)L>kog>?bxnn7(;6DRt;)t8CjaQ4I?vYpTd zF5=H!x{Lle$Ci$FN@%Cx;h?S3e5V=6VkNT+GJ*ab`j|ZwuF18t>*{}wrCm^yNxb{= zYBikJb~%A60JShvV7ug#XZHP2&w6J`L;0>uMje#9=}HF4UZ`|<3a;p*22PSuOleX+ zDx1YMi2pT(WF8u%57-Y~L0h4fLyYSj(U$5Slk8CndYW{*!v8ZF!4C9erbFfQ2pES3 zAVM?vdgRO++YCVLQu!A;!!(|59=+TP!+#*9QWBO^dpV`*`HHO-YbnCvD&Szfuy}fp15o>XC3Ehticde_Ht+5lNM4IUUpv3Glh2~$ARhBM&Y_XZB zEwk`@(~tsQpgQLIA&_MqLL>*eU@WsFMLB0~!^lj~wJOb-l`fpZ3|UH5g;W(4N~#aY zB;+ZgIvW>!|JF9WRao zCIgneoa>BLBW-@grO~v&k>#evcBgaQEwGadC8V1F=N;iDt)HT@o&5YxiCuvE&z=$q zdqvcyrA(D9^u=9R3l+#`1kh54X`?nyR zYyM*C;ug;L=JKn5SN}>}e}Aj3!?w+?o0rMSl|=Mu$KM*y;L{1kkHGJ6`3*1m{Yz={ z{&Na*MsB$j(xyLu$e;>gj?DZtLQ94fUpAr|RPP0rPdF7_$;~g>b*yGS2Y;dkUUQKbgC+WIglS zt15n&!+9}%rqImyODzACJFY>ix9Wb-gbS9|r#EUJOJO18Gq{L@tqb4G?kzmn5avqn z`UiS}XY}&QXSsQJl2s0yv1^3;QF!?B^x5ta%W3l%c_fT1bSV(GFd>BCmJjzrY6plY 
zTayGp)`!{0@s_Yk&qA?b@z6*Ls`puw3&`FY`b;+*|(S2jLYX-f`Tka9hIAl(%o3|NsnZp zVdO=Y84V+!zYJYEbGwb+KG01Lg+9U|4ip-k623rhV+kyJ`g%d2MY+3G1#DB<8ppg8 zgy@e^o-j(e*1q^4$EGmbG%5e*N(F_f3KI~*$mLE+Y4}o790bzjAf}~%;~<{6Jk8x2 z6!q7{IS~Jj$v3=@y$+6{IlPKM2wk>rBFi`X+u>1_f&5TNwiHvFhZN5qyO4~v#F9fq zs|IUYz}pdTf!?d<{|h?oHIX?&I3t#i&nTdfeJkKp>2B{}UceaLK?2q8%aot|F-qJz zja{b^6}i-*G+zc2RjSa~b7b*#8G8%w&i^RNe=2+W4C+Y>dD;h&;p!fIt*~*6%DGn` zXzLUjX;ba9krQx@1heqHyYI`E`ZHBRoaSVfqxZ9TWhbMsaxA^Y(pF!u@;?%XR}DKW z^+(J0e)di>a`^Xo9gIU7OhLD~A1F&@EisfY|A9!(k?l$Izh?ctXJfIsM&E-{Km>T+ zmzY;Emdui!Kee;>HsEq#4VD-j6Fj?) zzds|oJ1;CW(4&(>8^CsX22BKud=gpl(D>)|3Jhi!D46`qJ&g) zvj>SOhsU{C0#5m({|StiF_KmNKg{LMn1#~CyOblmAv&gq^>bhTOjUX{H_E_xzR5?A zkx3}~FbrMdf{ngosL@4R@Q7gE@V*Y}`T3p$yy{f{IOHqp5I9N9ih=^t;2IRh@Ib6W zxYEl^`rH~Hk??cqV1czCIOJ60@Y0mcAQI8$nM$}Hu5Jyqi3TREX9`UDZ(us;ax8Hy zs^(P9Nw9axJKS2Kl-;?yp*ioh+c2pQIPRhPvkHR1g$ZR5K)nDEx>4u-3Iq9mUVeTe zcjaGO3#4iqKr3f#I{#UR{O|S;TFcR%YgtO+z8cekiY9Wwj6bff1WOK}fl_m#vGgZiaQ@f)6zUD=4gH0k03Ek>2OZsH6+u(n1vp7izQ&pb) zWPVvabU17M`^~{3%el6N=L??SR_pnyLScs(OT_MQU!+9IEXxJf za26wp{E9x+{e^UnEo{53$`2r|f`PuiYtDK@7|saL$jWk z{{qlV%jo^2_;QCF>x#Icr^%m(NeGpQwu|Kk2v`<0i+mS)78HWU>vHI`y{#V?*kg^h z)5zp*{kJ-{XF70$hhTL?cR;O2bOaV>#UzsERc`w27-yNM&}p!u!u)9k+{I_((7jg~ zXxuH)#nx?+0%UK$4ojlL$d#<_PSG(CD8WBnomNpHFZa0d0gj%4*yme@`B~?jKjVj+ zhZ`3zYHkarRS~~9Wm1kzIt(?nwTXB+8RfTOgG=rDSCVM{4VOe1J4SrNg$8YE;&f?Y zO8BEu7B8&W^PF+O{b{n_AYKZ&!&#!iibWysaH3XU^^o=8m^vsuujxeL8m@I8-ys1A zFCRcZS`lIXn|J;qB-Q>#wnLYmGT*7HH&3lcbAxTW$ZLz{J|aW5wt84g)6jgG{S!vx zzLvh14bp*zY)lfgF8k_QN1eEVFcB%^qF$2+(JZSHd*AAf%B zU9Y3>jTN@>7XR&AOrR-p{8WyFXNfh#?eh+mhyp%1{AvpQuGCj_D-Pp{EF8OPnX7nX zS~QnRQVMn$rWu^$j;SKA2(Az)1R5CLcqvlF-aLLJ*7II%`uVGO&Xp{(Pa{7J-4tZF zs*2-;C$+`h5NlCHIKEJ9UvfSG;KwMt8OtKgk*q8;o;t_;Eq10(+f({L5=2A`UB@lp zW~zU-tvy$VRvV2O;>9I-8Ys?ehX@-UuD&xI)dj}kNqM}4g@)j9Wj;1;)WiJz zMazEQEM-Q-jN@kx--0ybos73a;oh{|8fQ<3;yJ7tZ`Q`~UhTC~h)LGhCgBj|+zMV% zA?vcKearhKS%*05>(!R$;uT?opy2l{sMN3@J6SDkw4S?~1sBZ*4JNf&@?4jbKQF%1 z@dg98l<*C6Qp-cmjzSVbqd2Q{wE02AL~kEX-*{;9_`X%Py|g-y 
zQ|0noI-IVoT4@37?V%s8`3B4$FP@s$tu7iLmp zIWqB5`L^+lgG&Cf%)k+k8lo!$jk@)YWClOs> z1gp{Pn|1RT2WgXnRkCb`FEz~%wV#n!xvwLcepcCY@&0H&%wwN^j9iPf-&}T=S?Vd< z^_TSW-kyt*jK6^@WViy^iKlM{G68bQ)08AS~WeDIun3Uh0RfFx)>O$kf#9<`_^`J*g zVtBhDO7i!7ibgxQ1V{j%t`r^Z;$_@dvS&EaBi>GGhg9P8PRV>oTcPk}zxRJyL5@sF zAV1Gxwx1yXKD@o09PH50#0yht3mZ2cxpF7WROgP%eS5o??rkI$VX#*aVD8{(%2gYb zCj5SSSG4~9%OascgtJEu5{4Fl^(ql;0j*WZHhU!am4_c`JuDP1{du?A%Se{VJy8vx zJ2Pq^XI|PZAwvj9PnCS~bSdDf4sCKLf_B`5hk)ttls{gr$xhcjBDESY&8shJ_)@m) z@CH#hQf3n!c;NOc-)lP>qP!WSj$ye*W%1=`LTn)98Z;jOZs|VsHn5===Gv^HOVf1)%*PimUQl;}U5aX53msg~}L zQ9y1_E2QdPq}wQ(AYp@bLKVI!Bq$RAv0_lJ4&XQH@w4>Fv%Xj~;fiK%G0WKUR-;P|3-` zoE}9c$*Hu?7ySs}kcQ2mUF?+M5hh}aB z*h$SdTIYH)PL3Z~fiSc{!`&aekSBTHBE|mOWxM2vC?Ab$H7jzAu(S`<^ze~MB|!#o5Rs&)RZNB;2E_UzT&C!JuO$jFW!ku2kRrd||G5B}$+@K?REbqZBiBGGePN4J zz|pZS{B{6W;`Z1yqe#nO|B+15&_(8P1-JC{s^y{DT#mCmh}lREmunxG#7ci5wb}1y zM99PsZ-8r`SW5L)lKinSQy^NVo5M*t`ewoHuSIIpZMc%ZK1G7yft?@j`e$MeKE?sTGs;KyH4*;lmp*%##TR(KE7@2h_XK1bAz zrdRy_8Z!=0_wwO-r&uB#406b-70&C?IlNRCkt-xn> zaWN2!U~5NW*geS2Xo^PRajp67u+;g_>M)PyY5)FO?RbcIck#OUu^5@GUPs2rm#mp4~^9OQ+a#EWhyi&PAUNA~8!nI3`gN4MPlB(pf zD>$``3Tly`~SQN>{y;U3#tv zsoP0@b>8Uo=?3a}?!CQ#cD3u{%&s28Wj8cgy_{;5_YZ`0wQPl>4>tz^4*Pd{*V66@ z432q^E^xGW0%wzxeHCuQ=McB7`_Rg}1V6GR=1j3&>A+QIaG7pnA3oS$;ih9qj)&&c0jJI{O?U{)Q zI_w)1k`jaag}fB~7Uc!;%KFGHNdtW-_r1``F>ZeKK?3g`X3M#YFM0N^G@Wm`+Oh_1 z*}@B=65hA}E`&#YhyMeW)#V#4jt=eg>t3ZgIR`((7$YB21}?KWB+ zzc6de$-c!LYnzW4gcH1B*q{$$6ZK*73ppx;OrhH zNh>UIF<+?l9p_16qRr*D^~Gk0*2O-l%R0O+&v>=5lOmb9kLQug_QJ>C^VF}HyK?3M zYVa%#w7;I@aWK;~1D9mweZyrwKp6Gx_jy2Q-k3OgtFcYA{3!F2mx&>p433@PS(-}M zyWF?iYy;`Bhx+gUviAC>#uQcIiZadLlvb|YKt?LV@WFs`<}vJ1C*V-88%(+|zC1Ql zt4H=%9PBN1#<`3a&XoSqTcq6UC7~FLBiy0RIfQI{W1~gx$3g5ZBn=lTgzcELQ+@R9S897UaAvz)w=^{UKwA@U zBDKJd03`n2|A7YWR(14Xszb{6%n(_03d8-om#6x+O7q$W^fJYm2c;^b@({u~HFU3Z@@2XwvZ;A(A5{txwB z*U$edU7q*Tk!I>^=as2is_9?yulmUaWP`1L+Z!c&RC{h&e5R;O!Ra2vy`i?)i>yS$ zH+OfKP9y8@;uTXVZhi#f3r`b*#kwZQ0^Cwft&yqy-8@J0V+kSx#`fnt!3o$gNDbXL 
zr2olj_Zlm*y~kMQdrn3G+_u@Cn>3Y&N3pr^4&rG)cRQ<-Px*tmNCO^@`mD-MytLG(yqf zSgw%*6uQr768!gB6a6Zdp4KI)HSq4gKYKVzw-31w&KwOL#a5aC!M;n?RlKa1@!UV2 zZ)>OPM;Jd&G}UxX(gqD3SI3?5)++)BAdito$Cf3JY8PffXL)6L}Il z=m&Nb=@aX8sf0TrF}~UE&Z+cO{O<@3=_B@2wGqv~I$CGqeg?a|`XsYvt4gpN$yU-Y zFSv;h>KYt9{p%aMjWI7#1HD?w|9t^#dcVJOnuqPHuuKV{9#P21KUL{{pf-gw)bBrt zp&1>3l4IBl10hJCnB$#UuLWd1VvT{qFJ-X}aQ{6}Tiwwb5l^cy4sEiV5Qc12%4Ufd z0SzO>j+KqnfJ*O9A4NL1TS$e|muiNej7LGnrpEF%+)4>!04PPt6D{RCQQ3X0mO-kf zb9>b5mfDBy6Ck`0kl}I&aIFK*QxHfHEP_jaJZE|!D98oa!3sSm=_3DMU@iZrHaeZGMfQn;LR&Yz%pjnmYBv^>~)*HA}+6 zN{(~Ff{=05o^@{Ki9c+habSx&t+>V6*Q@mJq?zZV5Vas6(bADJlgpasfEnVGB7bY} zf#4>zbI}z*FFRI*WstsS@T?-44QRxSX;xayH z@7*YgLTDgqWD(u*iLbDT$NxaZL7c2XIG>=6mhzk59&GomJ*k=6v}Jc_Ht@zhDR~14 z3v)cc(|8j5t~NoP#et4*kl6qzQswaSyS=%iJlD67yu6Fvv&24+h5zMmzig95!u|t7GR3=C{ohVWe{bT);B-zN zYAm{CvA{+#dl}h=TwdTAIp8ikh0yR^D9oRjB5DP+ktaFjRjSA9SQIA|X#czHlfG)B^c>@pr7BUV-rIX z3|K9#_pT8lS?hONY+!vauj(BwWJX5D+TMIWjpa^R$%-bso6Q=&5mWZNFYBNRB)rdn zI1+*hkS_s;397bx2`@*xz++2E_(^@k>`IM}QVv*eBAgh{=IpoaTch3npnO0dBz_;= zY3G9Gm+e1Ig0Mgxtil_XCc;`OtEZ(D$Gx{`>N;mdoSpaH3AgslFl-zAmztoVJ>_Pp ze&>Vv2$X38hfo5H4s!I0^FFT4XwxO;O~;cYgbpgNv}}?)YfZV7ZKHSxHY zK7NoXv`AxQu58v^)VtK#zAZt6OyC}%1W$vnx)D&HBLTCl+{cg370=dR!jv!q_wY8N z?*Lo!77PVm4Idqy??|sqj^bUSe~u7(7xzkQmD5bRhDz>Y3RiZUBU^$>Q0_uK9O$fB!17sW6?%M(NWZgs%y99y3bwlNOzRR7#3=bD1i`m(2y_hR70cC9>2RKG> z>|^2%yQjtuUbVgQ()HA0X$Wo`yGi1kkfWFAK}4x%8uS?GwH1N&tBRPNM*bjb+rQnw z44s^F#&AJ>B$tzx{N}}C1c;fiXzpvA0rAHA$G!ZDOQ=?R@Tcg|^2XwmCfrI; zLe^WAe;^;JooCFs4~$9&X*Nm1iO<55WZ5?SnmS|Xihd_-E$wxISVG&zv4~O4r&#IsQ*C-S7vS!~^eklU}_t9e^S_0Kc2MjR& zVcnU)Smj{{@w`!&QBb5;$-)5}$yA~XeT`XY+C6xW}rNfl2U2RmWBowX>W(S_^FnU z6R{ab5_#wDJ>LV(XIDG^$tCR~|AjQ-UR)d}WzyJOr*x4Pgdd{!V87C{-WQSpmpR5d zo3eOEo(=7s@d;@%Laff0J`+t0aHS4`iS-!2e&q52U!5WjH0&tvbbYh8*24iOEUWM2 z27YGcev%Z7bkb+j%n`kvM&!ILV8}&PlmASDfzJGa>xi2AR{H{mB^sDc9Ch!m60J5r zeNr=E$Hf;06zI4~ZdNV9eAO-UZOm2Z-~s%st$(1W9Sx1Ix(L!<+S5W%TlHu!V6%av zEANbhM*x*zun2bIAE;*Xh;b9Tr1f{uB>v9AYn7YHNnU8bsg8OeVZ7Sb_9olIiLWj1 
zm2bW4uRc4fM3Yy%+Tb#2UfPop^ONZ`qwCG`xdX?=!dWrck#nI`amrx}|rw%b- zH9mzpS)ut$x=ED}uPl(Aanh4GXgyJ80CT*G-Y(btR58CN-5##rIqRhRD%C0HLp=WQ z(z270J^knOG3Z9pKM-v&UhrsV<9Z(iAG^jzgUpVo`?`v2fT$OS3@(|=(%sU*U4Fjc ztM5DDP$%Y%p^#s~ks4OU`wOWrIW_{~xP9-lm1TX?yTt9W0(a!SW-#&<9l9@7i|yU_ z>sOJjeOL4Lj`(d;%l#zXdI1JEy0q(;a8eW)(rw~G;U;4prDi&z?>*kQ>lKB zYWli|e!ac4oM3mQ4b6$4xmFW2N}@k$%#FH4HBUTu-?TPPuRW{QUS^@yC7{B)q=r1y2ly z^0i5=l0NW3T|yk>f9)?YWeKW zR3(=F=!60^+J>pW={u}FjU zTZvs@3Wh=gL=4G3bc01-cqqul$!D0h7mf2z3|nd$r8@--tIy~(Pp98=gM~NSau_X{ zr92YWo=9{ElouCRX1w3m%h*=-Q<;e?zS$o6`%*1995?w}6w$t`)%nd-{@|F|_n`~8 zAh4r+VvjKWHfr{lzyOh=W;W4QgKxY;S}<()={!$O4tz0@gPpa)RcA-$vPGvBuj~T~ zjD^M5+b4v5i2W!V6;Xwh_`5;`hU}AQ9G20V?@r5af>HGUxcbg;HsAN}n6dZXEoybx zn^3h|sjVn2T1D*|wPMt0t2S+IMXjRLUJ)yH>^(wg>`jBn_kaJMm(Ou{m*mAQxz6i6 z&(AuvC;`fO_^2@~V(W<5r<-rdWIz2I;Ve`QVsP37bA?1u_=NZeZs$nv=Ha7F=ohlk zQhgzT-HBJ^&z(7uIYmGN_zw>aWy1)TeBXYP49qEVU zic=49RK1WtiI2r3n>=%sx4cNUn}7c6Xz9lQ%~+FDGDZIjT~Y>3Qok}v$D0+!Yj|$4i`I0j)!*t~R`B zLQaKE*W_WgSNBwyTEzv^dLMOZu0oV3UOE}M)(fQN@m@=9+cUf?d)@Gd*nwKGbN~#l zW?hcy3082f8H(!`t9o+~CJ0PS&(w;$_ao8~ODS|x3E$4-uyFUL1k>Uj4<-jGp2DJ% zhEV#`>b5X70%{>K#`EW7y}MeJ1S9tc^yxx}F<@2@w>WTr*Mmq%k{32mZM0qWrkyUx zJTs(rnIVK2hh3+S_z%SR4#g($jaz?CFK#WMYs+{rg^t@r5gZ?pD{+`df`5tQzJ9*_ z6#2_-X2JgBy?Z@&A!9ODtH@juV*Th^=-@J?02_zgWk^d-U#x^nu26JQ9ly9O+0dh4 z=Vm@k@=6PgGO*S5NE;tC`uUx;e78`t5AS=7+U{4z=;&aDLK~Usf=}sk?voI+4z{r2 zE6eU1Ybg5)ZuW8PSbwyW9@#+3!Ou@j^usckPs}Us${H^8jZTLP{90Xvxk9dJv;3 z^*Uj#o5;7+>Ss;qvcQ&;)BtNs>a_c9uz*8zcD7xGmi6QZu@YJv7s1#>KpUA`bq@RE zx=M<7{yFzu-R7IB%_~1q6j3~_8-oYErs)1i5He;Fk;kUWUWzxtiC|P9%-d`Rbd4^o zf2?<`nQDj?TK_lk3H{U!}jn}9X$?#k_%_76KYdy-ANgKn) zSdy88+JD}5aO@jacs z|AD@m&-=sKiRcM*vj*E1QmAHKa;{rH+Z|KG0ZNe!Iv#P`SwO|&U#%Sic7sO=-`<|( zv2N*yHRv52yUZF2F0MQigbO=1eY_wadJpB(7fOxtt^OjGabme(^;&U~Xf=1a6I&(_ zMDpp3t!G*7sh)Oy6RqmgAB{^w_a|Gk(SBDm{}4bzswnOhlRj1_bh6hIUHzc**6nx( zYNpU8l4i8oDj4F7;>U;0nwm_JSiSi@NH%?1u$bawu5`J&7_OY6Y`MHj*1;O+xHcNg z9*w_Me6PO9RpIwjyVoQczEgQ)??_m?Klhd!yQ&a$rpg*C=5~NLQ+E@HjvDgNt5kD$ 
z<^378gEAkPsJOI0nlFvbeob(8acR=fV{?iI*nHU)priJb=CXql z4dTTeJ?Nl3jj$m5q#SJGnu$!JU9fQ{hu|hA_wpj2T#92s7o*pziOvqng$$JFwWbO& zXkb?HVkMiLf+CRW%PWf)VmxP9epJ1Jf8hR&OH)@;JG~aZj8oHuB~Md>?)Bn{A17)i z)9INJ!!c5k@XlExM!=s#ZMz&`z|hduflhitnjrE)$2u1~8Y6-XOHs(DkpA-`A$M_d zos~QT&{_5Ttmdhl#3Ad_t=pt*GeuaUMY8^XRG|Mr5tbEfL`@DxU=p=ZwO#5pps%*< z3S+4mUm{YBQybuYTooW6?46|kN#f0KaZUB_DB{+ad{%RPMti>UV>NKC*Cf^H{VlnIi*FQ1u(Qo&)fT-5m#HL zW4t~`|AF>LLQ6UIxdTz0ki0dG7lobz5jlNLkeZ{s5mUf@&K!t8M9Ex}kl@c@M?c{I zf$pk{CK4#fu1V&N8m*t?wz^0&IpnUIPq971@JQQ>@dj-;PvlR*(GmQ=`wAcna)Q5B z9y6Mbw8N*0LkoW#gg(mlom6DK*!DBcL6U~9eCNlWl+MrV87C=kjNc@FAh_n?lmSrZ z&VxM~@aZ(5_XO-w@V`&^=MFD!<7BR>aSC=v>bn_Ln^^xE30vt);* z3w#vFE_H`;daf395FMu3j)|u;*-qG(K2Qxy08BNYc^S7H!>e2D|MK^g*E@4@{rTrR zivjKpi~sxEK^c;LDEPWIO+{JybER@%<;g(9#{YYzy}y5HLpiG62SmsNtl6#8#4DIu zKOzM^`4hy37=^l`k2){5n)xFRZ5kcxUQ$ppiM>q%`{ME{sZX%3 z!9GA&A1Ud>Qey|T^VT5h8#AI8^4M_&xk>zasmX&T+9klKAVEW2`y^FHVw<0PQ=aMC zCboHbKcUbVB1faIuK$M1gt)Au=y4=nnP0!)4MW`DE#DqAtk_NEmP`G8L0SsG!>SU`TL-uuzy&8=trDpUC3BI7rj6QZim7vV?b z)yJmj*w5-*TMEWDcDtFh(>cGs-VLb{iZeG%q9hS=3U@n|xtPHXT)z*@B2cLTznlxz zBN*}l&T>U?+F`;SFX;T!{-)Jn{BR@T4ZpE;JNz4GfxbD$s-IJc*b49^X?XcTBu-Tb)J(i^bSZyLZTtrA6bfpD;c{HgPygXrR4`aP8yocRK6YwiN@%A6v{OiQKhF zB;zRiN2`!fS4lI;G^z9QK{=jmmtSyp>XRnzq19~jAB!^=weDdud3S+LovL72Y-GIn z&r9)ULv$JID{-@#+7x@XuN|{)S1eZ8M3EN*kY!ADB{ffV-2f+NKX0I#jD0SD{b8Rc z?t#Ld?zQ?Ze;ukfj}uSYdejl??M%Bp{_F5>lcTR!#_|5M!Y_LC;?8#^r8R^c+MpM7 zz;VX{M&5N__npxIGn!EjN(Spvsdn|zE^qj{7QypZbbs?JrJw=A$5>NwloA0>zN8IV zXyE!#Vb(Qe{K>s_Vq7?&-f%BiF;}4H#nEJsMd!@ZNFR?g-V{rZ!TO8+vMZcIxJ>_* zLpb@ccP{v>Xo?yE6VlHXtUW5^;R%{Dlz|QD^wD#monZET?WtQsROPxWyzf!xEPX@r zV6BO)zcMG4Q;D#WOWABu5}|DD6u~d|Zx&VU-dh>J3qUKQSL{y?R>h{XT9|ibSSXFY z_lUZh9PfWp?hiom5Tz?e79wB$R<_Lg`d!)~o~=>(ha|+hoA#r#`;Nypm_$4?hCn58 zY`u74fkRXU@|y=A*6b==v0lu3(hQJIclxaL@om(#>JXoz%g-k*vQha=Hqr?%L zn3r}L_cQLKE2x``HkGjK-?tn&R4j2Ot%TKwWrXoLLs<>^wYVRqRt%PaIhuZ^T?!9p zZ?f!dm!2%kmjquoz&N+yWDv*IMRo#3DzFt_kVXP93#&}L3k&@?O`J54)+HpJMv2yLd-+t`Uw(I&HfR5+X5Hi 
zZzH%`AE>bZ1h`b1Z$81DVG9at;4U@l&X#?EVGaMdu;`L`7~_1by1wc0;o?|$!A>4a zMLY86@|FhV)yvj@JtfJ2-z-(m-ABJCKb4qBx4ydw!&sYn+PTYH)-UMp66pSN;8FZp zPH@vQ_RpOTg{&glr&GmbA3vB^DE1kd-7QwpzH_tH@O=|<1WjCMB~Sx*5*Z0Vg65AR zPys*itZh5S*$}xOUkn0{GZGzYR!W-~3~FR8=uZV(m?$>nb-dh4fw%E&+sWRf!O$hE zZ2y5Ap>d4cvsb?|*!`_n<7BU6uJ6{)8CgSaQ4w#eEDIK>_?Ji29bAUhGyy)!A#n6p zdZs>gj=(~a1IiHRcbMIO(eGACA?VNlfqnxX&BsyQ03ypAXm3+Cn&p)n>wDJ9MxSp|4BYI?zYytyMdtaV7lPcMwaUMmMWuiK>yL4Otwn#Xx5 zfq58op_O5w1VAfQ3yBM1_Ekm|x6Nc_Ib}&dyiQWK+(A5*slWC;fo0FCVDe%+_>cRt z?j;mD@*fg`F#bL&*y49%J%g#UW{x(rTrHLSzDMlqjffi8SM%->sA2X2&mWQ9lE#9w755&S~!hy&Rjj zA#>ln&X+iGAMYsJjxvlJoF3}tf@!~_zrW#-ywi~XvEUUa_Y|ED(S|PqJ#h-vYGGv= zKZnJ%!F~irYBy$%iq72(-16#->PKTv22iS0izhQ~7A=#-U)GneOE~;Q_}?Wgc}GP` zYyujOz=Xese$#ll3S{ZxeIhsAAzc*FJ)bViw3o5KR$TgprIeODYpD1sHM?f$gdGj@8upo{x1{cw*H zsP)PLac5c@aS`aDbW754ZABsZ zQ@6Ziwb5Dash=s6V^HHEk42c7C-Jzfv6Zz zyg3vn7O)RHYJ7f#{a7@G=eu%_yK~gCZ7nO)HD0vq^PDdrExG-L$;JVZ?`ElpkR=k% z3VRK7)qyf*o{WHyBe&u7SI>e=PV_(BXm6;jQH zcZ{)$k_vqHW6?Po`UK6tTpbdM*|$R-d0xE3!H!ZCf%OooaVNsd&#;JQF!=IC>L0k$ zUEcwv>gQu2g7-Go^&5SBpqRy7t<%C)B% z7h`v(KI&?Kwr_%R-6q=Zts%JjR_u+Id`)q_U82#JLnO)Zvu?#(+r*@Los03QcD^1) zd*-fluTy_=ICjN@Bwo)e2iG81`a;DbB8jl<|GGC)4l2jrSc?3)il9uz!*6K%*u9=({siBqgcWD z<{Ot79TDpI(|&ZA)wa=Bw?rxRr)`qFY8`3FWcFCK%S&%$F^MJD1XH(blF9+{gykpn z>+n5PnG)jc=l*uYzZTkdF#3IQDix8ca}wASb(&Ag#4vA0RI~a?OVGKLHC#zv>C9eS&Y7{N)r%d_{?n|WCWH6E+|VRaQA zaG6^??>e#Vpht$%L`b`A?@vBOO`{`4@n1&rX43k~^+u(p(s!VbX$ZV!-5x|1)Y%(s z8+1_%ktqzCnl&XWnA3{)AuHLNZoK*>b2pHnbzG)+4Du4lbBsw(U| zCiUv+Q!FUy7lv~mmX#q!8E? 
z9T?Bq&m#J!es&j>JQKiEe>R%-JB;?7mvFgiZ~G7QihZoUJFb{&{7-edH}h+G5WO}h&WI0v_;>%KN+2)BGv zqaUTPW3x2wkX5n(CqBjw_k^9wbx#HhE8*7*$xiC^-vi|IclgJihku5JIo(+;$`40a zxm<#LW>P~v3M*5g5jXiEKvPP_0(5T+xJmt+A9$7P+c}2NqsmO2wHrJ(gDFcdYgEBQ z==w=_P1?P|2BO7AmpDQp*u-w4EaghF1z2oX6*^)e`EYJz{cw0DUSb|7q2vHqKoeuu`|ZZ?p+7@l%PDS z*#B;h8_cw{qYAooA6Q+nN=+>rNS09=F@6=yb`)ANyL>e?T=#d=zkNdeiGR~)`%X=ed@+YQ5qCGQfwtl)b?+5nsFrsNGmX*dsKfd6JX_imHmk)nuCEDC= zsQat30;YdvWB@S>!1f4Tk@uT+v{)8!?BP4*{Qik?&E3p{6H{5K0YjIs{+8t6=VM=@ zr*;1%djEa#m*VeGdcmp{d ztaYXhg@*3Ezm_KqUyTr&k1W?h8@|BGtbefwBnEr?Nqw|x%&7Jj`ofigc=eEk<>CBX z)7jZa2O+0yAg1H$eCMT|*torwvFYyMqMc3A#R)_Wwbng0a;H{*I+xqX(poawaNgHf z)W{oG?Ploq-1br2ABJVHRxZrIoXF{9KEWHIbY4uy+TgyWB+@Py-}-%wj}ldY{26rP zmVX>uw&7Jo(g`87T(T3W71q>(Jm(9wah2!_Mz%6^HP^G%FF_OLF^2|?$xfd`q&J;x zm+EsLIjf{72r?enSIM+f%er!$hA)9&#qZho_t9$n>-2lCjzzk!hW|-vdak)uKH(al zwiw;UD}2{fc*$L)fp}$3J3D^E2gAl;m4t zYzs+%rV||=SP3X&vQx+*SLoT#FoddVP#)8NAcwqIuFFaF?h8-4pIrc|fc+sy?~7mZV?~aW5Y(sbt{JOrVdbHHZAiQAWry&faFRElf<%hT+Rjwb z!6d9TE1~kuN@jiGc7AyYPdEin$TEb>OT;6jqr2q12-INdPp5nVkW|*AX|jnq{E$ZV z&Fj<&nt*rZaX)_NVK4a}Q}bo4XEiH_#J{=Fpvmd<2p;q~4}V`c@rSRkw)>VSuX2~8zTHk)E7+c7=69H=XK&L`7F>e7W3KTBN>(H$uu zBP;O>q}I6`{;7cp`M&T`s^TU0YpFA}FG0A0C%z!Coy%n@;Sl|A6py^Ryzo1*bfPeKJTY)-qjhq>b^?I2fCqtku&cVc|DkU{R<_jsAzU zXL1RswZn@w{yrvK`Yl&yyrXQW=SZU|)H6pEqxd#Sy3^S&=p8>9++{Vs1_4QstFWiF zYLxC~D$h9Yd84L?==@h*h-W_f+zFxGn|*u)`%nom-0Jj_iIHQeyEYfrw9ELW$mJ~W zjU;bd-=5ZG3BF$O`;rDwEo2xwVIYsPjYxWjeZg%vGa*3k;1|jL&_~Z9hm}aVJuXOW zl^X=w?Mdd-nYrJT}=TTHW5rimXb!{*`*; zE9JWF3c=)~)hOY|ujjJ8!fcP91He<8&~0sX4PkGT>(XSK8tpIfU-^uS)w?!N7ZIe6 z4j}LNyyBPr|D5kVepF;USqBj&-{ea#<9SoDnU3a1?H=lL&=nZcz$oNynFIrb0Aa_R z*0>sUG;N;?V@+h|29-~&A~H+{O4&vxmH6_!pd!vULx2@Y%3n}An87KM-%0Shiv;ly ztrV`+4rTieU=q?7-W$FIIru;)!n5=c#^=C68zXsvkCXz*68UM*mq3S zeC(x+<;e~LxMXQXRlW#J=HrNML}_sNL?J|>N=jNDGTO1S_&hf{K;!?K(hO*u*=GOUZ8Z<|=98Rxvn0E|Fd4~l{I8^D8@nduM zn@H5+-g=+y>kq!bW}{!$YP)LS_Xqko&Us_oB|-V=BAmO6X9da|8^p?fS7Bzg3PU@T z*NB=`u;TV56}17Pjg6`kljJ?+O$eH_5#e~<2R29Yoz1f+ZFLf>V3Pm@Z(^^+u_5&G 
zc0|H!&fSxmjfC%4Fzbi?SFjg5@a1SJP_F&a&kno_lbk(;!?`G3~!$M+tw}=Mw(b1}~)HBw#)%t+_MKu4s z0!9}BkIWT6va2qCj(0fr-3kg_$sK({xX5JudWe6|OnRA6m&Enq!MPVDp^5$QI_hO(3QrWJ)TzEKv&+R>1sFKD1XGrvq zz_!sW!i0+mAC&P#ufN`YE}xmBH^@@_vpVBf!F`)z6`zLGw;M?F3SCO&w0{Y3|LP<4 z!^9WQdwuk=_a<`{WuvwJya)%2l&snQ>*D{I3GPFE$YsNgtOXkmg+A!*;HjB;wWrd#e`;twO?3zlU}o-`76cv9MaNw9Di?b&mOaUvptfT1Z^bXC#uHgQHo@m7Z*1Y zC5D1Lc%pa?E^fc+d6E735GBZQ~ltj)w9a{K)xkO)8r=;g#d<^ zPEXY`9(znxUoybBO+M}-$ZQjIzc)1?j9xnTBe`M{EUS#X$Khu!x zl+0tS8_38mlv55GP&EU1nxx>HN>%;i3ZU`k?f^J`2<<(*Um4EXV)8%`#^lnHGu<<0 zz4ST4^$$uF=tNf&C`|VJbG5_r*ifTAJYK+ao}2wJ=TzkA12-eB2RqE|nID?DI+QC8 z$WE8JX8u{oxsdL4Qq2Az;6ArA%4{TUkzbYh$I{HR(f#4oriUk=d-vavY;P27Ci6sw zSf&#l0>^&R6%$zK^jB2#U5VkmOb^^22?mc-JoM2g#U#(-uO|8mG%tzHhrG|O2xgn{ z4bO@I^XZxF=9jesGGlz$E3^L64=Ln*W6Ce?OS=@zIi}Mna=!H@$T~d4XFEm#-0goO zg6Xffd{<|I+LAq$rl;U6kdU{Gtv>x3;~1_9zMLE@Y!auF+0I?XX1=lXAIc8*QTEoL zV*C^ZSSx;6ns&F>oe0q^e*T8*BGoU;5y;DtJE<109{QHZiMGm>yS+nk-b6!QKVT0V;3pHo>91FR6znNb z)eR2#Ihd@^W8t4LSko1Eu&D4>rlpG2sVRvVcgnwT+v3&0(2IXp6H;UebC-f5d~kzX zN=qR>FLiv3$X>>Fn;_N-AEwkf{A$@gRkHgm=**^BL^Ou{`aSZTuI3@^R58X;5!Ro>kHd1WOT^$dv@D-XKW$tOR^} z@4E4nS_(;$WV-fff^skWk+?CE>5oXBP)JvH+*Yv_cIqmzFH=*eD-ukycv4cj&?Kmc z;T0uH?0#jW$qP>xeRZAh9DgGUzIPgRMvavHI~4x`&6fkkYEVxnP@Hc+N#-=j0CVmr zPxR?Q?!H&z0W>*ZR5vy0+9lr*OBZ3Yu?6S(I^7j~Y3CLk6uOTm_QB$pD9!C%4Mv*N z6U<_}+ljc=x>cE{^DnA822tt^8Rh@6teV4zBhbEU^M>J1F%sJsJ!|2NYOuEXO}B5jax$^zBP!64WTPTs;tk0av#_$@!*6 zB)f5|zatiM#21;~?MZvMh(Fc?4{G9NpcX{l*(p*c#1SVaN=Q!N*qTNjfTZJ{Orb-Q zQHITbPHL2}+@(YcY-sGRR@ClfCUwfnYHwm!YM7O8LhyO7KcCer%|SqkJerl7omh}x z8-ZFm=RHjvOu~`~s$aXPg}b>UFvmaHN3G~wA5vd%;!N zl65kK<7oa^H*pWs^fCHpZVX{V!Qy0&Vk+^y0T18c)OvGK3iyBi9t=nS@VC|3YL51X zRdIr=e)Ye()(9?AbKywOac!3t=?9r~XiARlh(ilL#Q~Ye>i>ie25y2gzvRk41Z#Ln z!qWp5kV?;M%c+SaWl(2|7+^ZCW&R-1SO$2coXdtQQ8#)E7M!2=-im$}D8h3}U}5=c zBBY_S2_p9XSN5y4^YW9f;s&8)vEJxt*hZ0rq^y(hT>Qb^6l1dkz|S)FbXW7C%(qc; z3gb?JB(>&U<4D67DXwpJkyvK3iaI=GdRjJN*PwkZ^Y7iRkB{Ntlwqy*X8b zRNUatm(^l)@apj%OYuG2*NAh9lI)7VSJk;}UJcEt_WpQ7;>vVt#k_WC|32AjDW8t 
z?qAwcTFx17c}T7DiiU!pJtiN=f;k7GYw#_z#i_5q#V0Cuzm`>3X1!%1@2>tvU*EOf zx^ku>zvPK|iKtp;&50%8mbSi$>irLdyYsc%?IyU_Kd7%H|Jm{zg|6uWdWQ^4lWrF` z@kbzC8^$*}YwgJsI(MCxR;?s96Kt=N7fX18X0a$dgAeV4wV$omhGD(xEaMMeO@^_D zylHWxb$;6VOr#~oIkQV~^U+6EC+!K6uN-vF0=M5@!nK(CHfT{EovHmNzX2cV)%baL zn(AJj85Y4Iqhd>IssU?`4;2WZI{k+~YPafKWXDG`My5I^J>UJhu}D0*B}1<6C=w>w zhF#2b>3o~6h`1X{sK1-6g8g?_Ly)wL1ClOpwQ0`zINt9zTyj&_o9g$gd%5~(_Zu;F zi{~E_+LH6SkBich<^=y@BarsSnJ*obEMF$0)ZjPv8s<8PX8#Ae+L5E=T&$_3*zH7B%ZX&t;b@XGUID^U1m5TEb<#8gNLHJoyNeQQZs?STsM15SBeg z`gmSQ33aE~Y+>_Haso!_Y7D^L7>XY*?Q@FDAo+7ZQd1uNFG?WcgpR56g>tQpjgY4E z5mE7OUpi2qn%>`U_C93d(!j9CBWQtGjK6BUJo}-<5w-`?4GF%2rD~}3zvefMXT-e@ z*H@V$83lW!f6s5y7A}0tIAFJm-PZpbZoo#m;S_Lz;gvFaT;T~36^TILA=ciH`}MSi z0sz2(tNL~%#pr=P;a1|%$LC*(dnNmJkh9oA8LKQ&YCz7FRGRQRp z)^9Iu=?L(fAl7~kh$PY)Bp;PdRx1+^Gc~w<`S`651v*nJ`MN6>5`p%+_#s_YS*sQA zoOcR5LhBL8G%@I;dLCvu?T$ST=w7xlZkiGrYtt0qN+15!=P9*eajCE3bF3QsS4p}t zBz0%|w^)}`LiTyJfIei6>86&N^%{y7R_Djy)JJL{yqDJ^*N|PvJa#Rx)_3SMyF%*o zKXYdt!#20xUi*n6mos>_<1lIncTs6u5_iJ0E*(nL&A98h=k<7^ zOc-)BK%8g~KJ>{8a$o)i)|d51qS=z9gf$%l$oc{;*X+an;2&4iroJS^u8rKrn$38& z#eUtp$T*h$qhBlM99kOhU!I}ZNs6{yHZHVbw3aQ?9yDbHP4)KTOnPMxDF(G8sl!YgH&;o@3)BiI9GeT#qn)_*0f;yif$JTlsw zD?Q`dD>-BS>u$qqm=TzRhxtZgEfFVx3infkT?8Ma_7RvTf_kyN{Cot#om_$TRP9{s z;HL?R=5<3ymR}b4zPgvz?%(fI!mJ81u&Qr~56W~33h)J?8=?L)$tM zL`EDzDf%K7?XrX#6N`?jKiTGst_+_#GHjoPluvhGKZAd1ejs#V@7Li_{$&Xu0Z_d` z_{N`K*9mw~f+LK~={A3ZE5qId(8r_6NpjQHQUX{pCF*s!*>D(R6f+_Nw3tY08?HD58G%%R8+s|G$Vq+r>gqvwI;x+wJ{ zc`%25#V-y~g1ov$p<6`#(i9}6XX|YN7axz{b9gP6^&zL5l>+uu-)Qs`4iNK}fg=GY zxA(}MyU{Uz{kOUh?r9(#!Iklq%cEH@dL+;HFER!6PXL$(I?J7^zK9J@=%R!~ZRsDI z77F4Lp+-|tjv`qJ6ky)VixQ5{KIBTHZn@hzBwhO(0fz?U2l`hNyd@Ys#VtR&TvW0FDxxAmKd(u7X` zw^a%=r?mp#H)J5!UvJO42s*lU>7Z#k7RuN#ta#3U_YXrDifuzf1>Kz9jsfEwevvYzOgee}C=ysqtDeR`Xh=IVwv6@-W8I(6jT zQ|pFDD;cammeZuu1u!m3|Mn*`DNO4^I&a1_h_FSGt`FOlpkSgt5h(r&ZaiaN))huh0{t6J44YRYlf8W~Y7+_h6wWv&E?&FAd>XR4;dnVIB-(3ss}l<` zNTnLoA>w}EG zaVRg((jxZHgL(i!sC765P{yIx;R|^{Cq-hq2JME%GHSX6lp#yqOkHQ$GG=X9dEJNk 
zp7h*rb{}s_Vi;tJNcszxtpwskR~zJj_zfN09~y1%#=)bGZ%uqrY)h77c}HHT9@*nW zJd5;hcP1>J_WWy1o5ahPcQU>boPyl9T-3*Mt%M#PklybG)m}PnxXT?CPmiQm(AV4P z@*RKzNEFWi^vkXzm@H#+ZVSpBS4cT(n%6E`;>W4jHEZLeBj1o1YU7UoX_NNqS&s6z zHak96aid~r(9`&=cxTavvrwor$HD^Sv&AAzmw+IM-qZr_~GVqtNxEo6i)@uV?q1-2o`A_qpc|hqSt(d*LMiBkRuD|BFRMsf9(SV zd@)YDlDXV{#xsRuPc;j)FSLPtb3z&#E;5ka-%LHKYIETaaHQD?Lw52vc?2oE`Vo1f z3+6#AL44X7m%L6LCVSM#1Qq5$OBcIHCerfcN(viFUgRuI-lt%>)8(YZYd!5U)n$_UdE#t+pDk7Y`P+K^ zw+*_6^m`Cswa0S#nKl;i7D0Gdy@xE!Qn2SX6M(5kbv6&UbQmFTA1pRrl-*RGhst28 z_vI>Za0G#G+@laaStYFf>;dl``B{)IlV^Nr=F3yQ+8%%AlpG+M4!yZL4>mutXf4JC zqZ@mmXmWjL$4_g-`rO*KO7hd!Ruh@@Ov{Pv*DI}e!hY*@`_uWRUj4ffm%VQ^ zIT!V>osd`jIH`8?BQ`16@wjk^iiBKP*BfyYWI4faCx16)&Zc1J{fJWf0L?EARpu?_ ze}L1Y={gAiYw%gE{u$mSq@h!#CYO@4Ph7Z(QDD%iK}~Zf6sO)nI6L{FeMep(L1ruw zwUBm;r;Jlc#o+9pEUPHD3#s=aST-i-gGeN~<{4g$a*0Sy*KzV{Vj}~A5)?QdA(p4j zFrnCpf0tje?M*i(-H8tJf~12;Nd=uUl)#e!@8>VrjV}B&3W1#Xo+51)e+^8brv+yd;5?w5y6Irzq!oEd$==kRcicimr{5 zx>mC?ClZNHR$8mN11XjRAssz?^9|GQ^ZGQOh(^VFgdXuV3mv1Njv6LbGB0?1H3u^@e3)U5h{puy}UCnt4jUlb%fpHw9>At)Z>4-iwi90f~NlvxQ|`%mCa zvC!Xn#_815fm~X3CkRa;%TX@ae#x=LP?sGmt`Gx_CeTi*s;qxMoAii9<{e8W%v`L! 
z)W^rxKSdZat;&ORu<^5W3et#I1FDZMO?N9>{I;9-Uwx;8$QOuDy)u$AjfR9(Du+y# z4X;sL>+5EkY^JAqoSo`UbnS|le;G{B#C|$~E_^!p*Oj>%*&&=&CRjgS-i4ouaS=Ne#uGTSMqd5X1;&UM z^L|hIaA)++Y^77^xTHU0w7;XEz&l%-nvi#QIvym{T>3*k9KSo7`xdO%YZhI?-8_*} z#!|m4Dip@Pp5dJOcZn#mII3^rVl1-aiFk5r*+b(>(#m^F+eKf$Z_kTtzE%X6ANPhZ z1GRt%|9H0O=3CpnYaYBpechC*{V^Qr&view(6*vt$!2j$OX^L@YrR|Ef$!!%e*bN< z*3T~%NazPNg|rI2dDL~hc27Lyo7_laHtg(mv2Ejo{rLz7qrVS3bM;<8t#NQ5-|WFY zP~*u4j_G^&X_s6l0%a~7jy_zG)%Pejv8CWLj!PPv@3}}zRBZ&~IM+#4TqKpg73y-s zv~leA^p^689D_V_Z(Cw#{ro?drd<8vfy3i+gkK%XjSSYcTz^t3rulcP+rjH_x%VMc zqP>5SS=OZ{TyP%_)K=O$`$&F@wtGjOU5&E=%9bBOlwSVWp5^)wcRJwdMy$!L*I!;d z*hkH3PMR7HW}P^+?1|LOb3|@mn$^CUJr>Q`=b!-~7XI(04Wgti|8^9SGjK(~EoA+e zx!T%xGRr45xM6wm^3RqdD@ReoLpA|?<(_UOQRC(}=iG=^e^mYL0v<4!S85^683^ZA z1QZ8rR`F~mH0)g-;^;v^6xMH7M=`n@FF=m^M5qm)mWnUz(18; zP!9!c$KK#YFxNNo%lajO6LD8!ir+Ht}L5@4>T8GUmotr}VS#!HoL)-FGyVK*xz)&3RGkZ1!{^kh;vk zuBDwvE2B`b8%PjP%DwH%4yviDCJPjdx=g-&S2gIp`6b@zpuekJ3@|)aeiaBw7YsL= zSHeq!%BX9e&r>g^0}VK&lJE5_BIyNlCmPdkrR!ZoDsG-^#U}-5G*wGARY-Vv9G&fp z$(G9aoZs>VV^8yG1XP`}opM+6?9NzdA~iT;sFDJA#ESGdzSFO8E;b5bcHXQE$~R0X zi}H70nY^-1Kb6!K;5j|}mk-KIWr8Ro3kYVSK$gY}OzzZ}<@=?4^hN-3BKrfWa8VzW z(>8(-R!209x5qiI9e%N=l|y~#kZ75zJllsEr3{{6^x?s7A9Oi{h3r|#`rske?OS$KTw1DMUqc=uAw6d{%`U{hS|%L(`=vrS$GM&f5Mo&kBGw_JyR zi2DN28JIuQ6CM@=kUc;TuytN%@9_wN^(=HJ;!~>s;5b7&$wg9RkRDP7oAbUoosNyC z!2SoA?#*#9Ld1HgE?yVK@gHc*&*sp6Bjh=tmO)IIfU~ z96f~2v#nooyAXu%+K*B+05r9JPu(~dihm#EYwsBQMd{@pAdh5I` zW>`I$PjVASHCN^=w@|{VUxBnUTW&akkgGF=@Zn0XEy9+rPyoA1xwp{e&giw3p}SU} zg6ZQVo9*5iedmk?IiXg8g$j0ZbK7iBNdEtkbk$)^zVCN3U=o57N{4`yA}K>!q=g}k z(jlVMK&3ZAKw)$#jtwP7NP~cMjs_V5(lA1#M)$tI_jCRJ*~LF&+w;Ef^PKzK=RU`- z5`S^s{CS+X$&bql#63-TkxDVg%LG#UL6>`a1NFY@^cFh`^8#K)%i<@T5UIL=0$5WiP5kwW zX<1l;od~-l=qCh7un!l064h@MD{}Di;kMA_CP`?h%x4wNeZ6pesDhloAxp2QLt%Rj zTc4h&d}+x&yTO095WE-1Uwb!`63jT7$X2MgG3O>Y-|_n&A{f!DS`{7kKaeNy6mK_{ z?(G*#iALt^lS21KrM*|o8Uf-e`MXw)Wml(qjz$bqSj|+MspZT>yX$H?z*~SnK?n zMfA+mS?R9d@0gVVx4;Bu&=ZSZn;}cQEWzN*Fcne62$Hd8x|sLoe*qqvA*^k%ib~G6 
z`UM=yWnBN>(x6p{(eX0z3_F#`g)c3n=VgCl7mhc-=K0G57V$t>sX(XQ4PNUp&l(#I zqY=+TwlaNy9Pj=6gO=duG8Y>jwOvB5(5?|P9@A5WItb>lR@s}II( zbfNWfo(Ri%ClVKg9d7r*8rjL-{T7AcZ!h6e;+oyseEG;k*S+bi%xqWuoiZ@(bcMut z1Ur!f9VRR zX(VU^!+MEov>|evzZ9(AwLqos==x{H-u3Prugr_${h zK%9Zc|3H5j?*AD+vfPDzcze&!Xg42Q@XD>k|AmbTWqD2flD3z$Ni|lsWAAVQ^ZP*c z;`&5aAS4tJpdo1x!zz4Zf_BWq@S@iw;UkZ}=hl;h-tqb3A8%W(imn#@Xm4e1bBIzY zb@=%8^6UF=%k}j$B_ST0|Fi)9n(E^XzKmt&m z)G>e--A>&}u)@=z@fYFg-sk3$l28ED6=muK-)zG>kwab%#5dT~4YS*73f#riRH1_#J8q&8pT9@F2m*yLyqw!}rBa9F{|8ULe5%)k;@NJ8t)jL)a8QI#di;s;PUkgI1tGQNi4BGku9d1DC;&rFYsqw+ zr2Z!-Ku?R3aFXRxG&f9X%Uhn%DYP{Majh02J_15i^Q59c&INp|f;Se|>H=ljzdxQJ zWu~2L%Ki(Sqt%=B^2~FDW;|RRD9Urcrb$IZD4KpRxKvm>D5B79$b4j6$SUH~H|nMg zTWeVu4g(TJr-{7p8?WthT0GQ>EcU8Z_VZ$9IwWFLU1}Kwix;?Fuzx;W#%kchs4v2dOX~hzcNz!j zL}|&;HNhCb@j||x8@2!gdP%Go+!I?dbN&{c8B0o7nIjPT?b<5gUFDLHHjC_``LF0YUKNt=0aZ!u=1) zv}s~Jb})TSdTBLaAyZed8uR)~h&ok=0c9+FKeYEK%KS=)MZf9|RgVO=jTVyO|3FvC zy*!M&0b`y@`u^_00S-r`p2H*BIxhn_Tqd}*~KY4b*^ zh?8tTE?vr#>6l5xj-#`oB(Go$_wVgY@vOTBOm$UR$ZD4FNuGO?tk%*z5WQol$=$E5 zcG`^+Pvi3pF#@ukX1=TXnfZ+1XK%a*_d_ykn!Zo`L^q-Clt4JiRtr1Pr}5&E=}h?+ zVzD%0ldkuj28x2jMvYQl-Rlj465M*Vj#p8p)>b*^LjG&S;Y~mXPE}rcnL(DnKs4_% zWZ+KXk@Wu_wxHy`e0_0L`|g{5(BE&F3%N2nB~1BN6>F8(Iw!UoBfq_?^(O^THH6c! 
zsT^LcA|{cQwuR@k!Hy)mm}XY;C=biYn#_lq$+uo~3O{4I_xe`o-(~1??fs1U!zn6} zms-v?oq4%9g7iTC@wFwbekt9UuO99wkfYmrNEi2r@#KlSNnf3Rhu#+skN)VpfSU#D zDfy<=3!Be|X^^Xra!r#H#H@b>S9(s}Sz0fXZA=*RAyW_Up;t?|9$1-YiIt;9BiRe~ z+v*0Zza@K0B-@MyfBpyhCi#T*u-`cH8k!lk-@vU$hUr(89Las)zBtF1W9&pf9`>*8i}u#=s#%~ITX)gHi8|*Don9Q8=8F(1bm8D_G?>ysE&mLaQYYpT1cA{QKiDrO~wZkO2@I z4Yn6xKU>Z4W^datxMFz4_Mnj&I6Le+eP*?x>pIjkqDtf(QVx61QMVhyduyp#@+Ix{KKQ!y$MMwhshj&S z;a-%F@@{$HMRzGWAV2X-rLx&YQ~xq9gRQ+gta&M(B7y({iCS0mvqg<7aKc(v#*?gc zI~3(acc@n*bN{;X&@H_I$T4bUjne?dR`->V1WU`6@Hdv=rH$bL7EXl=?X>@Y$3=L@V){p zP>XCO@W?(9qtNn=^4dRGH!v5QZ0imf_Cf{*@u#d9!zYiRo63K$#Cz8poAOYr4zC-~ z%2P2hK}>pw%N7l9=qA;{F@8ujQGG3jRa=O|r6x$W8^pvQ>CXdrwwRb3eEJ!QT;p~W zVyDw|*P-!!;uB*k0Us{E_*#kFRj)5ntc;su$S24M7c{F$OZl)N>y~qrd9gkDyPFYpa=i{ zZrz#^gW|3&@`y(t7`Pb&Z0dkLA!+%4pkiy_Zc{UDAz7>ToO-n(E9N)Tv7C+1NjFvh zbhLk5aLUIAlbNDg@s7ue?q7!9nDaO}gVTk-0dtq|Y2gcIPBKm7^Bd$2J4Wf5-5d;5 z3Hzl5Hd;m=fL#_YdGY#>x6-`Xg4qqm4b30cCA$Gpy8U~D?8}Tz{}`0RMZ?&JS|7Rs zo@@R=sVkNZSw}Dpuzn5cQ5zuIl+3^SuIs%ECpJrYzvwU@?J^E=Jma&sy4BX#B<7F*Kw!a@^Oy zw(Jx6O=h=brh9IfEue1my$`Jypi%?TTQmta^y@g;mxV*r@p+L@}mO0GP1Vy%LGYhIUsQRWhoFkk6TxR$t*0EuWbimh?sh{-B?BuBhzhO&MZ zhvV>Wk##9Dc$N0ef8zUN|&)eN-%8N4~8e@>~K~5Q>o8Rm5LM}R?}9+jyI(Y zP%!AiF{cpVfz{<#{QcQCUix{u0qUK)GRzg$$}*Z~ zT0t27cc`Z1JcL)2Vjob(BK4me%3~ zW;T*RTABq-k2+ipn~jn0c^3*X)Fs=jC)9J&`=(%mj5^)vQZDMxOi1;dQOK{r4o3H! zre#&IK!L6MKN91tP{T8%GgJq>jZqeL7O~t?HdF^_kn!wJP8iha74ID>TSJrtd-0>fKh54hSUKpc!cUd(yIC_`JI@VnMI;<8I5+{iwHIGxIj{ zC0mVrYj3EnELAK7wuf&_86ps9u3+u!j&L>=u|8q;Is&@bB)P#TcnnQM#9NPzMxOWe zZEx&*-s^VUBP%ZcODYr@EmqQGFRn4zA9>4{TWuJ8V2lgekxlX3n6NP!5U){(fC5K9 z9Xkp6UQ`|6#5#2>G|;<;M@4~mYO_z$dk0=Y{qNfXHcl6sqWQ9|qN7vE*fi03_T<*fsV9kxUpDX40=sXIUAgCnC~lxaKlzxWeYasH zT5qKK6VbJms;|o@`v(OIqXT+T-v4T>(`Bq^m94_lm5z zIavS|aQ5t7HNl3<8U1WlmhhX`@gn3h4C#!W((ewmh$la|xc2KX&Q-H2onZL8Bsa5r zNil%kvU~5D?u|5cdQ%-s?N$9MuP=9Yy2?)OMt5kOvjB8gEg<5KGKphu`zhTQ! 
zl_5{(L0R$1)lS-G5BmIru-E5Lp{)#32_uTg69v^HNK&U?GsOwco#wP^aatHMu|4NJ zuG-0Xa<2>!*AXMTnsK}HN512Y76GpA<3|&=q9^)J1^Sd!v8OsaGL@xE_?OULVfH&e zrN-P=lk<0fnb|=1w6lDD+T?}U_HBO|r((q_8^6>{tLzIAZKW3n1lm{FF#^|gYlUge zicDftbQPfOx;D8f?;3R9sc8P!zg|ei{z%xgJC~Ng7Um&_30_Slol1?_uqWjsqs(^g|cmtWv zDU{(i1=U~67`D=*uDeN&k0`oxxr#zH5&no7yIQZ#GL#)lFcPrk0)}$AcbSL_<~2q4 zFK#zePsp)Spup(URHLrlxu|d+e)>({J&DDD?e4=au@Ww$x6kE~c(pMICLL2BJYpzjdJ{_sW_V z*ULV$*T&k4ITSk--v#RP>>`n&$)dBOkL_Y}1iLvsY^Ft!?f*db!Ro+GE{W*mk-iz- zk;(Ys;*Q6~u&h6_^Zxy*4v)5!t$ui1gW&`B^Q{KI{`l8qF0*sNNGdY_g*HS9NMf7n zz>%+5O!+UQ@m-k-Bz%sk=s*+EJZZmFxO>(lT%!|upOc>PIz;Yvbj@&6X<^=ea6`(@ z%$1(?1&<;hY*s(x1?uXKm=n}uE7--{V4geDy^UC{YkdG}F+E_N^|MSTHew)$2+fM;=I=0sw z<5fV&CXaXv*0wf^u5+U1eKL#XpHDYfZfJ6?t$37TK}zx5)H%4(l&ojO&F))~=zOQvZW8vEOUXYr^lwjAPD|b*K>} zui**V9Hj!TwO!Rk17m?x6q=#wf1r#L)6X5g7&u;TBtYC$j;8fqn{d(wS(a&0dl+B# zieZ$t+I4+L$4LRCKpW)I!$RHjCPDZe8_1d0OK+8Y*WZcEv~$IFQuDgJ`T9|V(%buN zf(6GSmLYH(_82ub3T**QmU2LT*Xr{pmFc|@fjX6$(N}-r?^MjKI<8Oq3kU##O-VMN zuH~RGGACr#`dyRu^+g!x-1>u!SXX4V)jJ-Oi&n<3GcA$63~+gz{A7r#7XuopuNOT1y382A!{p*0*yHf^xnf!*H!2?JVAX>WF>SK z#S@f3Zh8{>pgyFP1hai;q7TBu+1l3i_bL?Y*Kp6`_Mifzxl}I{%dZIf9v-xAzy3Q6 zewnZU^k&!3=tTA%Yv+9d0GD32ucD>MTnji_cB|YtvG>Q~AG{-IHc$O(o_L$KP;~tf zwwRUo(d&8c{^nJm%>cpU(##vbez?kyB&qQ!!tsz$(aM3MN22ssyDd=26b%X%Ry7c@ zY!9zGo~&ucPBQ|>U=HhPtldI7dFs~Cc!8?9n#C}b*R6Aw#c2fpj!Pl6ipUa!*H%m{wvUM!K#yYv7?b2*cUW1sHp)59$KY!}bYsv|=%F63jG>*CQb=LqBdU=zq z?LWipo1#~e;@dm6&wR-t>xQFr4_FdxQ!#df{my&d+N2}mML;%H(*7R(;qrO>C*hLY zjj`4YjK|FNM^NgD^fJ0R??KPLuY#qj`)~zRrO5*w3$z5oMEV*NGz|$!xh}KNZ0~yZn{Oz<8Jle)LN0(z_%crpLa&o!Y51y5ODbfU+XN=00XXPjKiu{uElmoEBX!%EO4LqC2k zxK2>EV+h_2rC(+^>GlaI{m$?HVe&c-lCU4M4;81VoV(sn8n24u@Ea%Z$M~EtPhV6k zc`-#&!LjWMC zUKXV&aOq&K23mAbyZ`~c6yC1F7Nr~*-b8LrE&Y=E;R6T=uQ>dVIj}IJNj6jm%0kSm z2X6h9q{1;`%6|ZxhF@ntofH1JO_$Aw#fN&%Cq(nEC$0PdX!F5w&PzwyG6DAQa|>FA zSA?P%Ys6LHZ0nP2Wus}$hB`f5DVJ`&`=U;v-gJ_O*;o6PeInM^w+!1IOzV5V;y+WF z1(?Txz=)HjL~(Rq-Hf5(?_&W#NJCZZPWx3J9t!aS`iM}`G_^tE 
zFLocR1&GDALJBt2y4jW!ghBi*lkq8ubhb=_Lj3dc%s3(d9cE!<|bZSzAZ=z{yS zu58a1_sG8RVnqDmsQ(SWb+i^eaeg1{jeVtoZOCfb-I1Te&m6NK9tlzZ3cFCFe|u2I zcaqpLiLHT!2kRHsrnm&)n&O(;*50=5XS}#2=nKEz%XWH3f|Iy{&WqR)qFeg6)N6l< z>W+HCzUk#u{j?r$&Qu*xGP~@)C4yIGrAOQ}Xo-?SXVK_+eS2E8IjyUoe*8=sJ9SR< zyO4?AY}f@8tL4uOn${v4`H{$6q-P(~XwX4eWJ$x~NbMJI#}BtF8>+)hw;k+&!@P~6 z87SJ@vj9Ng?(b^VQ2u-Kkdgb}FAlu^{rAXJjn339`V_Z z$hJ--3#4QCM3@@4c;oA36`2`(%k3F2_9zSMu(#}?Kh#ZFT{~Cpn+CRAn+Ct$n&!q! z3>g?JZ2x$|xPT2;od)!iq-cL_gOMTJk)7~X)abY(Qn7!AY`b&+-QZv?bOxSD#$Ye_ zPnfAlV*?eV$fcZ!+~lcsAq%F1w5+8}g9pPSw|0E}rmkN&1L7uObJOZJV1x%?U5u6J z>tFV4^eVX?6plTRyvm0@+}IhUW&5h7ggpwfAV%j~P&lXB=P&t&2QKd)6y|@NZH=qU zNz~rUdzqy5=+^Eg+d4`faW5V@wk?1E{HOjFkK23y|Jk$rVB>Vr17O#ehf z-1lsTY_7QKPmhfa)y1H~3JPnij6%IX9sYy40NxT%J#>v~`AeHXrUQ`EZU0xuHVM^* zFJbBlfWvD3oG(|EGIfY(+$_lnV&^?_LA^h z1y$fZ&p;?-$3t}W?#cd!+QpyTgtxv}oSK&kFb zpFDMx2YIGqoY$}b>4i3q*mr2I7XyV(Y4;>qXR^&t}44B3MQ((TuH!*((;5e#A^v zpEL_SfKo=YAr0JrtPlSwo*Csp9laHDZBE2G(Ys^=g}PxcoZMu9qi#HHQXAe@WUh`Y zYsQ||>Q?jCZ1a}pzQ~XvQH^0fRLdX2ri3NscGS&G4nJWyDEiC^Pk>MC7s_6$9Gm~P zAC*0vRi4>k(J1Jb1fzE+fFyz3v~YYGVi?2q&^2;fN%LXa7n4s{KLBTuhz{4;&ATD3 ztn(BmS1ZGjv0P;C5NLt>!8A9cVYCim>|Rr@KI>1wf3k?$P$s1tkxS^M(!l8a3uxuR z)cj#}apIh_;1(JZkm6Suk_N_dAHVRi`l6Vi(o!M|g{=C9d&7<2Q=b0WWKj0cHyUj# z*I-VD;MUcpsi{okX6uD(10{9N8{>61x@4&&*y1Rl%u2KuY{ft@B2xH_ z7Cw{rE;QRWaTZaRCnZXy!6XSe$iA8tp(U@-wy=a@M=Yg;OR8TQTruafT&cp(0`wMQ zUdj)n7=|4WWotz#uz|6*Vv-uAEL?|cQ`qxvW1Jw?7N)wE@AjSbc_BTHa*J)JTk#Sw zUgJ%)Oi6}9cM=Q_nRP7aN9p%;#Q{E6uZABbF%k`csamHEZ52)&<5}7kVTv zTMkml0KD%k_r8Df3|q1e!rnL3PpnGtJ@4sSA{`Z^*}E;4^=KG}BWyh3Z(~4gODZ@0 zwD^o(fm$Dnm1HbEH~rVuZCyGv93<1dbT^i&^OQ$GtgBwkY-v%#!p&;OwQPCfjpl0? 
zH{5egCnwKD4a!%EZ5&iOBlJ{HM?lmdwrKl zd4W$+fatJTFi3WhO;by(*F7?o(xcV>aOhVzvP6qbCLr0j0XK1gD`@TMoo$7n{qp|O za8??6KTOysoDuGc-zY#;Myg5Di2tR!){o6#U!RvHy>k{=NYtHba${KOHbv1d1O}lO zjHD_R#B1LvD|7?K7lQ5}o5mDC6EJn}pP8u-wam`?)12UGb>SKCMU&X`zu*<9d->2U zFoD!{8#Exp2YLe>)o1IY#mP=!mAGM8-s9j-Zx$Xk5Ndxs7t$i^xw{qP6Cn(8ijOWu z(gL1V5h5mpYvcf&*7OLgH7^j?QovzBK_RNn$G)-u4)Asn=)W;>ke9+4&ayFrag(&j zSH>d&Tt)Rk;PIl0Q3vYm%s3rBA2Gr0_8_HUc?Q()Q2(D+qGt+Z&1H`QDAU|3Db57o zX#?uHu?m~cgEe4yXsjq;N{g{sm4jyf!}0M&0k{XD82sd<>FdRlUn(3IlDpKAxkAH{ zhaq0(zu%VdJl`w-6M~h(a~Rwb)WZv+wXc-AzT1D~L9)QV1_yo5|4Lo!@@;- zwT=j`IB=o=pJeI2)`;4#>vn^%&Jx~E1pDrfoCPei%^!n+#VswO{^#nG@U~`}EZ+W8 ziCk(rp5RnG>AM3= zy2c9iQhN%&wz~)$AcX_f|xA(Z&$d)325D4(*{v8N4_Ak z3#Yg`HDi?emf6wEmC7vGv~irTLIwL^ISF}U+hfXKia>{XuL&gIEp8HC1JyYUo=bZ zmNo@_zZ=NAEn%NY`Qqb@@{*=&0Ed?*tryW~!GPbcb_H0uTx1(mIveo^L-j$jZU622 z1v2HtK78pK_!+4-;n0rGKemKQ&yep6nariV#BlJO7(6J>i z*c|t1<5F z93L4cr&SxrnvJf!_Hdd1^87vl8_`lzyIFYt_|*glq}49LmTTAJ5Ow=`kF0)*_@ih= zeZahM%Ligo^WkrI6N7BW1FLv#o4N3o8!Zd-mw+e}46H%ytN(%M4!L#k_CF;Gy$YY- zbvxgf4p8X_-|XJW*kYy!N3;ks6=`p+Td)~5U0rSA>n~m^)~k8AB~PuNaMb8 zFJ!|r@UxaUx2=DSWucaF-czmJ*cdkgP8p3WS)J$_abmwC-omDYD^>nyw)g&dgezg= z)L7~*mA)9Ke20JYUg_=V5_Tk^AYUQM^W>~7dgeLRibFnoNq*?}lfdtw~~#H!o8`5JD!l6bK#C(?t`BTQI=cT< ztPw1WVo`S=G61S($#(W;Td6UV^aq0WB*s_N5b4rUC}d+$gq=P&soUG_gtiVIHg<&^ zkpDd_@Btj(1iBI2-?kj+YuMwh4pDY{n#E{xM$f26r*grCY`7@kwbwEIF*3hR<{d?E z+}?xyPkk&e|Qc45r9bPHM zU(lz%7vQz1?|%6t&YQ|*56rF|EXUDa#D%K@SbO3r)4=2q8xGU@nhi(#s<2iF~-ePHzgbCMGYYwFo70wyRX}EU0BAQ zO^?M{xI5KsHn(54VAwxBPu{dAME?fXTQ7q5*lvVP@UD%bT&V(&S+sqi9?=+}on=(u zB9tL~xlKGQwRLjvyON**RP=e~LpoeZ29K!C_M|qZ5~JgOFcT>KiYUw|neveS_*}Hf=S;(QerL$TzubwV{Udk0{qcdo z-sn1fO(Jhev5eWeTd7Gxl5dlKl!7{FS;T2mr%Y8Khs()RcatgvLTc$SScHN9xQ#xh z>T18tC-Lq86J-;X?rewDrJ0Q%zf>C>8fnp8301wZDGJ-(yQwv#C~y7d%xI-sN6jp> z_Zr5npw6SMM;OfMe;J5)JL;GgxX8x)vhU8zNJ!I-Zx6)TBH%!FFfC-MEMRsQkHresb)yi z3rQIbWp?e4RFxN=V3AUkocBL}b98`H`y()*0J}K?7`NvlUsIiVvi}EyjeeR0 z-2{W2eSsTL&)SRAqJB;3oCDG)H77HIPD-ZPAhWy|7ojSb(G$4ZD&V}vK8+FOmf^Se 
z9bPvXko_)#N@NM!lx8IA#)U?$6>P)=R)*iu2cxMswO4dSA4#enaJbDlyS`XxkZx<~4DQ7>1uQdmG)c0#Br>`w(nZo2M+|9u zhZO!2-_v;e7m>>9-MLT!7R(%=4sK=QO8Td)$dg*BG9$w`Y>jZ*ANC3~uB>!#T`Q}| zd$7kR#^589v@vM}W_E)xfa#s%TZnoriDnjrx8k8m*9OsOOe4aSq`-ZQys-^eceB)f zWbM4ISO=a4y6bvHmAr@1WwXQ$Lne_cU*aX90H>NI{pfla$JuhiGwS;(cZfC7%8G#3 z&?gYMB?n{3>;UgJ!6Epf! zU@l-XU-$Ygs2ODytC7wn3~04s512#Fr7y&H1D&%JmxN|>q-@pd2!ATjXKiN|%m7+) z0=~g>MC<&ImQ9VkVA#n%>BWZCuFOe;89|O)kCDZ0DoQ6$<$4Et1#CPJLgjSeR`|)HHW$RxHum z!$$56CIdEdDtx2^e5N*Ha-L+ob9Dola~O^|^t$TriSI;6jBz{da@sx1TmI6SV({uF zJ7xNkmGk=tM;&cq%&Bt!b^#WznwMl0BrQN7iw57Pb?(rGd@Lh^(VoznA*8MN688E@ zRW~n{672iMZNm3A^Wq&3w~o>|f&!jtNm}c2$|Q+!#)=Aq=rJHRR-NGBllQPe*PJly z>0T-jX7K9Fi|M3=yUY4*!a(H(ZTwA>mFz6dn*}C%X_Ul%3G>27H26$iWr;1{!&0t= z78*(Rn>Qm7{DpeCZura_t$pzK-|KVO8y{Qe+O@{WKuUX zooMeq?>c&BHu3%fsq%3tDAvXZ@A9rE*&`wNi&D>=1Ne41Ct3?$cMthUUQ|$~pFJhj zM`}GBS%Q?br9D|!9*#YTwfn#{`RAq2tMS58yXVjKoPSignZm?CL;81vJ_VNmqju|d z)jxPi8pD(GR{zl};!75tQPs`WQLBIE%_^vpA2ZYseh>(M7;`bo*0{=^r+sKSXB3Yi zAS?u@f-_qJ#hwKfr|+1$X+$cq;V|Z}KW_LMqIvHh`ReZVx%H}+1P#x{q-Ff9m$>|k zeN`~c3lUBkJ{fJ@;_r7*<+QlMxDt;kfAg4Q)?uS*=Xk!C%Pe%<_XR&Fx z!IG|T^xrz`q|z_vCA3&!C-3fWy)62!x=;jUSq2=${123HQDl*GnS;K^LvvB|Xg(H3 z(yk!qo;aDz;ktmF@IHT~XBn4`UulzIqdvB-Q|@5rsAZ=WTmXH6RS;2_b=Mxa!JH7| zfBKMZ<|+c{<2^5+yXpqBJGmpg<>L*zVD(yp9`BW%090z8FfglkKz09u0Z@nw^8S;L znm;gq3Q)ICfcJ>144FS71}-y5BfIkqV)PF9oDcE;1C=(Lo!mO7K#bbalJC82R^Iiy zVDZ8?ixS=xr>F0COn47Dv>2nBO)m5ImLzk_d-wXUWW{lAB_RrRnH5@9RvXIS?#iv# zK1<2pse=3MA3s;&{8)*Oz0=*EZuVJ&)1Z6X8)uHaTRI^g_foxy$_*E3B~AS@+8!zl5g^X^=c0DJjb zN?WdoOz3>1j?{XL7OlU-{KdX0*z!uE%*483jMp9Y+o0*^Uk`RNby-6x8KA347Czg; zlt=4oyb2fkq^T2q{IjSOdQN+{#&Z}F;{* z3vq6k+Ts1L$uV)6d04HL@r2cXQ^4UATlVD4LU4(A$1`hW!hV)o0r0bNN>;m*x&_`w zI~y}ka~tr-*`p07Ru**aj_4$dBv>+!&&yivX zr&j*J5$gF4*fm}@|M7lpDJ)-I`sM)z%jp*-zt*SQ?ak3xU!B43HDO408T2x%5qj^< z^PO9R)PFpeZ;qD$F)d>&J%>~z)dlJ%?szTQh3h&U$it0%v&qew9KsQ7wFy+jtNvr{6gi2iw1?ZGrovJ$v z#S>t^LA(lfnK+s#?I~fo2gh0ibpfdPWVWZ|5zgzmcG3;Z-O#m*qnz#Jj#^{ROz*yIkIwr|vX#l4FJ=x^ 
zaHA;}vRqC{z}!=_BH&YlLbXWYt-Y(ma(f+Hlaf^#^^o^NX*{9BW&3%3VCN}d8kb!q z8?Hu7s^&WIQojs(dYk1+Qk3tYL`R$Tnjrk~JBr%_3bu?~2-%bO-~Dg^=pdIyB@x(X z+Ct8-xD^;*MUN80RUWKiM2yEI^;N63UND61E9kD)o_||T@b|o*%dW~$)i`m*sF_>B z*~6@W>UTLx&`)4ABRJbx)$2<aK{Rqp5O0`>uL6-hy7*&lSbRZIeJ=Kml>K~1x`5jmA~DmNNcBG2+W^? zp!6R?oRjc)ZC38~BsM-@Y&W_^%thN!YU62qk!}yS%%m~9pcO-@H8tDxH1B8&7Rb}# z?R0Bq1c1fMM!#JjPH+G)Nr3Gdl()L+CZgeeFMY^~&G9PLduQim`Zem7>_k!qUCIO{ zAU2Og{MS@;M>-5oMG&L68VdZ67a?uMTXv5;D8>Z}@&+5FJ`WK3Z->c#Fr+Nf<#Hw} zP%;Wgc0O$-mY-x+K8;<=pLP=(NZs+nT{NuJ+Psw{)+PZg+w+KaDs_{gU!j zA0zfH#mnID-gKGrP9ByMz>a_fIehd2P(6izL=242Q#8t-L;WKXO_$%`LRJluFB{8_nxYqw6BEUV0bnX#bpiMf74ct+|01gpDc}9I2%Yj$*8h%3 z)_?c!4OA4GBIg?tA0xYWBM@M|bauj$mZr6oZTMYnIjsBjhE>bL#QmwCioN$Z zkMd6(yC3kQ|NK+q?dUDbB#R;N^CXjmI3#-w?-cWrD_~{^>kdrEG314mWSQuFoLiC% zSJ2az{+4*#Fy_iMp(KO_TjO`ZWvia7$}#Xb`tDLkN3<~D%Q)=gbw+~&0%GwOy}AqsIN1V8uIm4h8-JuGcB8Fx04z zNtRF-NT?QjK-pi7Bg+#kpzn&as}k-Qj+whv`!gCge!=aaoaduJRx0RZjjhRT@!99{Z^jcsLRDefwT@0MGv8s+W$Re=QsHrGZzH44*^05VgP} z3NbGsz5vJ_SYF{Ugaj~Euk$-t)I%bgyQaFB?{$z5rRS(%e7vQUV6l?|8eD}KHkQZ4-i&?RJ#)QiBd9(UOw?S)c zxvXZ!d{29nFCtWjyHezo!lQL#>PxaCmlJw5D_r_H+m7w}VK)A1<8?)b!@G~SddV7j zkDonKWAYqUpUUEB(LnESsWA;*am0!U`6!NxrJ`J-Z8529_TF`Ehul<$5UI34+i*2{6e!FHj%ENVNl}B1m2$i? 
zau`5lYB67!-!ax`_0>awU|}P$Gm46yk)6nM5F3UH_noM~Jz%j%C3ph#Oe$7?fRu&!XUXYm;~) zM_j$1z3Wusep1AsXdFw@H1|*BVPT~wWL1E>V$hPdteU~lw7J@nnbAeEU?!cK&hS={eI zhnfT>dM$XoB1TmD$`k{m*Zx#QEpJdc(SvWcZWmuxCKu+^HsyoM&RG^**qz?c$+^Hj z153K65E4bMO#evydQ~_dd4El>MZ$N2Tcx^lro#;o4)h#sJX$V&==hUS39cHcGCH(l z@-wQv`@Xc>s%5tN#goBMmCv^mMefydt=0JPf*K_N732Ie|gCSyz9HTSIjsO92LOuP+I5I~bvtxc5H4C!vn z0O@q<{Z8y`>__>@zsjJ3dzw1E7ptk%cYUOlD=rPRBZYr!(@pLloIT|%;Cx4nM|?WS zeX%b{pPe+45D0q6Wls;rn`AmiyrhB*7&0tQPXITjv3oy-RA|7URMMZ?q_j2O2a=3j z3;^IkfF$PS&zrP)*J8Yl-$^NKBq~c8ScaNl=;-(HizfU6LIJI(zJ%!(7Erff*(Yj4 zc*?~kU}XuF4-4Ci$K@kcV61n>*{+4*flrwjuAwD5K;A+4Zt@|3UJD1vHBEqfuUt7Q zkjwAPB<-Pt+8WLl@U>voWsnCaO&p$?Xk2gI?harlaxnmtJsO+udIUZGLN;_NHN=xc z_K^37(muI^e$Y8z0kXDDV>!gVr4mx%W5?N7Khh4YNR1HZvP8tx)u;LB(cqoO@6)^e z!3m`NaPW=v1=k(Ga_;eh>L=$mJR2{8>&&D)FXI(BVjl^NRQBcY{h(0U_EyW`_EV8* z1El~BmTU^GIP9xVHQ`!^t#0JQcbtkW3&)-27u z>oC95UP>Nh#*_WW?;khHmjnBp`GcNdq6d%tiVhJ?hz>o_J+S-A4as zTm|gB)Y7UD_H{$CT%Q?CM~RkMk}3KoHvn!_M^lbJ3Bxxgy!2wFx1+l^uHU@{Zh}$c zf@42y=b;!VqU}}r;AS5Hxno4{ploAwTpjlyf5vl*v0nZfnn@lVnFp#_bl?AZ7r2b& zfu~+5k>5Vmul9F{?^xN=F%@+31*ar55Dt#A>9Esh-+l-+f4V_9#^EMpTc$ZaaatJF z;fn_5C1fxczdaUET3YZ>O{pB}r~JxoD%jx?kd=X}MH|Gdp1qgWlL5XnsO4GeCCp?! 
zuqVaUn7q7pMu4REH&ILB6^8fnPE)Tm{9j4m9?tap|34-}qNE~bigKvrlngVH!;m@V zP~=n&iOrl(b4cFAm{VwmkaH5{lv83(nGnj^uq5X*v%as-@AvPnUDvjI*LA<1&&QEa z#1Gn}oxpHVC7SqT=JJaMvo5%kLz~z5^Fi!8tN6}v5kY-^bILnJsk3scgy4*0*5#A^ z0~!!=n&|TjJVI(NtcA_t5@{bld_9a{D&Fq<$|ez?(wI2#Bpr2myeig|rAwI5QlV2W zkS6Zz(|x%>M*o8+{>G?)GS&UwJRd~Ow?pTfs3^D3Oi+#;UL_e?5Kd=HTr+0--or0~ znb?Fp(u|aP{%!#KS2u{?Q@lLHh3z8B+~wLBP6N`1(GV0SeO`1M=n#v6yV)bY0j;D-NXoYfbZvF$^&}OL~@K0^i zywTRp|6Gpk0=-c3iN@(Jo~beVWwJggyUw!-o?BQdYMh6ysX9gR;9{9fca%pP?TCMn zMM`5VkqLM6GW}`=;iYl!J2=y}5pJ(p;P(%Q2%1y8ke!0@TA?K3D0?y6uljz@ox?#@ zDe==rDzHr{Iu3K8g8O7h8%7(fPh(vOP!^rpu!5ZrQB<>D-`p}Ha8Haou6~c1nutn+ zSzK4%c^AjADc`3s0n}ERN?&g1`p&B%ln33Eb{@JfrQc8Kk=8s2rxA1y7GEsa3Lprp zPlM+kML%k5R%!WuURG3|^Y6yMS4Mm&r&z6iySLc_gnY`o37TMywrv{uJ2-s=*1k}2 z<4a*%;4X&~_2v+X=SQ%8w`~Dy0JTsy?Whh6OFURiy)eD#q70`TD1s(dx}gmrO-ouz zaIp9;6whTFUv3`nbX@7KQG_ueH~AfoYA)I)T6P+c>`4n%5y#i2)cFE@Kf`BEYT{TP zlqO;G)=g=+-PS3ukT^m*0QY*Oh82ly7l!;e zvqiCQp`RPLGZufBzq+JXjxB{5P{IM?YZrBqsNc(*{gsz#1gL(fYG!)U#g0zNfe4Kq zvFWYyFNcKat)$OjhS)#Q4z|w~E`(^mF!WfrnseDpy#dWT~*k|D{3l_a^J~>pD;$*&PR) znPh-k*C~MX2x$}6)!LK$mluKOivJvYN$5XtJc)K-%(g6fGrpRq@YD9*+ZVb*U}th> z0e7Qpz;-Kb6Q&>gs83pz9*$K12g(K+-0aDB1B0?B!~a6?%qGRNwJh)zps;|G%a`pm zh9`g;bmcmbat#f|eFGuxU$M9$Dp4#j6YMhb#&)9g-NZHndGU_TEkV(}y}Yf~ej^Q_ z!V2PYYA*mh`cY9Ye?jaZXQ@LTd? 
zDT*E_kF6XBCeD~s3l4$r{ze`S1Nr86G-!Sb)LEut^lph*5nZoRFi9{5{N8I*8ZPdl zYEBz)`4l{0ay#Q4*hxTOX(X!=7oH35qPp`ZUgv8BGM&F)B=+XqDl3@O|1r|TUi!g<)k z(z3;eYNvpaH0Ltul=<^1mr@JxaLoPWnowq zJv-n?sYJM)_Iwr}E<&pPpDsr_NnYR##bk&<`>4HB?>G46x$teLfx$)vY|g7nk($W+ ze9xu=|Kcmy>`(J((4mrx4SX7B_cA;g$Fodr@v`^-K;<*eG*4{~tFw*DV4yjSPa-q2 z9<(0n#*Yk2Lumd5R{aNs;46WEJ{E762GfDYsxewdNViu{un0g~vO%KScIG6)i|ks= z@(?4=Qe0RXu=yt17{{Bx!^V3{f*wBO3I-B=owrOPj1=OdhWHsG=Ne4^o7i2VHX~eF zbY{Hs#YJjH?!%O@Q6yt?KCZLj09de7ic$jD@i4(}pzDU#FD{5;2`$Xh48uCMA}IU$ zyr?su=ytr|<*aN+U9%5{+*>9$zOD`Eh88512QbuI^y8o5Ud<3df+Zx@#saNTp$2aA z2X9Ori5a{`H8B+w>GUw=Z^~~61mCCzVTON*`R!Z}<_1*l$Y`nSe~ihz$WeY!)oD$x zpiSH;|AAF5OUyu&iH^oW(#oUNy~0cW&dnOo{X9^S`{^&pjiSJp;a|CV7Bf~)zJS2T zqjQs}FS^u^Pk;y8qolLLog?~rfmE}K7C|8$RT?8MzVgmz!h};%rQ&lw143}P)VM~q zNXSP=tsE1y4%^P|vSF)ap`O?vOQ6Xb!%2tFrjkvCO#L z_tnoxpO%KETGtHn*L9&kJiN1y6x_3Xr_zeig^wu?;OxY{=xOk?a7$PEH$~&neQdTm0i3IjQUK$EC26l9PLD=qcHH`B8Oc%&AW)#Q~kJZNc4P-=0hn zFZI6PKC$2ZH?rx}+?IeW`m+ilw1v0OcyQYQ&*Qoq2CEKfcW_V26XvtfxQ-o8N!hvO z4L{o}s++z8fibIX2NJKF%6k6A)%LLUfRL5Tt$9#Q<^On$6xNQV-dN^vFVzuRjE439 zpzMPqv8}8Gd!zQg?qrsQ7qSyWx8=lH!c~0N4o&Fx=MU%QM0avQ z8-JZ(SL@OeqonJ0iM&(979L%3r=lu0q4sewk4jkf5_jp#iO2xZ(V?l7YD!~)SG~^A z1fhr_b^flya~xGrS+KM-tkEctQg|}qt>KC&m>iUYMpuBZ>or?-d3$5nr-8!1i>Zz} zgDCp14F0ZylBMn|4TC5ZG2t$LdblM}vIR=5@|f*c0r14E6;U0lyWvR$CGQwuCJvJ zR5d8@4v>{XLY;sYfEiWpNGiUgB!Y{|;x1RmUR4QKS_~IQ>B~+XfJl6DIn0kn$sgWYd~qtd z(P6^(-tlL@GKlNqLiaZGkat1je3sYbD`EpSYI?nxIQ%I{)+{F|Q8h`Jo7c!)YF@ex z1v6?%I3Y2RJx{pxldMNSITV**)B(oA)#y21n^G??Nu1!7Nc8&h!c_RZ03dwqI17=x z+O{Ra(d)#XR04qmABH`azR~|Z8d8a95eb#*;tHvU98c|l^bBq)3nCikkL64P!a` z|H@<2@dH3lqQ8>;H6nr_M_n>R&FgT{DxFX*Cvp&hd(^=wO#exHpP6&HZhP0uqn*#t z{ou4h)FPwMqz<;?&-g=9`1{cQHZ1(mysE2W^99h2A`^IM>i^pY8=QPV`uk*lCah*% z!@f#*M|~#D!g?}R$O!&N$~pZG2?%6aSu!1eTtIwG^hP;qBBu&`#&iZTfvpJMCQJD$ zY1WM-BZciUjy9AvZ!gI?cu-)$S!z@LLFEl#^)9^LC(GIOS6V`|QI52?%}}rTec!?} zET#ou(ZYq3#iR8E)Lsa=VPpN)1SlSd!GxN-Uz50KfTSojnp@xG1JA7YWeNb7w4x z5V&?v`^$=Y(?0)_)eJO03zUMDJ@8P^VV0)zW5Umb>l)?gzpJz8{)1Wq1nQT)WLGD2 
zRKV6v9mBYV3-iBu(AbV!BT{l{tLzo)M<4CaxEG`eJP4!r^P9@7(1u<*XmJ1t&>^W%PX%rNwjD8i<{G zx8o4Mmy0P_Szb1j&pS9x4s;JHecvk3`pwkF-SrdS*-N&AmlHE+PyMKDM{|XpAi&F%eXa8hVV=|NV-G$wJB#hoY5OgyBV>NAb$2h1Q{b--z}{u!E~bk4 za8Ad?$Fp2tli}@S z$>9HbqNlf^H1R^G=?0QM9PYSPQqD??(c{xB@9R7yM zIX;N0(IMP;=!oAu_~k?Gx&`CbvlOj;F17ER;$l3l zER-UNbKHEj{OX5w$@UfJskED(E zgvQL-X@)EGy9+fT)1ttF1}tF&DKb_xU76mpRod(*?~IH1%R75d;Zq`)}xPR^P`@Se(aGCYx)@6m!#g ziH#sXSoVEj^gitKfvN^yOvoN%u2A_|dvho=&skC5KPKRY)XZ_H z7nc5FU@+`BC_Nea6Vs>3RX*;`6H56g?~hXXq)AiHiN2E}R%}W^+!GpxM%V8y7Qizd zCGHyXDMQilLd%2HGyv}`UHDu>EBS+Vzp&8mqF(2D+?>jv-K*~WU!OnL=;mfJM7eaP zc?6pr%0=gA7x#pa_j8@Qcs9m-Z%JH=l=Q5hZ5%$M-#0F>3rx|h}PL6q$4vS^VyQH(ZXTYaYAAC=JjjTX`$QJF(`X ztWH(U&u{;A?`TvyDIXquYEp_{wO@?OG7>JfS#mi<&H~=GW>N%Ht&`!?9FYUyhDa^)3i&Pl)XGnrxSa4{S?6w0<*1i)wAbQcQi;W|H(>A)A$w1heOmFKxY#+n}d zod~IbDGSTo<-2;Vk;^GvaCaZayZ0^3me&pkSY#cw?gnp4TfK%gAyDG%RsuFc~s z4!Nw}A0@RCKZkZGe!bi^4I~t&eMyPx4E^)q?*+^Eb3Y&I`~>z7Y2Qn)g`MJ@-Qj}5 zA~icbtQc(TT!C|*^{|xak;t2LJuX>Cf1XxT@f@WeGd_F#yP>oQ;snsn8d2j*&1F6+ zye(j_EGSVS^~A1#SHm(z@8k*iDwaTFxU${`^X zkd1pL^cTof92}7p03M9CPkx4Bp(P=CLZup+_N_D`)lSObT_B=feHi4W$_a-QBdUyJ z1-bNf&CB)G_q7-G7-51o3`z25(r7!1r&M98fPkhtz@XDV>vq~vGSjzYJlru% z2k}9VaqU#(`!RER-L1bc7(>Qu`x^ejczm}b)3)_wSL}ZvCIprp2tIc*{_J>#cfZ<) zC*70o7WJI=qB0)`1|E zRm!!t4(cjXDrc*I+-UyhvyiWrR2fg7suG&@3SUU-MLaJOPYxTze`ylv*0$Od>%AG9 z^SJgUL;N4;CgZ9NoXrLHYhdF~*d96vvw@7Bc$V1FU?#kgpd-_=_anr9nllWl>ZODu z2aHQm3~@w5{Bsy@_m`!u^`O`T#)id;sZ?jmALiP=#Q2$~LD#Zl=Wg6;@NRg1mkIVo ze6)pO#U4~0z8pI|2K!Y02l)bHAzjn;tB2m*M;xlfol~%Scm{$ki;O+ia%v4tUm#2rg6UZ zwa`M=x`}wQRQ^n5bR6p~gH1$8 z2W&9WUAP9Hc|5(2u8aPB#k4n9Hp-eb0mr$=6$3%m>8LKJh@7~=D{cbk9! 
zW$RSDksG@0CP!UgP}3I6gokjLd3~Es;{W?9bqQbD&OcbTN6my5s_=Hc3eD}jCFDV< zuSaiGzG#`%+qaTC^YlXaKTus!PEVFPdw^^5KhU_pXcgs~XQs$Dyg=OsN!epI!%3gWjvB|%_E1V7Y23}yW%?@j%?a} z+(w}p{JOMh%JgbI%J*>RUArubAMyoQw7bW1tGaf)%4vFRLA#YND6QYlx=zladhw&B zM8*;jY!j#3JD1rBVI0wNj{fAR@aV((n6y=~$BH@fGN7*62&Vb0LlFCq@Er73*8bx# zcDD|{n{g5%(=LdX8GO%+eUaC-yaN4looX+rLu(tl$fRHu=#~Hdl=+LHzeQiV@qWj* zGu(_``F$h0DLCUXqwQD*zta|l7Q3lzTPE=jbfM!~h|rJIbJKHI#>9RsLexnpT@|T2 zi6|!bQ945Dz%1BGpCUYWS&H~`D&O-d+&rbz(F3^s_d~^eZfF_D(;?5kb+zt& zO_SXBr!`vsywLH@#GL%AaIA}QrEukRu=FG9zQRhqhZpBuzcvK+WfCuqy8dpMF|w*a zk;KLASE-%bkWu7>byisMOmE!&%(Q!K71a*AV1Fkm#CK#k2=8Hs-v}}FwRAXA!S^>W zh7zwd4JqI7nBg_Xpx`DPHN}aBl^32zy$0g)?lz>GcaF**MN_P|Q4!vAm&PE!X_L;ZkGW zq>+x{`7`l6l%^1}n&pwXC@2J9M=@zKiDk?#zKx;4K8DD!hWK6-Zh6f#I_v`57C~jS?|aDo z<1zA$cVi4~;_d~GwY zz)aFxSdYH&+yKqssCP|0>WpE}MMX*#6n|0d4*#_zUUc5MnNKY>E-U;b^emN4Pl2F5 zT*n~&s6)B}(_(2`(fZ9OXjSBw#ou98p3;GD9(~w)6u4R(U2sCB5}9yMWYY-Ls(fqi zblCT4fu0!#d_DiubY_Z0ot^sgCl$I!dJHQabr(SMDEkLAdKE@CeO2>cF#I5t{sEMG zV4w6L8K12(B2NLOL<1w=+_kRD#|2EW21l>xEDq)F@yc)1fk0n1WKC_tK-A4I&%}vs zfX*3~B!^dl&raC&+m22p+CA$534I6+m16VO3ZGP9D6iO==G4Tzw2n=gy%h^T(Aco};^uvW|pWM15S;nK5M?MdS4H79Z>iTPwZi%#!Xa7I| zOn~gp-%V{5W1f?F`}KBj!u|0=^#K11)vsPnwe)T0*GG81N$GtO;`sSDb8rB)7k0+| zbyf5~P;6nd@=n_flYjg8tw)-G!TKv3uhpCtRj0)6>d}#uk`u#WJz@XH^|4xrOGYU9k(;xI)79NZ_=d%o1ak4DDGR+`);RBSz9xq`!S0TqffD; z_70YMk5Kz==CSIq{e-}tVm%J{A+kay!2e>=m1D8*CK_-Vh5t2Tdpr>r1IeO|NJo}A z5-Sojo1A!-{nu+w{y*OUPw~scu}69V7w_1HKIO zB0S`1EF@Q@EZ7Ep8U>`tC9T6-AD!BdW%=^}ZRjL2X#PEHV47!s#x3g)8Lr~8k36UL zUE*g?uU5Uei!$4~LMM+@D~WVlZ1dLF$`$ZBU7T&?G@0X>T>YU#wP_J0@kKRJXwqFw zZSkI5CsG>zM%@}3bRx_zK5>u?=B`4bwH~J8g_;E%B1Sxox7*z|w+_uWLZ0nAcJVUA zGrE3{d;lyHgzw+ZcsElFXKt$nnWf%*6|PJbn5&Q8Z(kqOmTZ=JNhArm=`wruTh52U zWYj7JZQhG#*M&tf!McjNy<&wHru$m~5K!=;DT*`j10BTe9QqW*FyHqpzak=E4T_)X znkEX+HcG_y%hq>>T{_RJw=C;19SN>6*At?IGL8D2a=(dJ)o5k6ilEhMe*!*pXQEt0BmruQmweqlnI*`;-Nv-*D^lOeWhydHOV*x^TBH%`&i;=fMyF zC0gxHpJ8xHDsMav2iMmLFr)?vuGWWD;SE}+1yZGs1RNc+_s2Ro}S!qM#|I7XP~L@~l) 
zvvyh={pgd^0Uy;gXP27N`O*lSGr@HuEigw$r;C%X{73D5aH_2bv zG~3of{*mA|{Z`y7XA-zonDKdF=#naK4amH$OcQ}ADCrdI=&^BQAcnh(X})mFEAaln zZ;my8<2XcJ;?y?9CKDOKpWJ?)_*s2i7Ev|N7EFj4%<^@Mne=XMAFTQ|=FsSJIb#pr z4OKldFBFNSD|Nna^X~5Y)0y?Fnj}vi+bh@d8xK^&=L9)T@wHr{w|D_`nyM&W39#5? zzP@V5pf{mJ-u2YWV-qRsGQf)tq%fcP;i?`ouLOVz z=u?8f{w}I~WOM#^=s?ikZvNd1O305H^9u7h7JsF>%Y-opRJNV+$My4(3dd5~?PuyS za{J$%g*)eMI%W0R_QbNVKOQ{UYzO(2-~4oTvueht=(vKQu?GB=71~Lw->OS)^T~l+ zHVU5UiB4RhdOTaP>J5+uL95agPevr`D2ovd(jW>-XYUhHP$P`G=kUUh9MM&-qRJ-c z&_b)Q-`jJfn*wAbknqrllhwYrE`%ic`%-508dBe(Gg$V()7$0R^oI*#k3UPcd48VvUo2z}CSguHL9d;YAqmer!Ga2pQ1C08h?X=NlOmk)A3i5;c5G)D z$?2osXkj?pZn=pMVWFCe&KQmj()b03Vwv}@mMh%z%y51oz(^deKzM1wpIk@V^c`7^ zcM0&%VlUa3wMl%uG?`TvVs$oBHsGK{dr6@CABYmVe8bb4edJi`%X|?1YGK}%0?Lbs zivhWS`AhJZTAxshdY7%=*!E#G>5MocG!1pL6HitDC}!Hc7&pxcQeGkXe!Dzfb@XjW zf-qfSY<;F>Ay)!%a4a=v&ALHiA&CS4_}F$3aG>FpnPNDcqUP zO@ZlWK7pjG?NQxd`@`fyJ%b?#xI~@%$?>Y%oVU{2PD8b31m^I|Mn$Ja z1g~diN%HTTT2TXuTf_G~YNGH=tO?W;V!;!D4okwCPoqU52!4 z!FabIwc?uSO^O)uKBcZiEJ?CkBChPrjBNaH-hZwA$}?OqtKNQhcuUN0141AW8hKCf zGY&b6(ziU$66otitVL|XIYF#{lc2P{Oe8^WyC{}5F^NWK`g2SCuI(5onB1a?=8k~$ zc5TxXed>e8Z2KVKO1t6m*$1K|Lbe>LuG9mH@HhS}GO@j;0dlI9{yI+wii@bp6_*f! zONXY6_Q)oLl+aQBXve5aK`Z1@m{9-Lvj*l-?>R+*48>)d*J$3YB`f z$!G^G1J<=JNcwheRme~1UW*xZXHCnH9^Nv0U}nHgPzQqI!;#XUK;jQ<-Qiq@V2l=t z2q5%7^b<&xXn>0qt~v$e&U3vm*$VuYgGYM>W$1NacufW>?g$kp!2f}QYZuU#rhZb; zNXGCfxur|!jATe{1vh8+Mzmfftzm1rhOVI^^0wNbAMj3G+ZI_4ARqCA?)@bnNUctmqsanEz;iI*)l*;YA zGM26N2H>911RqV90T*X9B2)!v^)4yji1&oIQV;g>S2$piNRewl7F@pJUNsNMo_i50 z)f^y7&R8=~U$QnhGk`O^VB0QyX!67$Gr-P;OQXA+f5<0rLSw~T`{z-qdg@4>)6i(t zie5;^T1~M%s4MTWDI9)IW5PoyhSu-4F>!e_Xz}^Gd%eBV5$#T7tiDV@YuENI4LclF zJ)a?^-H!o>N}kNR=aZkTvNx{#3t}>gjDGU5{X8WzC#+p@r@`U?Zv*LbjU8V*PaOg% zCYvZ#sZKm#>o4C5tZ{)Knx$`H4sb-ELe8vdZZlv7EgZT@g5eIe#V;9O*iOwH${7!WgcR!L(% zIK|?`7n*Vxl3sCAMptx05hn-SLJdKNcCw1^UccT*k? 
zlRez0V-6N0(g#Bio(KbT>w3|uLu-HV?zy`Y=LAfMBxkcX={gWUPDG3Ysw@pS429_w zYN0w@NSH3ZQ!4xIs!~*c5YJ*koVV4Q;4UE0L0^N(-zZBV<5s9PX^(G&w?~krOS^hl zC}w2K2&Qg@{v((SFQS@>|58{MpDo@tvxCsqvrP>9Nv+TnF8hJD39whk6#dF8fR#FINSKjS=C9UP%z@cDz{W7)B8Q96|4a)AyDmV_JiKgV{jF;Z^3#B5R3JpOQi zt#qFI%xIIE{2uL-^py$R7Q6a#RuYal#5i`9k~@BoGX4>a@epVgtKbq6{G~fc&c*oc z9J@+7TsLP`Akd zf}w)8->%t*{R1_z^u^4^b}573xdkkRBhc=yjw*Blb9<#ofNC6M%sHXL4GQQ4dgT5S zC%23wRjJuy5N`5v1m;u?w?gAWEAlvqb>m~SsA}RJfWB?;4sRFcUC(8Ah!M8`KEeBF zD-P!rO&wZT(YSxqgZmXOdM!T6$ayJ+i9;XoUeRN?adgcLdCr71c(ELO&^=g*dumJI zkQfJUiS7rZPcG{Kqr|Mf=lJ^>%MWuQc=+4V*AKV@k0o!{A5^z4ez?~TKUtg7?~gTw zsQMrJ-xw?`gTQmWjQmzbxBEkvDtwO@MG1f<%J_*W8Ar9jzV`3YK|_}HyB;pg20X{K z1uT=@v=2ik=FfCqgO3ZeJ0W|LkPn5)X{fvKp9QsAYPZD>hSE1QJ86k@>nVFjd3m`J z62U5B{LqvKr|p@VblVw>zF=fD7yylSA;}(1@Bnr=s4&UDV9?1-v4g6avt&Tu$Z3b! z^cln$hWuRHC;yObu>6uv@p8~6KJ3_O7}Jx%U{2}PYVM20Z&`ntnV+pw!=h1|UYS+o z>G{EhVg7_G_2TwmN|5&?ySG66_8u&z(AfHYm{Z$TQgy`E`TW%8C$8e{m|tTEwAa?b z(ARb``f=zr6*%=i@{xPyRW8(w({U+r>*g|3pt+ju;Dr%n0gcM6W2o=C$K%Rl<~>!N z4u)f8P{LcI{I?v3U%qi7k`)HBk_@&y0dw0q?I&?voCz57uB}E^N%XI#kJsG;6`^Gor8Yu;+EZ^u(0+LxM z5~_E+;jMsb9SvT%Q%pZJ?<#(EhUxKvdAVSM|CtGpfv{L6miH{z36^b`q4u2GRiDzNd`i>vcXU4^I?!X2TQW<`u%{C$ zra$VPw@6FP3=~|6HzxuKR%fUdu)$m%9WcPboyp2l5)Y{$19wlkHRa8sxu^732J1-Ta5RM(*y7NBFD731(=+%AEGoy^X zH+w7*EbY&P{S`ALKyr*={La$XnOP%U2d1_}59iE0kK8gKyw!gIdZrA8KttuW3&(I> zh=#$xM%k$O9J)R`uVG3*oH`PvS<0>y6|pwGk^4?pxoeTk&JXWi`v!6Uq(I*1XF01h zXk4PaHijn+uG;3ZZ1bPLvoFZ@zc^(nXtQG-1*gIDD<);wr|`f?#~Wuj1(6UTy)Ob) z^rI+`fUWcDUl2c>Z3mpPoF%K-AGCE~7WZy~hhYZs&i4xf-|=VR!4i`clBKLU4`K#W z^IdX)mb=IOob+adTlg3Kd3h264wsmE-7DThicV|>0rllE?IWlDGUv! 
z%AX~Yq5z^^GaPiHabVoYihD7m&uaCTJVb3kIIX`gjQ_;IdJ+6;3Pu1#6N_^rZq?je sKsB;G*3kv-SJvN{Zc!&PJ}yOI)lG2IpGXaJ6EUO_6i|&PzW@9AfA-(jYXATM literal 0 HcmV?d00001 diff --git a/modules/aruco/tutorials/aruco_detection/aruco_detection.markdown b/modules/aruco/tutorials/aruco_detection/aruco_detection.markdown index ee0518b0c53..2e81d09b1f9 100644 --- a/modules/aruco/tutorials/aruco_detection/aruco_detection.markdown +++ b/modules/aruco/tutorials/aruco_detection/aruco_detection.markdown @@ -286,8 +286,9 @@ translation vectors of the estimated poses will be in the same unit - The output parameters `rvecs` and `tvecs` are the rotation and translation vectors respectively, for each of the markers in `markerCorners`. -The marker coordinate system that is assumed by this function is placed at the center of the marker -with the Z axis pointing out, as in the following image. Axis-color correspondences are X: red, Y: green, Z: blue. Note the axis directions of the rotated markers in this image. +The marker coordinate system that is assumed by this function is placed in the center (by default) or +in the top left corner of the marker with the Z axis pointing out, as in the following image. +Axis-color correspondences are X: red, Y: green, Z: blue. Note the axis directions of the rotated markers in this image. ![Image with axes drawn](images/singlemarkersaxes.jpg) From b2904b94414e885a595ec0c56fa70da5a3fe8472 Mon Sep 17 00:00:00 2001 From: cudawarped <12133430+cudawarped@users.noreply.github.com> Date: Thu, 2 Jun 2022 10:32:29 +0100 Subject: [PATCH 14/45] Merge pull request #3247 from cudawarped:videoreader_add_rtsp_feature MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add RTSP features to cudacodec::VideoReader * Add live video source enhancements, e.g. rtsp from ip camera's Add error logs. * Fix type. * Change badly named flag. 
* Alter live source flag everywhere to indicate what it does not what it is for, which should be left up to the documentation. * Prevent frame que object from being reinitialized which could be unsafe if another thread and/or object is using it. --- .../cudacodec/include/opencv2/cudacodec.hpp | 41 ++++++++----- modules/cudacodec/src/frame_queue.cpp | 21 ++++++- modules/cudacodec/src/frame_queue.hpp | 14 ++++- modules/cudacodec/src/video_decoder.cpp | 10 +++- modules/cudacodec/src/video_parser.cpp | 20 ++++--- modules/cudacodec/src/video_parser.hpp | 16 +++-- modules/cudacodec/src/video_reader.cpp | 38 +++++++----- modules/cudacodec/test/test_video.cpp | 58 ++++++++++++++----- 8 files changed, 155 insertions(+), 63 deletions(-) diff --git a/modules/cudacodec/include/opencv2/cudacodec.hpp b/modules/cudacodec/include/opencv2/cudacodec.hpp index 24f939af4b3..334c17f6e5f 100644 --- a/modules/cudacodec/include/opencv2/cudacodec.hpp +++ b/modules/cudacodec/include/opencv2/cudacodec.hpp @@ -321,6 +321,8 @@ enum class VideoReaderProps { PROP_RAW_MODE = 4, //!< Status of raw mode. PROP_LRF_HAS_KEY_FRAME = 5, //!< FFmpeg source only - Indicates whether the Last Raw Frame (LRF), output from VideoReader::retrieve() when VideoReader is initialized in raw mode, contains encoded data for a key frame. PROP_COLOR_FORMAT = 6, //!< Set the ColorFormat of the decoded frame. This can be changed before every call to nextFrame() and retrieve(). + PROP_UDP_SOURCE = 7, //!< Status of VideoReaderInitParams::udpSource initialization. + PROP_ALLOW_FRAME_DROP = 8, //!< Status of VideoReaderInitParams::allowFrameDrop initialization. #ifndef CV_DOXYGEN PROP_NOT_SUPPORTED #endif @@ -468,32 +470,43 @@ class CV_EXPORTS_W RawVideoSource virtual bool get(const int propertyId, double& propertyVal) const = 0; }; +/** @brief VideoReader initialization parameters +@param udpSource Remove validation which can cause VideoReader() to throw exceptions when reading from a UDP source. 
+@param allowFrameDrop Allow frames to be dropped when ingesting from a live capture source to prevent delay and eventual disconnection +when calls to nextFrame()/grab() cannot keep up with the source's fps. Only use if delay and disconnection are a problem, i.e. not when decoding from +video files where setting this flag will cause frames to be unnecessarily discarded. +@param minNumDecodeSurfaces Minimum number of internal decode surfaces used by the hardware decoder. NVDEC will automatically determine the minimum number of +surfaces it requires for correct functionality and optimal video memory usage but not necessarily for best performance, which depends on the design of the +overall application. The optimal number of decode surfaces (in terms of performance and memory utilization) should be decided by experimentation for each application, +but it cannot go below the number determined by NVDEC. +@param rawMode Allow the raw encoded data which has been read up until the last call to grab() to be retrieved by calling retrieve(rawData,RAW_DATA_IDX). +*/ +struct CV_EXPORTS_W_SIMPLE VideoReaderInitParams { + CV_WRAP VideoReaderInitParams() : udpSource(false), allowFrameDrop(false), minNumDecodeSurfaces(0), rawMode(0) {}; + CV_PROP_RW bool udpSource; + CV_PROP_RW bool allowFrameDrop; + CV_PROP_RW int minNumDecodeSurfaces; + CV_PROP_RW bool rawMode; +}; + /** @brief Creates video reader. @param filename Name of the input video file. -@param params Pass through parameters for VideoCapure. VideoCapture with the FFMpeg back end (CAP_FFMPEG) is used to parse the video input. -The `params` parameter allows to specify extra parameters encoded as pairs `(paramId_1, paramValue_1, paramId_2, paramValue_2, ...)`. +@param sourceParams Pass through parameters for VideoCapure. VideoCapture with the FFMpeg back end (CAP_FFMPEG) is used to parse the video input. 
+The `sourceParams` parameter allows to specify extra parameters encoded as pairs `(paramId_1, paramValue_1, paramId_2, paramValue_2, ...)`. See cv::VideoCaptureProperties e.g. when streaming from an RTSP source CAP_PROP_OPEN_TIMEOUT_MSEC may need to be set. -@param rawMode Allow the raw encoded data which has been read up until the last call to grab() to be retrieved by calling retrieve(rawData,RAW_DATA_IDX). -@param minNumDecodeSurfaces Minimum number of internal decode surfaces used by the hardware decoder. NVDEC will automatically determine the minimum number of -surfaces it requires for correct functionality and optimal video memory usage but not necessarily for best performance, which depends on the design of the -overall application. The optimal number of decode surfaces (in terms of performance and memory utilization) should be decided by experimentation for each application, -but it cannot go below the number determined by NVDEC. +@param params Initializaton parameters. See cv::cudacodec::VideoReaderInitParams. FFMPEG is used to read videos. User can implement own demultiplexing with cudacodec::RawVideoSource */ -CV_EXPORTS_W Ptr createVideoReader(const String& filename, const std::vector& params = {}, const bool rawMode = false, const int minNumDecodeSurfaces = 0); +CV_EXPORTS_W Ptr createVideoReader(const String& filename, const std::vector& sourceParams = {}, const VideoReaderInitParams params = VideoReaderInitParams()); /** @overload @param source RAW video source implemented by user. -@param rawMode Allow the raw encoded data which has been read up until the last call to grab() to be retrieved by calling retrieve(rawData,RAW_DATA_IDX). -@param minNumDecodeSurfaces Minimum number of internal decode surfaces used by the hardware decoder. 
NVDEC will automatically determine the minimum number of -surfaces it requires for correct functionality and optimal video memory usage but not necessarily for best performance, which depends on the design of the -overall application. The optimal number of decode surfaces (in terms of performance and memory utilization) should be decided by experimentation for each application, -but it cannot go below the number determined by NVDEC. +@param params Initializaton parameters. See cv::cudacodec::VideoReaderInitParams. */ -CV_EXPORTS_W Ptr createVideoReader(const Ptr& source, const bool rawMode = false, const int minNumDecodeSurfaces = 0); +CV_EXPORTS_W Ptr createVideoReader(const Ptr& source, const VideoReaderInitParams params = VideoReaderInitParams()); //! @} diff --git a/modules/cudacodec/src/frame_queue.cpp b/modules/cudacodec/src/frame_queue.cpp index f5d8eb91914..6fecff6b8cf 100644 --- a/modules/cudacodec/src/frame_queue.cpp +++ b/modules/cudacodec/src/frame_queue.cpp @@ -57,16 +57,20 @@ cv::cudacodec::detail::FrameQueue::~FrameQueue() { void cv::cudacodec::detail::FrameQueue::init(const int _maxSz) { AutoLock autoLock(mtx_); + if (isFrameInUse_) + return; maxSz = _maxSz; displayQueue_ = std::vector(maxSz, CUVIDPARSERDISPINFO()); isFrameInUse_ = new volatile int[maxSz]; std::memset((void*)isFrameInUse_, 0, sizeof(*isFrameInUse_) * maxSz); } -bool cv::cudacodec::detail::FrameQueue::waitUntilFrameAvailable(int pictureIndex) +bool cv::cudacodec::detail::FrameQueue::waitUntilFrameAvailable(int pictureIndex, const bool allowFrameDrop) { while (isInUse(pictureIndex)) { + if (allowFrameDrop && dequeueUntil(pictureIndex)) + break; // Decoder is getting too far ahead from display Thread::sleep(1); @@ -110,6 +114,20 @@ void cv::cudacodec::detail::FrameQueue::enqueue(const CUVIDPARSERDISPINFO* picPa } while (!isEndOfDecode()); } +bool cv::cudacodec::detail::FrameQueue::dequeueUntil(const int pictureIndex) { + AutoLock autoLock(mtx_); + if (isFrameInUse_[pictureIndex] != 1) 
+ return false; + for (int i = 0; i < framesInQueue_; i++) { + const bool found = displayQueue_.at(readPosition_).picture_index == pictureIndex; + isFrameInUse_[displayQueue_.at(readPosition_).picture_index] = 0; + framesInQueue_--; + readPosition_ = (readPosition_ + 1) % maxSz; + if (found) return true; + } + return false; +} + bool cv::cudacodec::detail::FrameQueue::dequeue(CUVIDPARSERDISPINFO& displayInfo, std::vector& rawPackets) { AutoLock autoLock(mtx_); @@ -124,6 +142,7 @@ bool cv::cudacodec::detail::FrameQueue::dequeue(CUVIDPARSERDISPINFO& displayInfo } readPosition_ = (entry + 1) % maxSz; framesInQueue_--; + isFrameInUse_[displayInfo.picture_index] = 2; return true; } diff --git a/modules/cudacodec/src/frame_queue.hpp b/modules/cudacodec/src/frame_queue.hpp index f5a9b343736..b6e0b06bccf 100644 --- a/modules/cudacodec/src/frame_queue.hpp +++ b/modules/cudacodec/src/frame_queue.hpp @@ -72,7 +72,9 @@ class FrameQueue // If the requested frame is available the method returns true. // If decoding was interrupted before the requested frame becomes // available, the method returns false. - bool waitUntilFrameAvailable(int pictureIndex); + // If allowFrameDrop == true, spin is disabled and n > 0 frames are discarded + // to ensure a frame is available. + bool waitUntilFrameAvailable(int pictureIndex, const bool allowFrameDrop = false); void enqueue(const CUVIDPARSERDISPINFO* picParams, const std::vector rawPackets); @@ -84,8 +86,16 @@ class FrameQueue // false, if the queue was empty and no new frame could be returned. bool dequeue(CUVIDPARSERDISPINFO& displayInfo, std::vector& rawPackets); - void releaseFrame(const CUVIDPARSERDISPINFO& picParams) { isFrameInUse_[picParams.picture_index] = false; } + // Deque all frames up to and including the frame with index pictureIndex - must only + // be called in the same thread as enqueue. + // Parameters: + // pictureIndex - Display index of the frame. 
+ // Returns: + // true, if successful, + // false, if no frames are dequed. + bool dequeueUntil(const int pictureIndex); + void releaseFrame(const CUVIDPARSERDISPINFO& picParams) { isFrameInUse_[picParams.picture_index] = 0; } private: bool isInUse(int pictureIndex) const { return isFrameInUse_[pictureIndex] != 0; } diff --git a/modules/cudacodec/src/video_decoder.cpp b/modules/cudacodec/src/video_decoder.cpp index c05d0fd7305..69845a7a0b9 100644 --- a/modules/cudacodec/src/video_decoder.cpp +++ b/modules/cudacodec/src/video_decoder.cpp @@ -64,7 +64,10 @@ static const char* GetVideoChromaFormatString(cudaVideoChromaFormat eChromaForma void cv::cudacodec::detail::VideoDecoder::create(const FormatInfo& videoFormat) { - videoFormat_ = videoFormat; + { + AutoLock autoLock(mtx_); + videoFormat_ = videoFormat; + } const cudaVideoCodec _codec = static_cast(videoFormat.codec); const cudaVideoChromaFormat _chromaFormat = static_cast(videoFormat.chromaFormat); if (videoFormat.nBitDepthMinus8 > 0) { @@ -120,9 +123,10 @@ void cv::cudacodec::detail::VideoDecoder::create(const FormatInfo& videoFormat) cuSafeCall(cuCtxPushCurrent(ctx_)); cuSafeCall(cuvidGetDecoderCaps(&decodeCaps)); cuSafeCall(cuCtxPopCurrent(NULL)); - if (!(decodeCaps.bIsSupported && (decodeCaps.nOutputFormatMask & (1 << cudaVideoSurfaceFormat_NV12)))) + if (!(decodeCaps.bIsSupported && (decodeCaps.nOutputFormatMask & (1 << cudaVideoSurfaceFormat_NV12)))){ CV_Error(Error::StsUnsupportedFormat, "Video source is not supported by hardware video decoder"); - + CV_LOG_ERROR(NULL, "Video source is not supported by hardware video decoder."); + } CV_Assert(videoFormat.ulWidth >= decodeCaps.nMinWidth && videoFormat.ulHeight >= decodeCaps.nMinHeight && videoFormat.ulWidth <= decodeCaps.nMaxWidth && diff --git a/modules/cudacodec/src/video_parser.cpp b/modules/cudacodec/src/video_parser.cpp index d178ea3e791..feda982c5b8 100644 --- a/modules/cudacodec/src/video_parser.cpp +++ b/modules/cudacodec/src/video_parser.cpp @@ 
-45,9 +45,10 @@ #ifdef HAVE_NVCUVID -cv::cudacodec::detail::VideoParser::VideoParser(VideoDecoder* videoDecoder, FrameQueue* frameQueue) : - videoDecoder_(videoDecoder), frameQueue_(frameQueue), unparsedPackets_(0), hasError_(false) +cv::cudacodec::detail::VideoParser::VideoParser(VideoDecoder* videoDecoder, FrameQueue* frameQueue, const bool allowFrameDrop, const bool udpSource) : + videoDecoder_(videoDecoder), frameQueue_(frameQueue), allowFrameDrop_(allowFrameDrop) { + if (udpSource) maxUnparsedPackets_ = 0; CUVIDPARSERPARAMS params; std::memset(¶ms, 0, sizeof(CUVIDPARSERPARAMS)); @@ -78,16 +79,17 @@ bool cv::cudacodec::detail::VideoParser::parseVideoData(const unsigned char* dat if (cuvidParseVideoData(parser_, &packet) != CUDA_SUCCESS) { + CV_LOG_ERROR(NULL, "Call to cuvidParseVideoData failed!"); hasError_ = true; frameQueue_->endDecode(); return false; } - constexpr int maxUnparsedPackets = 20; - ++unparsedPackets_; - if (unparsedPackets_ > maxUnparsedPackets) + if (maxUnparsedPackets_ && unparsedPackets_ > maxUnparsedPackets_) { + CV_LOG_ERROR(NULL, "Maxium number of packets (" << maxUnparsedPackets_ << ") parsed without decoding a frame or reconfiguring the decoder, if reading from \ + a live source consider initializing with VideoReaderInitParams::udpSource == true."); hasError_ = true; frameQueue_->endDecode(); return false; @@ -122,7 +124,8 @@ int CUDAAPI cv::cudacodec::detail::VideoParser::HandleVideoSequence(void* userDa newFormat.height = format->coded_height; newFormat.displayArea = Rect(Point(format->display_area.left, format->display_area.top), Point(format->display_area.right, format->display_area.bottom)); newFormat.fps = format->frame_rate.numerator / static_cast(format->frame_rate.denominator); - newFormat.ulNumDecodeSurfaces = max(thiz->videoDecoder_->nDecodeSurfaces(), static_cast(format->min_num_decode_surfaces)); + newFormat.ulNumDecodeSurfaces = min(!thiz->allowFrameDrop_ ? 
max(thiz->videoDecoder_->nDecodeSurfaces(), static_cast(format->min_num_decode_surfaces)) : + format->min_num_decode_surfaces * 2, 32); if (format->progressive_sequence) newFormat.deinterlaceMode = Weave; else @@ -149,6 +152,7 @@ int CUDAAPI cv::cudacodec::detail::VideoParser::HandleVideoSequence(void* userDa } catch (const cv::Exception&) { + CV_LOG_ERROR(NULL, "Attempt to reconfigure Nvidia decoder failed!"); thiz->hasError_ = true; return false; } @@ -163,13 +167,13 @@ int CUDAAPI cv::cudacodec::detail::VideoParser::HandlePictureDecode(void* userDa thiz->unparsedPackets_ = 0; - bool isFrameAvailable = thiz->frameQueue_->waitUntilFrameAvailable(picParams->CurrPicIdx); - + bool isFrameAvailable = thiz->frameQueue_->waitUntilFrameAvailable(picParams->CurrPicIdx, thiz->allowFrameDrop_); if (!isFrameAvailable) return false; if (!thiz->videoDecoder_->decodePicture(picParams)) { + CV_LOG_ERROR(NULL, "Decoding failed!"); thiz->hasError_ = true; return false; } diff --git a/modules/cudacodec/src/video_parser.hpp b/modules/cudacodec/src/video_parser.hpp index 870a2105a8d..28159a9b98a 100644 --- a/modules/cudacodec/src/video_parser.hpp +++ b/modules/cudacodec/src/video_parser.hpp @@ -52,7 +52,7 @@ namespace cv { namespace cudacodec { namespace detail { class VideoParser { public: - VideoParser(VideoDecoder* videoDecoder, FrameQueue* frameQueue); + VideoParser(VideoDecoder* videoDecoder, FrameQueue* frameQueue, const bool allowFrameDrop = false, const bool udpSource = false); ~VideoParser() { @@ -63,13 +63,19 @@ class VideoParser bool hasError() const { return hasError_; } + bool udpSource() const { return maxUnparsedPackets_ == 0; } + + bool allowFrameDrops() const { return allowFrameDrop_; } + private: - VideoDecoder* videoDecoder_; - FrameQueue* frameQueue_; + VideoDecoder* videoDecoder_ = 0; + FrameQueue* frameQueue_ = 0; CUvideoparser parser_; - int unparsedPackets_; + int unparsedPackets_ = 0; + int maxUnparsedPackets_ = 20; std::vector currentFramePackets; - volatile 
bool hasError_; + volatile bool hasError_ = false; + bool allowFrameDrop_ = false; // Called when the decoder encounters a video format change (or initial sequence header) // This particular implementation of the callback returns 0 in case the video format changes diff --git a/modules/cudacodec/src/video_reader.cpp b/modules/cudacodec/src/video_reader.cpp index 5a21357189f..d1a41fbff05 100644 --- a/modules/cudacodec/src/video_reader.cpp +++ b/modules/cudacodec/src/video_reader.cpp @@ -48,8 +48,8 @@ using namespace cv::cudacodec; #ifndef HAVE_NVCUVID -Ptr cv::cudacodec::createVideoReader(const String&, const std::vector&, const bool, const int) { throw_no_cuda(); return Ptr(); } -Ptr cv::cudacodec::createVideoReader(const Ptr&, const bool, const int) { throw_no_cuda(); return Ptr(); } +Ptr cv::cudacodec::createVideoReader(const String&, const std::vector&, const VideoReaderInitParams) { throw_no_cuda(); return Ptr(); } +Ptr cv::cudacodec::createVideoReader(const Ptr&, const VideoReaderInitParams) { throw_no_cuda(); return Ptr(); } #else // HAVE_NVCUVID @@ -86,7 +86,7 @@ namespace class VideoReaderImpl : public VideoReader { public: - explicit VideoReaderImpl(const Ptr& source, const int minNumDecodeSurfaces); + explicit VideoReaderImpl(const Ptr& source, const int minNumDecodeSurfaces, const bool allowFrameDrop = false , const bool udpSource = false); ~VideoReaderImpl(); bool nextFrame(GpuMat& frame, Stream& stream) CV_OVERRIDE; @@ -130,7 +130,7 @@ namespace return videoSource_->format(); } - VideoReaderImpl::VideoReaderImpl(const Ptr& source, const int minNumDecodeSurfaces) : + VideoReaderImpl::VideoReaderImpl(const Ptr& source, const int minNumDecodeSurfaces, const bool allowFrameDrop, const bool udpSource) : videoSource_(source), lock_(0) { @@ -143,7 +143,7 @@ namespace cuSafeCall( cuvidCtxLockCreate(&lock_, ctx) ); frameQueue_.reset(new FrameQueue()); videoDecoder_.reset(new VideoDecoder(videoSource_->format().codec, minNumDecodeSurfaces, ctx, lock_)); - 
videoParser_.reset(new VideoParser(videoDecoder_, frameQueue_)); + videoParser_.reset(new VideoParser(videoDecoder_, frameQueue_, allowFrameDrop, udpSource)); videoSource_->setVideoParser(videoParser_); videoSource_->start(); } @@ -291,10 +291,10 @@ namespace case VideoReaderProps::PROP_NUMBER_OF_RAW_PACKAGES_SINCE_LAST_GRAB: propertyVal = rawPackets.size(); return true; - case::VideoReaderProps::PROP_RAW_MODE: + case VideoReaderProps::PROP_RAW_MODE: propertyVal = videoSource_->RawModeEnabled(); return true; - case::VideoReaderProps::PROP_LRF_HAS_KEY_FRAME: { + case VideoReaderProps::PROP_LRF_HAS_KEY_FRAME: { const int iPacket = propertyVal - rawPacketsBaseIdx; if (videoSource_->RawModeEnabled() && iPacket >= 0 && iPacket < rawPackets.size()) { propertyVal = rawPackets.at(iPacket).containsKeyFrame; @@ -303,6 +303,14 @@ namespace else break; } + case VideoReaderProps::PROP_ALLOW_FRAME_DROP: { + propertyVal = videoParser_->allowFrameDrops(); + return true; + } + case VideoReaderProps::PROP_UDP_SOURCE: { + propertyVal = videoParser_->udpSource(); + return true; + } default: break; } @@ -321,7 +329,7 @@ namespace } } -Ptr cv::cudacodec::createVideoReader(const String& filename, const std::vector& params, const bool rawMode, const int minNumDecodeSurfaces) +Ptr cv::cudacodec::createVideoReader(const String& filename, const std::vector& sourceParams, const VideoReaderInitParams params) { CV_Assert(!filename.empty()); @@ -330,22 +338,22 @@ Ptr cv::cudacodec::createVideoReader(const String& filename, const try { // prefer ffmpeg to cuvidGetSourceVideoFormat() which doesn't always return the corrct raw pixel format - Ptr source(new FFmpegVideoSource(filename, params)); - videoSource.reset(new RawVideoSourceWrapper(source, rawMode)); + Ptr source(new FFmpegVideoSource(filename, sourceParams)); + videoSource.reset(new RawVideoSourceWrapper(source, params.rawMode)); } catch (...) 
{ - if (params.size()) throw; + if (sourceParams.size()) throw; videoSource.reset(new CuvidVideoSource(filename)); } - return makePtr(videoSource, minNumDecodeSurfaces); + return makePtr(videoSource, params.minNumDecodeSurfaces, params.allowFrameDrop, params.udpSource); } -Ptr cv::cudacodec::createVideoReader(const Ptr& source, const bool rawMode, const int minNumDecodeSurfaces) +Ptr cv::cudacodec::createVideoReader(const Ptr& source, const VideoReaderInitParams params) { - Ptr videoSource(new RawVideoSourceWrapper(source, rawMode)); - return makePtr(videoSource, minNumDecodeSurfaces); + Ptr videoSource(new RawVideoSourceWrapper(source, params.rawMode)); + return makePtr(videoSource, params.minNumDecodeSurfaces); } #endif // HAVE_NVCUVID diff --git a/modules/cudacodec/test/test_video.cpp b/modules/cudacodec/test/test_video.cpp index 0a9a3c5362c..c2eb7a38102 100644 --- a/modules/cudacodec/test/test_video.cpp +++ b/modules/cudacodec/test/test_video.cpp @@ -70,6 +70,10 @@ PARAM_TEST_CASE(CheckDecodeSurfaces, cv::cuda::DeviceInfo, std::string) { }; +PARAM_TEST_CASE(CheckInitParams, cv::cuda::DeviceInfo, std::string, bool, bool, bool) +{ +}; + struct CheckParams : testing::TestWithParam { cv::cuda::DeviceInfo devInfo; @@ -127,10 +131,9 @@ CUDA_TEST_P(CheckExtraData, Reader) const string path = get<0>(GET_PARAM(1)); const int sz = get<1>(GET_PARAM(1)); std::string inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "../" + path; - cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, true); - double rawModeVal = -1; - ASSERT_TRUE(reader->get(cv::cudacodec::VideoReaderProps::PROP_RAW_MODE, rawModeVal)); - ASSERT_TRUE(rawModeVal); + cv::cudacodec::VideoReaderInitParams params; + params.rawMode = true; + cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, params); double extraDataIdx = -1; ASSERT_TRUE(reader->get(cv::cudacodec::VideoReaderProps::PROP_EXTRA_DATA_INDEX, extraDataIdx)); ASSERT_EQ(extraDataIdx, 1 ); @@ -151,10 +154,9 @@ 
CUDA_TEST_P(CheckKeyFrame, Reader) const string path = GET_PARAM(1); std::string inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "../" + path; - cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, true); - double rawModeVal = -1; - ASSERT_TRUE(reader->get(cv::cudacodec::VideoReaderProps::PROP_RAW_MODE, rawModeVal)); - ASSERT_TRUE(rawModeVal); + cv::cudacodec::VideoReaderInitParams params; + params.rawMode = true; + cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, params); double rawIdxBase = -1; ASSERT_TRUE(reader->get(cv::cudacodec::VideoReaderProps::PROP_RAW_PACKAGES_BASE_INDEX, rawIdxBase)); ASSERT_EQ(rawIdxBase, 2); @@ -222,10 +224,9 @@ CUDA_TEST_P(VideoReadRaw, Reader) { std::ofstream file(fileNameOut, std::ios::binary); ASSERT_TRUE(file.is_open()); - cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, true); - double rawModeVal = -1; - ASSERT_TRUE(reader->get(cv::cudacodec::VideoReaderProps::PROP_RAW_MODE, rawModeVal)); - ASSERT_TRUE(rawModeVal); + cv::cudacodec::VideoReaderInitParams params; + params.rawMode = true; + cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, params); double rawIdxBase = -1; ASSERT_TRUE(reader->get(cv::cudacodec::VideoReaderProps::PROP_RAW_PACKAGES_BASE_INDEX, rawIdxBase)); ASSERT_EQ(rawIdxBase, 2); @@ -250,7 +251,9 @@ CUDA_TEST_P(VideoReadRaw, Reader) { cv::Ptr readerReference = cv::cudacodec::createVideoReader(inputFile); - cv::Ptr readerActual = cv::cudacodec::createVideoReader(fileNameOut, {}, true); + cv::cudacodec::VideoReaderInitParams params; + params.rawMode = true; + cv::Ptr readerActual = cv::cudacodec::createVideoReader(fileNameOut, {}, params); double decodedFrameIdx = -1; ASSERT_TRUE(readerActual->get(cv::cudacodec::VideoReaderProps::PROP_DECODED_FRAME_IDX, decodedFrameIdx)); ASSERT_EQ(decodedFrameIdx, 0); @@ -323,7 +326,9 @@ CUDA_TEST_P(CheckDecodeSurfaces, Reader) } { - cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, false, 
ulNumDecodeSurfaces - 1); + cv::cudacodec::VideoReaderInitParams params; + params.minNumDecodeSurfaces = ulNumDecodeSurfaces - 1; + cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, params); cv::cudacodec::FormatInfo fmt = reader->format(); if (!fmt.valid) { reader->grab(); @@ -335,7 +340,9 @@ CUDA_TEST_P(CheckDecodeSurfaces, Reader) } { - cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, false, ulNumDecodeSurfaces + 1); + cv::cudacodec::VideoReaderInitParams params; + params.minNumDecodeSurfaces = ulNumDecodeSurfaces + 1; + cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, params); cv::cudacodec::FormatInfo fmt = reader->format(); if (!fmt.valid) { reader->grab(); @@ -346,6 +353,22 @@ CUDA_TEST_P(CheckDecodeSurfaces, Reader) for (int i = 0; i < 100; i++) ASSERT_TRUE(reader->grab()); } } + +CUDA_TEST_P(CheckInitParams, Reader) +{ + cv::cuda::setDevice(GET_PARAM(0).deviceID()); + const std::string inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "../" + GET_PARAM(1); + cv::cudacodec::VideoReaderInitParams params; + params.udpSource = GET_PARAM(2); + params.allowFrameDrop = GET_PARAM(3); + params.rawMode = GET_PARAM(4); + double udpSource = 0, allowFrameDrop = 0, rawMode = 0; + cv::Ptr reader = cv::cudacodec::createVideoReader(inputFile, {}, params); + ASSERT_TRUE(reader->get(cv::cudacodec::VideoReaderProps::PROP_UDP_SOURCE, udpSource) && static_cast(udpSource) == params.udpSource); + ASSERT_TRUE(reader->get(cv::cudacodec::VideoReaderProps::PROP_ALLOW_FRAME_DROP, allowFrameDrop) && static_cast(allowFrameDrop) == params.allowFrameDrop); + ASSERT_TRUE(reader->get(cv::cudacodec::VideoReaderProps::PROP_RAW_MODE, rawMode) && static_cast(rawMode) == params.rawMode); +} + #endif // HAVE_NVCUVID #if defined(_WIN32) && defined(HAVE_NVCUVENC) @@ -433,5 +456,10 @@ INSTANTIATE_TEST_CASE_P(CUDA_Codec, CheckDecodeSurfaces, testing::Combine( ALL_DEVICES, testing::Values("highgui/video/big_buck_bunny.mp4"))); 
+INSTANTIATE_TEST_CASE_P(CUDA_Codec, CheckInitParams, testing::Combine( + ALL_DEVICES, + testing::Values("highgui/video/big_buck_bunny.mp4"), + testing::Values(true,false), testing::Values(true,false), testing::Values(true,false))); + #endif // HAVE_NVCUVID || HAVE_NVCUVENC }} // namespace From d2760791a4e109c415020fcca72770148ca6d717 Mon Sep 17 00:00:00 2001 From: cudawarped <12133430+cudawarped@users.noreply.github.com> Date: Thu, 2 Jun 2022 13:35:44 +0100 Subject: [PATCH 15/45] Fix cudacodec and cudastereo python bindings. --- .../cudacodec/include/opencv2/cudacodec.hpp | 62 +++++++++++++++---- .../misc/python/test/test_cudacodec.py | 29 +++++++++ modules/cudacodec/src/video_reader.cpp | 24 ++++--- modules/cudacodec/test/test_video.cpp | 2 + .../cudastereo/include/opencv2/cudastereo.hpp | 5 +- .../misc/python/test/test_cudastereo.py | 28 +++++++++ 6 files changed, 129 insertions(+), 21 deletions(-) create mode 100644 modules/cudastereo/misc/python/test/test_cudastereo.py diff --git a/modules/cudacodec/include/opencv2/cudacodec.hpp b/modules/cudacodec/include/opencv2/cudacodec.hpp index 334c17f6e5f..54ec5bed0a8 100644 --- a/modules/cudacodec/include/opencv2/cudacodec.hpp +++ b/modules/cudacodec/include/opencv2/cudacodec.hpp @@ -353,8 +353,12 @@ class CV_EXPORTS_W VideoReader /** @brief Grabs, decodes and returns the next video frame. - If no frames has been grabbed (there are no more frames in video file), the methods return false . - The method throws Exception if error occurs. + @param [out] frame The video frame. + @param stream Stream for the asynchronous version. + @return `false` if no frames have been grabbed. + + If no frames have been grabbed (there are no more frames in video file), the methods return false. + The method throws an Exception if error occurs. */ CV_WRAP virtual bool nextFrame(CV_OUT GpuMat& frame, Stream &stream = Stream::Null()) = 0; @@ -364,6 +368,7 @@ class CV_EXPORTS_W VideoReader /** @brief Grabs the next frame from the video source. 
+ @param stream Stream for the asynchronous version. @return `true` (non-zero) in the case of success. The method/function grabs the next frame from video file or camera and returns true (non-zero) in @@ -376,17 +381,44 @@ class CV_EXPORTS_W VideoReader /** @brief Returns previously grabbed video data. - @param [out] frame The returned data which depends on the provided idx. If there is no new data since the last call to grab() the image will be empty. - @param idx Determins the returned data inside image. The returned data can be the: - Decoded frame, idx = get(PROP_DECODED_FRAME_IDX). - Extra data if available, idx = get(PROP_EXTRA_DATA_INDEX). - Raw encoded data package. To retrieve package i, idx = get(PROP_RAW_PACKAGES_BASE_INDEX) + i with i < get(PROP_NUMBER_OF_RAW_PACKAGES_SINCE_LAST_GRAB) - @return `false` if no frames has been grabbed + @param [out] frame The returned data which depends on the provided idx. + @param idx Determines the returned data inside image. The returned data can be the: + - Decoded frame, idx = get(PROP_DECODED_FRAME_IDX). + - Extra data if available, idx = get(PROP_EXTRA_DATA_INDEX). + - Raw encoded data package. To retrieve package i, idx = get(PROP_RAW_PACKAGES_BASE_INDEX) + i with i < get(PROP_NUMBER_OF_RAW_PACKAGES_SINCE_LAST_GRAB) + @return `false` if no frames have been grabbed + + The method returns data associated with the current video source since the last call to grab() or the creation of the VideoReader. If no data is present + the method returns false and the function returns an empty image. + */ + virtual bool retrieve(OutputArray frame, const size_t idx = static_cast(VideoReaderProps::PROP_DECODED_FRAME_IDX)) const = 0; + + /** @brief Returns previously grabbed encoded video data. + + @param [out] frame The encoded video data. + @param idx Determines the returned data inside image. The returned data can be the: + - Extra data if available, idx = get(PROP_EXTRA_DATA_INDEX). + - Raw encoded data package. 
To retrieve package i, idx = get(PROP_RAW_PACKAGES_BASE_INDEX) + i with i < get(PROP_NUMBER_OF_RAW_PACKAGES_SINCE_LAST_GRAB) + @return `false` if no frames have been grabbed The method returns data associated with the current video source since the last call to grab() or the creation of the VideoReader. If no data is present the method returns false and the function returns an empty image. */ - CV_WRAP virtual bool retrieve(CV_OUT OutputArray frame, const size_t idx = static_cast(VideoReaderProps::PROP_DECODED_FRAME_IDX)) const = 0; + CV_WRAP inline bool retrieve(CV_OUT Mat& frame, const size_t idx) const { + return retrieve(OutputArray(frame), idx); + } + + /** @brief Returns the next video frame. + + @param [out] frame The video frame. If grab() has not been called then this will be empty(). + @return `false` if no frames have been grabbed + + The method returns data associated with the current video source since the last call to grab(). If no data is present + the method returns false and the function returns an empty image. + */ + CV_WRAP inline bool retrieve(CV_OUT GpuMat& frame) const { + return retrieve(OutputArray(frame)); + } /** @brief Sets a property in the VideoReader. @@ -395,7 +427,10 @@ class CV_EXPORTS_W VideoReader @param propertyVal Value of the property. @return `true` if the property has been set. */ - CV_WRAP virtual bool set(const VideoReaderProps propertyId, const double propertyVal) = 0; + virtual bool set(const VideoReaderProps propertyId, const double propertyVal) = 0; + CV_WRAP inline bool setVideoReaderProps(const VideoReaderProps propertyId, double propertyVal) { + return set(propertyId, propertyVal); + } /** @brief Set the desired ColorFormat for the frame returned by nextFrame()/retrieve(). @@ -408,11 +443,12 @@ class CV_EXPORTS_W VideoReader @param propertyId Property identifier from cv::cudacodec::VideoReaderProps (eg. 
cv::cudacodec::VideoReaderProps::PROP_DECODED_FRAME_IDX, cv::cudacodec::VideoReaderProps::PROP_EXTRA_DATA_INDEX, ...). @param propertyVal - In - Optional value required for querying specific propertyId's, e.g. the index of the raw package to be checked for a key frame (cv::cudacodec::VideoReaderProps::PROP_LRF_HAS_KEY_FRAME). - Out - Value of the property. + - In: Optional value required for querying specific propertyId's, e.g. the index of the raw package to be checked for a key frame (cv::cudacodec::VideoReaderProps::PROP_LRF_HAS_KEY_FRAME). + - Out: Value of the property. @return `true` unless the property is not supported. */ - CV_WRAP virtual bool get(const VideoReaderProps propertyId, CV_IN_OUT double& propertyVal) const = 0; + virtual bool get(const VideoReaderProps propertyId, double& propertyVal) const = 0; + CV_WRAP virtual bool getVideoReaderProps(const VideoReaderProps propertyId, CV_OUT double& propertyValOut, double propertyValIn = 0) const = 0; /** @brief Retrieves the specified property used by the VideoSource. 
diff --git a/modules/cudacodec/misc/python/test/test_cudacodec.py b/modules/cudacodec/misc/python/test/test_cudacodec.py index e2f756af245..dc9f7a40aae 100644 --- a/modules/cudacodec/misc/python/test/test_cudacodec.py +++ b/modules/cudacodec/misc/python/test/test_cudacodec.py @@ -34,6 +34,35 @@ def test_reader(self): ret, _gpu_mat2 = reader.nextFrame(gpu_mat) #TODO: self.assertTrue(gpu_mat == gpu_mat2) self.assertTrue(ret) + + params = cv.cudacodec.VideoReaderInitParams() + params.rawMode = True + ms_gs = 1234 + reader = cv.cudacodec.createVideoReader(vid_path,[cv.CAP_PROP_OPEN_TIMEOUT_MSEC, ms_gs], params) + ret, ms = reader.get(cv.CAP_PROP_OPEN_TIMEOUT_MSEC) + self.assertTrue(ret and ms == ms_gs) + ret, raw_mode = reader.getVideoReaderProps(cv.cudacodec.VideoReaderProps_PROP_RAW_MODE) + self.assertTrue(ret and raw_mode) + + ret, colour_code = reader.getVideoReaderProps(cv.cudacodec.VideoReaderProps_PROP_COLOR_FORMAT) + self.assertTrue(ret and colour_code == cv.cudacodec.ColorFormat_BGRA) + colour_code_gs = cv.cudacodec.ColorFormat_GRAY + reader.set(colour_code_gs) + ret, colour_code = reader.getVideoReaderProps(cv.cudacodec.VideoReaderProps_PROP_COLOR_FORMAT) + self.assertTrue(ret and colour_code == colour_code_gs) + + ret, i_base = reader.getVideoReaderProps(cv.cudacodec.VideoReaderProps_PROP_RAW_PACKAGES_BASE_INDEX) + self.assertTrue(ret and i_base == 2.0) + self.assertTrue(reader.grab()) + ret, gpu_mat3 = reader.retrieve() + self.assertTrue(ret and isinstance(gpu_mat3,cv.cuda.GpuMat) and not gpu_mat3.empty()) + ret = reader.retrieve(gpu_mat3) + self.assertTrue(ret and isinstance(gpu_mat3,cv.cuda.GpuMat) and not gpu_mat3.empty()) + ret, n_raw_packages_since_last_grab = reader.getVideoReaderProps(cv.cudacodec.VideoReaderProps_PROP_NUMBER_OF_RAW_PACKAGES_SINCE_LAST_GRAB) + self.assertTrue(ret and n_raw_packages_since_last_grab > 0) + ret, raw_data = reader.retrieve(int(i_base)) + self.assertTrue(ret and isinstance(raw_data,np.ndarray) and np.any(raw_data)) + 
except cv.error as e: notSupported = (e.code == cv.Error.StsNotImplemented or e.code == cv.Error.StsUnsupportedFormat or e.code == cv.Error.GPU_API_CALL_ERROR) self.assertTrue(notSupported) diff --git a/modules/cudacodec/src/video_reader.cpp b/modules/cudacodec/src/video_reader.cpp index d1a41fbff05..903defaf379 100644 --- a/modules/cudacodec/src/video_reader.cpp +++ b/modules/cudacodec/src/video_reader.cpp @@ -102,6 +102,7 @@ namespace void set(const ColorFormat _colorFormat) CV_OVERRIDE; bool get(const VideoReaderProps propertyId, double& propertyVal) const CV_OVERRIDE; + bool getVideoReaderProps(const VideoReaderProps propertyId, double& propertyValOut, double propertyValIn) const CV_OVERRIDE; bool get(const int propertyId, double& propertyVal) const CV_OVERRIDE; @@ -246,13 +247,13 @@ namespace } else if (idx == extraDataIdx) { if (!frame.isMat()) - CV_Error(Error::StsUnsupportedFormat, "Extra data is stored on the host and must be retrueved using a cv::Mat"); + CV_Error(Error::StsUnsupportedFormat, "Extra data is stored on the host and must be retrieved using a cv::Mat"); videoSource_->getExtraData(frame.getMatRef()); } else{ if (idx >= rawPacketsBaseIdx && idx < rawPacketsBaseIdx + rawPackets.size()) { if (!frame.isMat()) - CV_Error(Error::StsUnsupportedFormat, "Raw data is stored on the host and must retrievd using a cv::Mat"); + CV_Error(Error::StsUnsupportedFormat, "Raw data is stored on the host and must be retrieved using a cv::Mat"); Mat tmp(1, rawPackets.at(idx - rawPacketsBaseIdx).size, CV_8UC1, rawPackets.at(idx - rawPacketsBaseIdx).Data(), rawPackets.at(idx - rawPacketsBaseIdx).size); frame.getMatRef() = tmp; } @@ -264,8 +265,9 @@ namespace switch (propertyId) { case VideoReaderProps::PROP_RAW_MODE : videoSource_->SetRawMode(static_cast(propertyVal)); + return true; } - return true; + return false; } void VideoReaderImpl::set(const ColorFormat _colorFormat) { @@ -303,20 +305,28 @@ namespace else break; } - case 
VideoReaderProps::PROP_ALLOW_FRAME_DROP: { + case VideoReaderProps::PROP_ALLOW_FRAME_DROP: propertyVal = videoParser_->allowFrameDrops(); return true; - } - case VideoReaderProps::PROP_UDP_SOURCE: { + case VideoReaderProps::PROP_UDP_SOURCE: propertyVal = videoParser_->udpSource(); return true; - } + case VideoReaderProps::PROP_COLOR_FORMAT: + propertyVal = static_cast(colorFormat); + return true; default: break; } return false; } + bool VideoReaderImpl::getVideoReaderProps(const VideoReaderProps propertyId, double& propertyValOut, double propertyValIn) const { + double propertyValInOut = propertyValIn; + const bool ret = get(propertyId, propertyValInOut); + propertyValOut = propertyValInOut; + return ret; + } + bool VideoReaderImpl::get(const int propertyId, double& propertyVal) const { return videoSource_->get(propertyId, propertyVal); } diff --git a/modules/cudacodec/test/test_video.cpp b/modules/cudacodec/test/test_video.cpp index c2eb7a38102..e37833ef8bd 100644 --- a/modules/cudacodec/test/test_video.cpp +++ b/modules/cudacodec/test/test_video.cpp @@ -201,6 +201,8 @@ CUDA_TEST_P(Video, Reader) // request a different colour format for each frame const std::pair< cudacodec::ColorFormat, int>& formatToChannels = formatsToChannels[i % formatsToChannels.size()]; reader->set(formatToChannels.first); + double colorFormat; + ASSERT_TRUE(reader->get(cudacodec::VideoReaderProps::PROP_COLOR_FORMAT, colorFormat) && static_cast(colorFormat) == formatToChannels.first); ASSERT_TRUE(reader->nextFrame(frame)); if(!fmt.valid) fmt = reader->format(); diff --git a/modules/cudastereo/include/opencv2/cudastereo.hpp b/modules/cudastereo/include/opencv2/cudastereo.hpp index 9cadd123b53..27291c2e97d 100644 --- a/modules/cudastereo/include/opencv2/cudastereo.hpp +++ b/modules/cudastereo/include/opencv2/cudastereo.hpp @@ -355,7 +355,10 @@ disparity map. 
@sa reprojectImageTo3D */ -CV_EXPORTS_W void reprojectImageTo3D(InputArray disp, OutputArray xyzw, InputArray Q, int dst_cn = 4, Stream& stream = Stream::Null()); +CV_EXPORTS void reprojectImageTo3D(InputArray disp, OutputArray xyzw, InputArray Q, int dst_cn = 4, Stream& stream = Stream::Null()); +CV_EXPORTS_W inline void reprojectImageTo3D(GpuMat disp, CV_OUT GpuMat& xyzw, Mat Q, int dst_cn = 4, Stream& stream = Stream::Null()) { + reprojectImageTo3D((InputArray)disp, (OutputArray)xyzw, (InputArray)Q, dst_cn, stream); +} /** @brief Colors a disparity image. diff --git a/modules/cudastereo/misc/python/test/test_cudastereo.py b/modules/cudastereo/misc/python/test/test_cudastereo.py new file mode 100644 index 00000000000..8cd7c6cf762 --- /dev/null +++ b/modules/cudastereo/misc/python/test/test_cudastereo.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +import os +import cv2 as cv +import numpy as np + +from tests_common import NewOpenCVTests, unittest + +class cudastereo_test(NewOpenCVTests): + def setUp(self): + super(cudastereo_test, self).setUp() + if not cv.cuda.getCudaEnabledDeviceCount(): + self.skipTest("No CUDA-capable device is detected") + + def test_reprojectImageTo3D(self): + # Test's the functionality but not the results from reprojectImageTo3D + sz = (128,128) + np_disparity = np.random.randint(0, 64, sz, dtype=np.int16) + cu_disparity = cv.cuda_GpuMat(np_disparity) + np_q = np.random.randint(0, 100, (4, 4)).astype(np.float32) + stream = cv.cuda.Stream() + cu_xyz = cv.cuda.reprojectImageTo3D(cu_disparity, np_q, stream = stream) + self.assertTrue(cu_xyz.type() == cv.CV_32FC4 and cu_xyz.size() == sz) + cu_xyz1 = cv.cuda.GpuMat(sz, cv.CV_32FC3) + cv.cuda.reprojectImageTo3D(cu_disparity, np_q, cu_xyz1, 3, stream) + self.assertTrue(cu_xyz1.type() == cv.CV_32FC3 and cu_xyz1.size() == sz) + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() \ No newline at end of file From 2e3f918ff7812e33579c0eb5f9e1f86e03704164 Mon Sep 17 00:00:00 2001 From: Alexander 
Alekhin Date: Sun, 5 Jun 2022 07:46:09 +0000 Subject: [PATCH 16/45] aruco: fix missing solvePnP parameter, build warning --- modules/aruco/src/aruco.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index d7eabd776d2..87c95321505 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -894,7 +894,10 @@ static void _refineCandidateLines(std::vector& nContours, std::vector(i), - tvecs.at(i)); + tvecs.at(i), estimateParameters->solvePnPMethod); } }); From 30dc847100f3fdb69a38f7c1d6906253c344a845 Mon Sep 17 00:00:00 2001 From: Vincent Rabaud Date: Wed, 8 Jun 2022 15:09:19 +0200 Subject: [PATCH 17/45] Fix compilation because of pointer/int comparison. Without this fix, the error is: "ordered comparison between pointer and zero ('volatile unsigned int *' and 'int')" --- modules/cudastereo/src/cuda/stereobm.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/cudastereo/src/cuda/stereobm.cu b/modules/cudastereo/src/cuda/stereobm.cu index 98c727a3fbd..348556060d1 100644 --- a/modules/cudastereo/src/cuda/stereobm.cu +++ b/modules/cudastereo/src/cuda/stereobm.cu @@ -281,7 +281,7 @@ namespace cv { namespace cuda { namespace device InitColSSD(x_tex, y_tex, img_step, left, right, d, col_ssd); - if (col_ssd_extra > 0) + if (col_ssd_extra != nullptr) if (x_tex + BLOCK_W < cwidth) InitColSSD(x_tex + BLOCK_W, y_tex, img_step, left, right, d, col_ssd_extra); From 57db17eca22d72ebe8ddc1f26e6adc2c421a9611 Mon Sep 17 00:00:00 2001 From: Pavel Procopiuc Date: Wed, 8 Jun 2022 13:55:33 +0200 Subject: [PATCH 18/45] Fix building error with OGRE 1.12.9 --- modules/ovis/src/ovis.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/ovis/src/ovis.cpp b/modules/ovis/src/ovis.cpp index 1fa6db9295b..8c0e3431e4c 100644 --- a/modules/ovis/src/ovis.cpp +++ b/modules/ovis/src/ovis.cpp @@ -453,7 +453,8 @@ class WindowSceneImpl : public WindowScene 
if(tus->getTextureName() != name) { RTShader::ShaderGenerator::getSingleton().invalidateMaterial( - RTShader::ShaderGenerator::DEFAULT_SCHEME_NAME, *bgplane->getMaterial()); + RTShader::ShaderGenerator::DEFAULT_SCHEME_NAME, bgplane->getMaterial()->getName(), + RESOURCEGROUP_NAME); tus->setTextureName(name); tus->setTextureAddressingMode(TAM_CLAMP); From e182fc8675a167044b129a3bdf3c4ad2d3399f68 Mon Sep 17 00:00:00 2001 From: jmackay2 <1.732mackay@gmail.com> Date: Thu, 9 Jun 2022 22:06:33 -0400 Subject: [PATCH 19/45] Fix ceres manifold build --- modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc b/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc index 1d19978937b..65afe331f70 100644 --- a/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc +++ b/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc @@ -549,7 +549,7 @@ void EuclideanBundleCommonIntrinsics( if (bundle_constraints & BUNDLE_NO_TRANSLATION) { #if CERES_VERSION_MAJOR >= 3 || (CERES_VERSION_MAJOR >= 2 && CERES_VERSION_MINOR >= 1) - problem.SetParameterization(current_camera_R_t, + problem.SetManifold(current_camera_R_t, constant_translation_manifold); #else problem.SetParameterization(current_camera_R_t, From 344295eef2a2fc89c2dbcb5122b832541908bf33 Mon Sep 17 00:00:00 2001 From: fengyuentau Date: Wed, 15 Jun 2022 11:16:10 +0800 Subject: [PATCH 20/45] add workflows for macOS for contrib 4.x --- .github/workflows/PR-4.x.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/PR-4.x.yaml b/.github/workflows/PR-4.x.yaml index a1441e004d9..122e3d0e99b 100644 --- a/.github/workflows/PR-4.x.yaml +++ b/.github/workflows/PR-4.x.yaml @@ -13,4 +13,10 @@ jobs: uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-U20.yaml@main W10: - uses: 
opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-W10.yaml@main \ No newline at end of file + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-W10.yaml@main + + macOS-ARM64: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-macOS-ARM64.yaml@main + + macOS-X64: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-macOS-x86_64.yaml@main From 19769f03f379010b19c56b02baef295be63a70c1 Mon Sep 17 00:00:00 2001 From: fengyuentau Date: Wed, 15 Jun 2022 11:17:06 +0800 Subject: [PATCH 21/45] add workflows for macOS for contrib 3.4 --- .github/workflows/PR-3.4.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/PR-3.4.yaml b/.github/workflows/PR-3.4.yaml index 3d6d61bb48f..e219af7b809 100644 --- a/.github/workflows/PR-3.4.yaml +++ b/.github/workflows/PR-3.4.yaml @@ -13,4 +13,10 @@ jobs: uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-U20.yaml@main W10: - uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-W10.yaml@main \ No newline at end of file + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-W10.yaml@main + + macOS-ARM64: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-macOS-ARM64.yaml@main + + macOS-X64: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-macOS-x86_64.yaml@main From 911ab34eaa3faafa6f4421ae928aeb810d54d0f3 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Thu, 16 Jun 2022 12:11:40 +0000 Subject: [PATCH 22/45] aruco: fix solvePnP parameters --- modules/aruco/src/aruco.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index 87c95321505..fccb3d965ad 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -1227,6 +1227,7 @@ void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength, Ptr estimateParameters) { CV_Assert(markerLength > 0); + 
CV_Assert(estimateParameters); Mat markerObjPoints; _getSingleMarkerObjectPoints(markerLength, markerObjPoints, *estimateParameters); @@ -1243,7 +1244,7 @@ void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength, for (int i = begin; i < end; i++) { solvePnP(markerObjPoints, _corners.getMat(i), _cameraMatrix, _distCoeffs, rvecs.at(i), - tvecs.at(i), estimateParameters->solvePnPMethod); + tvecs.at(i), estimateParameters->useExtrinsicGuess, estimateParameters->solvePnPMethod); } }); From 377e856803925eca689fd27e63da986f6284f7ca Mon Sep 17 00:00:00 2001 From: Jochen Sprickerhof Date: Fri, 24 Jun 2022 10:11:08 +0200 Subject: [PATCH 23/45] Fix file ending --- .../aruco_board_detection.markdown | 2 +- .../images/{board.jpg => board.png} | Bin .../charuco_detection/charuco_detection.markdown | 2 +- .../images/{charucoboard.jpg => charucoboard.png} | Bin ...oof.jpg => checkershadow_illusion4med_proof.png} | Bin .../retina_illusion/retina_illusion.markdown | 4 ++-- 6 files changed, 4 insertions(+), 4 deletions(-) rename modules/aruco/tutorials/aruco_board_detection/images/{board.jpg => board.png} (100%) rename modules/aruco/tutorials/charuco_detection/images/{charucoboard.jpg => charucoboard.png} (100%) rename modules/bioinspired/tutorials/retina_illusion/images/{checkershadow_illusion4med_proof.jpg => checkershadow_illusion4med_proof.png} (100%) diff --git a/modules/aruco/tutorials/aruco_board_detection/aruco_board_detection.markdown b/modules/aruco/tutorials/aruco_board_detection/aruco_board_detection.markdown index 95d3a3dc3a4..6ce8603ecf2 100644 --- a/modules/aruco/tutorials/aruco_board_detection/aruco_board_detection.markdown +++ b/modules/aruco/tutorials/aruco_board_detection/aruco_board_detection.markdown @@ -156,7 +156,7 @@ In this case the margin is 10. 
The output image will be something like this: -![](images/board.jpg) +![](images/board.png) A full working example of board creation is included in the `create_board.cpp` inside the `modules/aruco/samples/`. diff --git a/modules/aruco/tutorials/aruco_board_detection/images/board.jpg b/modules/aruco/tutorials/aruco_board_detection/images/board.png similarity index 100% rename from modules/aruco/tutorials/aruco_board_detection/images/board.jpg rename to modules/aruco/tutorials/aruco_board_detection/images/board.png diff --git a/modules/aruco/tutorials/charuco_detection/charuco_detection.markdown b/modules/aruco/tutorials/charuco_detection/charuco_detection.markdown index 3ed729db95f..ddcd5e91beb 100644 --- a/modules/aruco/tutorials/charuco_detection/charuco_detection.markdown +++ b/modules/aruco/tutorials/charuco_detection/charuco_detection.markdown @@ -88,7 +88,7 @@ In this case the margin is 10. The output image will be something like this: -![](images/charucoboard.jpg) +![](images/charucoboard.png) A full working example is included in the `create_board_charuco.cpp` inside the `modules/aruco/samples/`. 
diff --git a/modules/aruco/tutorials/charuco_detection/images/charucoboard.jpg b/modules/aruco/tutorials/charuco_detection/images/charucoboard.png similarity index 100% rename from modules/aruco/tutorials/charuco_detection/images/charucoboard.jpg rename to modules/aruco/tutorials/charuco_detection/images/charucoboard.png diff --git a/modules/bioinspired/tutorials/retina_illusion/images/checkershadow_illusion4med_proof.jpg b/modules/bioinspired/tutorials/retina_illusion/images/checkershadow_illusion4med_proof.png similarity index 100% rename from modules/bioinspired/tutorials/retina_illusion/images/checkershadow_illusion4med_proof.jpg rename to modules/bioinspired/tutorials/retina_illusion/images/checkershadow_illusion4med_proof.png diff --git a/modules/bioinspired/tutorials/retina_illusion/retina_illusion.markdown b/modules/bioinspired/tutorials/retina_illusion/retina_illusion.markdown index 847321e5463..024879e0209 100644 --- a/modules/bioinspired/tutorials/retina_illusion/retina_illusion.markdown +++ b/modules/bioinspired/tutorials/retina_illusion/retina_illusion.markdown @@ -30,7 +30,7 @@ values of the two squares with the picker tool. In this image I've cropped a little piece of the A and B squares and I've put them side-by-side. It should be quite evident they have the same luminance. -![Adelson checkerboard proof](images/checkershadow_illusion4med_proof.jpg) +![Adelson checkerboard proof](images/checkershadow_illusion4med_proof.png) It's worth to know that this illusion works because the checkerboard image, as you may see it on your laptop, casts on your retina with dimensions that cause the retina local adaptation to take @@ -184,4 +184,4 @@ opportunity of writing this tutorial, and for reviewing it. **Edward Adelson** - for allowing me to freely use his checkerboard image. -**Antonio Cuni** - for reviewing this tutorial and for writing the Python code. 
\ No newline at end of file +**Antonio Cuni** - for reviewing this tutorial and for writing the Python code. From 3951e0338ae06d1846ca9acbd1cc591be8e7c18e Mon Sep 17 00:00:00 2001 From: Kumataro Date: Sun, 26 Jun 2022 00:39:02 +0900 Subject: [PATCH 24/45] Merge pull request #3288 from Kumataro:4.x-issue_contrib3276 freetype: support 8UC1/8UC4 image * freetype: add parameter test * freetype: support CV_8UC3 and CV_8C4 * freetype:fix for gcc5.4 * freetype: delete newline * freetype:fix for python binding * freetype: Revert 3rd argument type of loadFontData() to use int. - Revert 3rd argument type of loadFontData() to use "int". - Use cvtest::debugLevel instead to OUTPUT_FILE define. - Change type of idx_range list and ctol_range list to use "int". - fix typo. --- modules/freetype/include/opencv2/freetype.hpp | 8 +- modules/freetype/src/freetype.cpp | 194 +++++++++++--- modules/freetype/test/test_basic.cpp | 250 ++++++++++++++++++ modules/freetype/test/test_main.cpp | 6 + modules/freetype/test/test_precomp.hpp | 10 + modules/freetype/test/test_putText.cpp | 201 ++++++++++++++ 6 files changed, 633 insertions(+), 36 deletions(-) create mode 100644 modules/freetype/test/test_basic.cpp create mode 100644 modules/freetype/test/test_main.cpp create mode 100644 modules/freetype/test/test_precomp.hpp create mode 100644 modules/freetype/test/test_putText.cpp diff --git a/modules/freetype/include/opencv2/freetype.hpp b/modules/freetype/include/opencv2/freetype.hpp index 1c0a6e5e224..e62d058a876 100644 --- a/modules/freetype/include/opencv2/freetype.hpp +++ b/modules/freetype/include/opencv2/freetype.hpp @@ -79,10 +79,10 @@ class CV_EXPORTS_W FreeType2 : public Algorithm The function loadFontData loads font data. @param fontFileName FontFile Name -@param id face_index to select a font faces in a single file. +@param idx face_index to select a font faces in a single file. 
*/ - CV_WRAP virtual void loadFontData(String fontFileName, int id) = 0; + CV_WRAP virtual void loadFontData(String fontFileName, int idx) = 0; /** @brief Set Split Number from Bezier-curve to line @@ -99,7 +99,7 @@ If you want to draw small glyph, small is better. The function putText renders the specified text string in the image. Symbols that cannot be rendered using the specified font are replaced by "Tofu" or non-drawn. -@param img Image. (Only 8UC3 image is supported.) +@param img Image. (Only 8UC1/8UC3/8UC4 2D mat is supported.) @param text Text string to be drawn. @param org Bottom-left/Top-left corner of the text string in the image. @param fontHeight Drawing font size by pixel unit. @@ -123,7 +123,7 @@ That is, the following code renders some text, the tight box surrounding it, and String text = "Funny text inside the box"; int fontHeight = 60; int thickness = -1; - int linestyle = 8; + int linestyle = LINE_8; Mat img(600, 800, CV_8UC3, Scalar::all(0)); diff --git a/modules/freetype/src/freetype.cpp b/modules/freetype/src/freetype.cpp index 968c8d07d88..b8e605e5104 100644 --- a/modules/freetype/src/freetype.cpp +++ b/modules/freetype/src/freetype.cpp @@ -66,7 +66,7 @@ class CV_EXPORTS_W FreeType2Impl CV_FINAL : public FreeType2 public: FreeType2Impl(); ~FreeType2Impl(); - void loadFontData(String fontFileName, int id) CV_OVERRIDE; + void loadFontData(String fontFileName, int idx) CV_OVERRIDE; void setSplitNumber( int num ) CV_OVERRIDE; void putText( InputOutputArray img, const String& text, Point org, @@ -92,17 +92,28 @@ class CV_EXPORTS_W FreeType2Impl CV_FINAL : public FreeType2 int fontHeight, Scalar color, int thickness, int line_type, bool bottomLeftOrigin ); + void putTextBitmapBlend( InputOutputArray img, const String& text, Point org, int fontHeight, Scalar color, int thickness, int line_type, bool bottomLeftOrigin ); + void putTextOutline( InputOutputArray img, const String& text, Point org, int fontHeight, Scalar color, int thickness, int 
line_type, bool bottomLeftOrigin ); + typedef void (putPixel_mono_fn)( Mat& _dst, const int _py, const int _px, const uint8_t *_col); + putPixel_mono_fn putPixel_8UC1_mono; + putPixel_mono_fn putPixel_8UC3_mono; + putPixel_mono_fn putPixel_8UC4_mono; + + typedef void (putPixel_blend_fn)( Mat& _dst, const int _py, const int _px, const uint8_t *_col, const uint8_t alpha); + putPixel_blend_fn putPixel_8UC1_blend; + putPixel_blend_fn putPixel_8UC3_blend; + putPixel_blend_fn putPixel_8UC4_blend; static int mvFn( const FT_Vector *to, void * user); static int lnFn( const FT_Vector *to, void * user); @@ -158,7 +169,8 @@ FreeType2Impl::FreeType2Impl() FreeType2Impl::~FreeType2Impl() { - if( mIsFaceAvailable == true ){ + if( mIsFaceAvailable == true ) + { hb_font_destroy (mHb_font); CV_Assert(!FT_Done_Face(mFace)); mIsFaceAvailable = false; @@ -168,12 +180,22 @@ FreeType2Impl::~FreeType2Impl() void FreeType2Impl::loadFontData(String fontFileName, int idx) { - if( mIsFaceAvailable == true ){ + CV_Assert( idx >= 0 ); + if( mIsFaceAvailable == true ) + { hb_font_destroy (mHb_font); CV_Assert(!FT_Done_Face(mFace)); } - CV_Assert(!FT_New_Face( mLibrary, fontFileName.c_str(), idx, &(mFace) ) ); + + mIsFaceAvailable = false; + CV_Assert( !FT_New_Face( mLibrary, fontFileName.c_str(), static_cast(idx), &(mFace) ) ); + mHb_font = hb_ft_font_create (mFace, NULL); + if ( mHb_font == NULL ) + { + CV_Assert(!FT_Done_Face(mFace)); + return; + } CV_Assert( mHb_font != NULL ); mIsFaceAvailable = true; } @@ -189,16 +211,17 @@ void FreeType2Impl::putText( int _thickness, int _line_type, bool _bottomLeftOrigin ) { - CV_Assert( mIsFaceAvailable == true ); - CV_Assert( ( _img.empty() == false ) && - ( _img.isMat() == true ) && - ( _img.depth() == CV_8U ) && - ( _img.dims() == 2 ) && - ( _img.channels() == 3 ) ); - CV_Assert( ( _line_type == CV_AA) || - ( _line_type == 4 ) || - ( _line_type == 8 ) ); - CV_Assert( _fontHeight >= 0 ); + CV_Assert ( mIsFaceAvailable == true ); + CV_Assert ( 
_img.empty() == false ); + CV_Assert ( _img.isMat() == true ); + CV_Assert ( _img.dims() == 2 ); + CV_Assert( ( _img.type() == CV_8UC1 ) || + ( _img.type() == CV_8UC3 ) || + ( _img.type() == CV_8UC4 ) ); + CV_Assert( ( _line_type == LINE_AA) || + ( _line_type == LINE_4 ) || + ( _line_type == LINE_8 ) ); + CV_Assert ( _fontHeight >= 0 ); if ( _text.empty() ) { @@ -209,15 +232,11 @@ void FreeType2Impl::putText( return; } - if( _line_type == CV_AA && _img.depth() != CV_8U ){ - _line_type = 8; - } - CV_Assert(!FT_Set_Pixel_Sizes( mFace, _fontHeight, _fontHeight )); if( _thickness < 0 ) // CV_FILLED { - if ( _line_type == CV_AA ) { + if ( _line_type == LINE_AA ) { putTextBitmapBlend( _img, _text, _org, _fontHeight, _color, _thickness, _line_type, _bottomLeftOrigin ); }else{ @@ -292,13 +311,36 @@ void FreeType2Impl::putTextOutline( hb_buffer_destroy (hb_buffer); } +void FreeType2Impl::putPixel_8UC1_mono( Mat& _dst, const int _py, const int _px, const uint8_t *_col) +{ + uint8_t* ptr = _dst.ptr( _py, _px ); + (*ptr) = _col[0]; +} + +void FreeType2Impl::putPixel_8UC3_mono ( Mat& _dst, const int _py, const int _px, const uint8_t *_col) +{ + cv::Vec3b* ptr = _dst.ptr( _py, _px ); + (*ptr)[0] = _col[0]; + (*ptr)[1] = _col[1]; + (*ptr)[2] = _col[2]; +} + +void FreeType2Impl::putPixel_8UC4_mono( Mat& _dst, const int _py, const int _px, const uint8_t *_col) +{ + cv::Vec4b* ptr = _dst.ptr( _py, _px ); + (*ptr)[0] = _col[0]; + (*ptr)[1] = _col[1]; + (*ptr)[2] = _col[2]; + (*ptr)[3] = _col[3]; +} + void FreeType2Impl::putTextBitmapMono( InputOutputArray _img, const String& _text, Point _org, int _fontHeight, Scalar _color, int _thickness, int _line_type, bool _bottomLeftOrigin ) { CV_Assert( _thickness < 0 ); - CV_Assert( _line_type == 4 || _line_type == 8); + CV_Assert( _line_type == LINE_4 || _line_type == LINE_8); Mat dst = _img.getMat(); hb_buffer_t *hb_buffer = hb_buffer_create (); @@ -318,6 +360,17 @@ void FreeType2Impl::putTextBitmapMono( _org.y -= _fontHeight; } + const 
uint8_t _colorUC8n[4] = { + static_cast(_color[0]), + static_cast(_color[1]), + static_cast(_color[2]), + static_cast(_color[3]) }; + + void (cv::freetype::FreeType2Impl::*putPixel)( Mat&, const int, const int, const uint8_t*) = + (_img.type() == CV_8UC4)?(&FreeType2Impl::putPixel_8UC4_mono): + (_img.type() == CV_8UC3)?(&FreeType2Impl::putPixel_8UC3_mono): + (&FreeType2Impl::putPixel_8UC1_mono); + for( unsigned int i = 0 ; i < textLen ; i ++ ){ CV_Assert( !FT_Load_Glyph(mFace, info[i].codepoint, 0 ) ); CV_Assert( !FT_Render_Glyph( mFace->glyph, FT_RENDER_MODE_MONO ) ); @@ -351,10 +404,7 @@ void FreeType2Impl::putTextBitmapMono( } if ( ( (cl >> bit) & 0x01 ) == 1 ) { - cv::Vec3b* ptr = dst.ptr( gPos.y + row, gPos.x + col * 8 + (7 - bit) ); - (*ptr)[0] = _color[0]; - (*ptr)[1] = _color[1]; - (*ptr)[2] = _color[2]; + (this->*putPixel)( dst, gPos.y + row, gPos.x + col * 8 + (7 - bit), _colorUC8n ); } } } @@ -366,6 +416,80 @@ void FreeType2Impl::putTextBitmapMono( hb_buffer_destroy (hb_buffer); } +// Alpha composite algorithm is porting from imgproc. +// See https://github.com/opencv/opencv/blob/4.6.0/modules/imgproc/src/drawing.cpp +// static void LineAA( Mat& img, Point2l pt1, Point2l pt2, const void* color ) +// ICV_PUT_POINT Macro. 
+ +void FreeType2Impl::putPixel_8UC1_blend( Mat& _dst, const int _py, const int _px, const uint8_t *_col, const uint8_t alpha) +{ + const int a = alpha; + const int cb = _col[0]; + uint8_t* tptr = _dst.ptr( _py, _px ); + + int _cb = static_cast(tptr[0]); + _cb += ((cb - _cb)*a + 127)>> 8; + _cb += ((cb - _cb)*a + 127)>> 8; + + tptr[0] = static_cast(_cb); +} + +void FreeType2Impl::putPixel_8UC3_blend ( Mat& _dst, const int _py, const int _px, const uint8_t *_col, const uint8_t alpha) +{ + const int a = alpha; + const int cb = _col[0]; + const int cg = _col[1]; + const int cr = _col[2]; + uint8_t* tptr = _dst.ptr( _py, _px ); + + int _cb = static_cast(tptr[0]); + _cb += ((cb - _cb)*a + 127)>> 8; + _cb += ((cb - _cb)*a + 127)>> 8; + + int _cg = static_cast(tptr[1]); + _cg += ((cg - _cg)*a + 127)>> 8; + _cg += ((cg - _cg)*a + 127)>> 8; + + int _cr = static_cast(tptr[2]); + _cr += ((cr - _cr)*a + 127)>> 8; + _cr += ((cr - _cr)*a + 127)>> 8; + + tptr[0] = static_cast(_cb); + tptr[1] = static_cast(_cg); + tptr[2] = static_cast(_cr); +} + +void FreeType2Impl::putPixel_8UC4_blend( Mat& _dst, const int _py, const int _px, const uint8_t *_col, const uint8_t alpha) +{ + const uint8_t a = alpha; + const int cb = _col[0]; + const int cg = _col[1]; + const int cr = _col[2]; + const int ca = _col[3]; + uint8_t* tptr = _dst.ptr( _py, _px ); + + int _cb = static_cast(tptr[0]); + _cb += ((cb - _cb)*a + 127)>> 8; + _cb += ((cb - _cb)*a + 127)>> 8; + + int _cg = static_cast(tptr[1]); + _cg += ((cg - _cg)*a + 127)>> 8; + _cg += ((cg - _cg)*a + 127)>> 8; + + int _cr = static_cast(tptr[2]); + _cr += ((cr - _cr)*a + 127)>> 8; + _cr += ((cr - _cr)*a + 127)>> 8; + + int _ca = static_cast(tptr[3]); + _ca += ((ca - _ca)*a + 127)>> 8; + _ca += ((ca - _ca)*a + 127)>> 8; + + tptr[0] = static_cast(_cb); + tptr[1] = static_cast(_cg); + tptr[2] = static_cast(_cr); + tptr[3] = static_cast(_ca); +} + void FreeType2Impl::putTextBitmapBlend( InputOutputArray _img, const String& _text, Point _org, int 
_fontHeight, Scalar _color, @@ -373,7 +497,7 @@ void FreeType2Impl::putTextBitmapBlend( { CV_Assert( _thickness < 0 ); - CV_Assert( _line_type == 16 ); + CV_Assert( _line_type == LINE_AA ); Mat dst = _img.getMat(); hb_buffer_t *hb_buffer = hb_buffer_create (); @@ -393,6 +517,17 @@ void FreeType2Impl::putTextBitmapBlend( _org.y -= _fontHeight; } + const uint8_t _colorUC8n[4] = { + static_cast(_color[0]), + static_cast(_color[1]), + static_cast(_color[2]), + static_cast(_color[3]) }; + + void (cv::freetype::FreeType2Impl::*putPixel)( Mat&, const int, const int, const uint8_t*, const uint8_t) = + (_img.type() == CV_8UC4)?(&FreeType2Impl::putPixel_8UC4_blend): + (_img.type() == CV_8UC3)?(&FreeType2Impl::putPixel_8UC3_blend): + (&FreeType2Impl::putPixel_8UC1_blend); + for( unsigned int i = 0 ; i < textLen ; i ++ ){ CV_Assert( !FT_Load_Glyph(mFace, info[i].codepoint, 0 ) ); CV_Assert( !FT_Render_Glyph( mFace->glyph, FT_RENDER_MODE_NORMAL ) ); @@ -411,7 +546,7 @@ void FreeType2Impl::putTextBitmapBlend( } for (int col = 0; col < bmp->pitch; col ++) { - int cl = bmp->buffer[ row * bmp->pitch + col ]; + uint8_t cl = bmp->buffer[ row * bmp->pitch + col ]; if ( cl == 0 ) { continue; } @@ -424,12 +559,7 @@ void FreeType2Impl::putTextBitmapBlend( break; } - cv::Vec3b* ptr = dst.ptr( gPos.y + row , gPos.x + col); - double blendAlpha = (double ) cl / 255.0; - - (*ptr)[0] = (double) _color[0] * blendAlpha + (*ptr)[0] * (1.0 - blendAlpha ); - (*ptr)[1] = (double) _color[1] * blendAlpha + (*ptr)[1] * (1.0 - blendAlpha ); - (*ptr)[2] = (double) _color[2] * blendAlpha + (*ptr)[2] * (1.0 - blendAlpha ); + (this->*putPixel)( dst, gPos.y + row, gPos.x + col, _colorUC8n, cl ); } } _org.x += ( mFace->glyph->advance.x ) >> 6; diff --git a/modules/freetype/test/test_basic.cpp b/modules/freetype/test/test_basic.cpp new file mode 100644 index 00000000000..4c4e0c3d7ce --- /dev/null +++ b/modules/freetype/test/test_basic.cpp @@ -0,0 +1,250 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +struct MattypeParams +{ + string title; + int mattype; + bool expect_success; +}; + +::std::ostream& operator<<(::std::ostream& os, const MattypeParams& prm) { + return os << prm.title; +} + +const MattypeParams mattype_list[] = +{ + { "CV_8UC1", CV_8UC1, true}, { "CV_8UC2", CV_8UC2, false}, + { "CV_8UC3", CV_8UC3, true}, { "CV_8UC4", CV_8UC4, true}, + + { "CV_8SC1", CV_8SC1, false}, { "CV_8SC2", CV_8SC2, false}, + { "CV_8SC3", CV_8SC3, false}, { "CV_8SC4", CV_8SC4, false}, + { "CV_16UC1", CV_16UC1, false}, { "CV_16UC2", CV_16UC2, false}, + { "CV_16UC3", CV_16UC3, false}, { "CV_16UC4", CV_16UC4, false}, + { "CV_16SC1", CV_16SC1, false}, { "CV_16SC2", CV_16SC2, false}, + { "CV_16SC3", CV_16SC3, false}, { "CV_16SC4", CV_16SC4, false}, + { "CV_32SC1", CV_32SC1, false}, { "CV_32SC2", CV_32SC2, false}, + { "CV_32SC3", CV_32SC3, false}, { "CV_32SC4", CV_32SC4, false}, + { "CV_32FC1", CV_32FC1, false}, { "CV_32FC2", CV_32FC2, false}, + { "CV_32FC3", CV_32FC3, false}, { "CV_32FC4", CV_32FC4, false}, + { "CV_64FC1", CV_64FC1, false}, { "CV_64FC2", CV_64FC2, false}, + { "CV_64FC3", CV_64FC3, false}, { "CV_64FC4", CV_64FC4, false}, + { "CV_16FC1", CV_16FC1, false}, { "CV_16FC2", CV_16FC2, false}, + { "CV_16FC3", CV_16FC3, false}, { "CV_16FC4", CV_16FC4, false}, +}; + +/****************** + * Basically usage + *****************/ + +TEST(Freetype_Basic, success ) +{ + const string root = cvtest::TS::ptr()->get_data_path(); + const string fontdata = root + "freetype/mplus/Mplus1-Regular.ttf"; + + cv::Ptr ft2; + EXPECT_NO_THROW( ft2 = cv::freetype::createFreeType2() ); + EXPECT_NO_THROW( ft2->loadFontData( fontdata, 0 ) ); + + Mat dst(600,600, CV_8UC3, Scalar::all(255) ); + Scalar col(128,64,255,192); + EXPECT_NO_THROW( ft2->putText(dst, "Basic,success", 
Point( 0, 50), 50, col, -1, LINE_AA, true ) ); +} + +/****************** + * loadFontData() + *****************/ + +TEST(Freetype_loadFontData, nonexist_file) +{ + const string root = cvtest::TS::ptr()->get_data_path(); + const string fontdata = root + "UNEXITSTFONT"; /* NON EXISTS FONT DATA */ + + cv::Ptr ft2; + EXPECT_NO_THROW( ft2 = cv::freetype::createFreeType2() ); + EXPECT_THROW( ft2->loadFontData( fontdata, 0 ), cv::Exception ); + Mat dst(600,600, CV_8UC3, Scalar::all(255) ); + Scalar col(128,64,255,192); + EXPECT_THROW( ft2->putText(dst, "nonexist_file", Point( 0, 50), 50, col, -1, LINE_AA, true ), cv::Exception ); +} + +TEST(Freetype_loadFontData, forget_calling) +{ + cv::Ptr ft2; + EXPECT_NO_THROW( ft2 = cv::freetype::createFreeType2() ); + + const string root = cvtest::TS::ptr()->get_data_path(); + const string fontdata = root + "freetype/mplus/Mplus1-Regular.ttf"; +// EXPECT_NO_THROW( ft2->loadFontData( fontdata, 0 ) ); + + Mat dst(600,600, CV_8UC3, Scalar::all(255) ); + + Scalar col(128,64,255,192); + EXPECT_THROW( ft2->putText(dst, "forget_calling", Point( 0, 50), 50, col, -1, LINE_AA, true ), cv::Exception ); +} + +TEST(Freetype_loadFontData, call_multiple) +{ + cv::Ptr ft2; + EXPECT_NO_THROW( ft2 = cv::freetype::createFreeType2() ); + + const string root = cvtest::TS::ptr()->get_data_path(); + const string fontdata = root + "freetype/mplus/Mplus1-Regular.ttf"; + + for( int i = 0 ; i < 100 ; i ++ ) + { + EXPECT_NO_THROW( ft2->loadFontData( fontdata, 0 ) ); + } + + Mat dst(600,600, CV_8UC3, Scalar::all(255) ); + Scalar col(128,64,255,192); + EXPECT_NO_THROW( ft2->putText(dst, "call_mutilple", Point( 0, 50), 50, col, -1, LINE_AA, true ) ); +} + +typedef testing::TestWithParam idx_range; + +TEST_P(idx_range, failed ) +{ + cv::Ptr ft2; + EXPECT_NO_THROW( ft2 = cv::freetype::createFreeType2() ); + + const string root = cvtest::TS::ptr()->get_data_path(); + const string fontdata = root + "freetype/mplus/Mplus1-Regular.ttf"; + EXPECT_THROW( 
ft2->loadFontData( fontdata, GetParam() ), cv::Exception ); +} + +const int idx_failed_list[] = +{ + INT_MIN, + INT_MIN + 1, + -1, + 1, + 2, + INT_MAX - 1, + INT_MAX +}; + +INSTANTIATE_TEST_CASE_P(Freetype_loadFontData, idx_range, + testing::ValuesIn(idx_failed_list)); + +/****************** + * setSplitNumber() + *****************/ + +typedef testing::TestWithParam ctol_range; + +TEST_P(ctol_range, success) +{ + cv::Ptr ft2; + EXPECT_NO_THROW( ft2 = cv::freetype::createFreeType2() ); + + const string root = cvtest::TS::ptr()->get_data_path(); + const string fontdata = root + "freetype/mplus/Mplus1-Regular.ttf"; + EXPECT_NO_THROW( ft2->loadFontData( fontdata, 0 ) ); + EXPECT_NO_THROW( ft2->setSplitNumber(GetParam()) ); + + Mat dst(600,600, CV_8UC3, Scalar::all(255) ); + Scalar col(128,64,255,192); + EXPECT_NO_THROW( ft2->putText(dst, "CtoL", Point( 0, 50), 50, col, 1, LINE_4, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_4: oOpPqQ", Point( 40, 100), 50, col, 1, LINE_4, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_8: oOpPqQ", Point( 40, 150), 50, col, 1, LINE_8, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_AA:oOpPqQ", Point( 40, 150), 50, col, 1, LINE_AA, true ) ); +} + +const int ctol_list[] = +{ + 1, + 8, + 16, + 32, + 64, + 128, + // INT_MAX -1, // Hang-up + // INT_MAX // Hang-up +}; + +INSTANTIATE_TEST_CASE_P(Freetype_setSplitNumber, ctol_range, + testing::ValuesIn(ctol_list)); + + +/******************** + * putText()::common + *******************/ + +TEST(Freetype_putText, invalid_img ) +{ + const string root = cvtest::TS::ptr()->get_data_path(); + const string fontdata = root + "freetype/mplus/Mplus1-Regular.ttf"; + + cv::Ptr ft2; + EXPECT_NO_THROW( ft2 = cv::freetype::createFreeType2() ); + EXPECT_NO_THROW( ft2->loadFontData( fontdata, 0 ) ); + + Scalar col(128,64,255,192); + { /* empty mat */ + Mat dst; + EXPECT_THROW( ft2->putText(dst, "Invalid_img(empty Mat)", Point( 0, 50), 50, col, -1, LINE_AA, true ), cv::Exception ); + } + { /* 
not mat(scalar) */ + Scalar dst; + EXPECT_THROW( ft2->putText(dst, "Invalid_img(Scalar)", Point( 0, 50), 50, col, -1, LINE_AA, true ), cv::Exception ); + } +} + +typedef testing::TestWithParam MatType_Test; + +TEST_P(MatType_Test, default) +{ + const string root = cvtest::TS::ptr()->get_data_path(); + const string fontdata = root + "freetype/mplus/Mplus1-Regular.ttf"; + + const MattypeParams params = static_cast(GetParam()); + const string title = params.title; + const int mattype = params.mattype; + const bool expect_success = params.expect_success; + + cv::Ptr ft2; + EXPECT_NO_THROW( ft2 = cv::freetype::createFreeType2() ); + EXPECT_NO_THROW( ft2->loadFontData( fontdata, 0 ) ); + + Mat dst(600,600, mattype, Scalar::all(255) ); + + Scalar col(128,64,255,192); + + if ( expect_success == false ) + { + EXPECT_THROW( ft2->putText(dst, title, Point( 0, 50), 50, col, -1, LINE_AA, true ), cv::Exception ); + return; + } + + EXPECT_NO_THROW( ft2->putText(dst, title, Point( 0, 50), 50, col, -1, LINE_AA, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_4 FILL(mono)", Point(40, 100), 50, col, -1, LINE_4, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_8 FILL(mono)", Point(40, 150), 50, col, -1, LINE_8, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_AA FILL(blend)",Point(40, 200), 50, col, -1, LINE_AA, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_4 OUTLINE(1)", Point(40, 250), 50, col, 1, LINE_4, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_8 OUTLINE(1)", Point(40, 300), 50, col, 1, LINE_8, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_AA OUTLINE(1)", Point(40, 350), 50, col, 1, LINE_AA, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_4 OUTLINE(5)", Point(40, 400), 50, col, 5, LINE_4, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_8 OUTLINE(5)", Point(40, 450), 50, col, 5, LINE_8, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, "LINE_AA OUTLINE(5)", Point(40, 500), 50, col, 5, LINE_AA, true ) ); + putText(dst, "LINE_4 
putText(th=1)" , Point( 40,550), FONT_HERSHEY_SIMPLEX, 0.5, col, 1, LINE_4); + putText(dst, "LINE_8 putText(th=1)" , Point( 40,565), FONT_HERSHEY_SIMPLEX, 0.5, col, 1, LINE_8); + putText(dst, "LINE_AA putText(th=1)", Point( 40,580), FONT_HERSHEY_SIMPLEX, 0.5, col, 1, LINE_AA); + putText(dst, "LINE_4 putText(th=2)" , Point( 240,550),FONT_HERSHEY_SIMPLEX, 0.5, col, 2, LINE_4); + putText(dst, "LINE_8 putText(th=2)" , Point( 240,565),FONT_HERSHEY_SIMPLEX, 0.5, col, 2, LINE_8); + putText(dst, "LINE_AA putText(th=2)", Point( 240,580),FONT_HERSHEY_SIMPLEX, 0.5, col, 2, LINE_AA); + + if (cvtest::debugLevel > 0 ) + { + imwrite( cv::format("%s-MatType.png", title.c_str()), dst ); + } +} + +INSTANTIATE_TEST_CASE_P(Freetype_putText, MatType_Test, + testing::ValuesIn(mattype_list)); + +}} // namespace diff --git a/modules/freetype/test/test_main.cpp b/modules/freetype/test/test_main.cpp new file mode 100644 index 00000000000..0e51ddfd050 --- /dev/null +++ b/modules/freetype/test/test_main.cpp @@ -0,0 +1,6 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "test_precomp.hpp" + +CV_TEST_MAIN("cv") diff --git a/modules/freetype/test/test_precomp.hpp b/modules/freetype/test/test_precomp.hpp new file mode 100644 index 00000000000..273705086bf --- /dev/null +++ b/modules/freetype/test/test_precomp.hpp @@ -0,0 +1,10 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+#ifndef __OPENCV_TEST_PRECOMP_HPP__ +#define __OPENCV_TEST_PRECOMP_HPP__ + +#include "opencv2/freetype.hpp" +#include "opencv2/ts.hpp" + +#endif diff --git a/modules/freetype/test/test_putText.cpp b/modules/freetype/test/test_putText.cpp new file mode 100644 index 00000000000..55277e9b280 --- /dev/null +++ b/modules/freetype/test/test_putText.cpp @@ -0,0 +1,201 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +struct DrawingParams +{ + string title; + int mattype; + string fontname; +}; + +::std::ostream& operator<<(::std::ostream& os, const DrawingParams& prm) { + return os << prm.title; +} + +const DrawingParams drawing_list[] = +{ + { "CV_8UC1-Mplus1-Regular", CV_8UC1, "freetype/mplus/Mplus1-Regular.ttf"}, + { "CV_8UC3-Mplus1-Regular", CV_8UC3, "freetype/mplus/Mplus1-Regular.ttf"}, + { "CV_8UC4-Mplus1-Regular", CV_8UC4, "freetype/mplus/Mplus1-Regular.ttf"}, +}; + +/******************** + * putText()::boundry + *******************/ +typedef testing::TestWithParam BoundaryTest; + +TEST_P(BoundaryTest, default) +{ + const DrawingParams params = GetParam(); + const string title = params.title; + const int mattype = params.mattype; + const string fontname = params.fontname; + + const string root = cvtest::TS::ptr()->get_data_path(); + const string fontdata = root + fontname; + + cv::Ptr ft2; + EXPECT_NO_THROW( ft2 = cv::freetype::createFreeType2() ); + EXPECT_NO_THROW( ft2->loadFontData( fontdata, 0 ) ); + + Mat dst(600,600, mattype, Scalar::all(255) ); + + Scalar col(128,64,255,192); + EXPECT_NO_THROW( ft2->putText(dst, title, Point( 100, 200), 20, col, -1, LINE_AA, true ) ); + + const int textHeight = 30; + for ( int iy = -50 ; iy <= +50 ; iy++ ) + { + Point textOrg( 50, iy ); + const string text = "top boundary"; + EXPECT_NO_THROW( 
ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_4, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_8, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_AA, true ) ); + } + + for ( int iy = -50 ; iy <= +50 ; iy++ ) + { + Point textOrg( 400, dst.cols + iy ); + const string text = "bottom boundary"; + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_4, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_8, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_AA, true ) ); + } + + for ( int ix = -50 ; ix <= +50 ; ix++ ) + { + Point textOrg( ix, 100 ); + const string text = "left boundary"; + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_4, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_8, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_AA, true ) ); + } + + for ( int ix = -50 ; ix <= +50 ; ix++ ) + { + Point textOrg( dst.rows + ix, 500 ); + const string text = "bottom boundary"; + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_4, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_8, true ) ); + EXPECT_NO_THROW( ft2->putText(dst, text, textOrg, textHeight, col, -1, LINE_AA, true ) ); + } + + if (cvtest::debugLevel > 0 ) + { + imwrite( cv::format("%s-boundary.png", title.c_str()), dst ); + } +} + +INSTANTIATE_TEST_CASE_P(Freetype_putText, BoundaryTest, + testing::ValuesIn(drawing_list)) ; + +/********************* + * putText()::Ligature + *********************/ + +// See https://github.com/opencv/opencv_contrib/issues/2627 + +static Mat clipRoiAs8UC1( Mat &dst, Rect roi_rect ) +{ + Mat roi = Mat(dst, roi_rect).clone(); + switch( roi.type() ){ + case CV_8UC4: cvtColor(roi,roi,COLOR_BGRA2GRAY); break; + case 
CV_8UC3: cvtColor(roi,roi,COLOR_BGR2GRAY); break; + case CV_8UC1: default: break; // Do nothing + } + return roi; +} + +typedef testing::TestWithParam LigatureTest; +TEST_P(LigatureTest, regression2627) +{ + const DrawingParams params = GetParam(); + const string title = params.title; + const int mattype = params.mattype; + const string fontname = params.fontname; + + const string root = cvtest::TS::ptr()->get_data_path(); + const string fontdata = root + fontname; + + cv::Ptr ft2; + EXPECT_NO_THROW( ft2 = cv::freetype::createFreeType2() ); + EXPECT_NO_THROW( ft2->loadFontData( fontdata, 0 ) ); + + Mat dst(600,600, mattype, Scalar(0,0,0,255) ); + Scalar col(255,255,255,255); + EXPECT_NO_THROW( ft2->putText(dst, title, Point( 0, 50), 30, col, -1, LINE_AA, true ) ); + + vector texts = { + "ffi", // ff will be combined to single glyph. + "fs", + "fi", + "ff", + "ae", + "tz", + "oe", + "\xE3\x81\xAF", // HA ( HIRAGANA ) + "\xE3\x81\xAF\xE3\x82\x99", // BA ( HA + VOICED SOUND MARK ) + "\xE3\x81\xAF\xE3\x82\x9A", // PA ( HA + SEMI-VOICED SOUND MARK ) + "\xE3\x83\x8F", // HA ( KATAKANA ) + "\xE3\x83\x8F\xE3\x82\x99", // BA ( HA + VOICED SOUND MARK ) + "\xE3\x83\x8F\xE3\x82\x9A", // PA ( HA + SEMI-VOICED SOUND MARK ) + }; + + const int fontHeight = 20; + const int margin = fontHeight / 2; // for current glyph right edgeto next glyph left edge + + const int start_x = 40; + const int start_y = 100; + const int skip_x = 100; + const int skip_y = 25; + + int tx = start_x; + int ty = start_y; + + for (auto it = texts.begin(); it != texts.end(); it++ ) + { + if ( ty + fontHeight * 3 > dst.rows ) { + ty = start_y; + tx = tx + skip_x; + } + + EXPECT_NO_THROW( ft2->putText(dst, *it, Point(tx,ty), fontHeight, col, -1, LINE_4, true ) ); + + { // Check for next glyph area. 
+ const Rect roi_rect = Rect( tx + fontHeight + margin, ty - fontHeight, fontHeight, fontHeight ); + const Mat roi = clipRoiAs8UC1(dst, roi_rect); + EXPECT_EQ(0, countNonZero(roi) ); + } + ty += skip_y; + + EXPECT_NO_THROW( ft2->putText(dst, *it, Point(tx,ty), fontHeight, col, -1, LINE_8, true ) ); + { // Check for next glyph area. + const Rect roi_rect = Rect( tx + fontHeight + margin, ty - fontHeight, fontHeight, fontHeight ); + const Mat roi = clipRoiAs8UC1(dst, roi_rect); + EXPECT_EQ(0, countNonZero(roi) ); + } + ty += skip_y; + + EXPECT_NO_THROW( ft2->putText(dst, *it, Point(tx,ty), fontHeight, col, 1, LINE_AA, true ) ); + { // Check for next glyph area. + const Rect roi_rect = Rect( tx + fontHeight + margin, ty - fontHeight, fontHeight, fontHeight ); + const Mat roi = clipRoiAs8UC1(dst, roi_rect); + EXPECT_EQ(0, countNonZero(roi) ); + } + ty += skip_y; + } + + if (cvtest::debugLevel > 0 ) + { + imwrite( cv::format("%s-contrib2627.png", title.c_str()), dst ); + } +} + +INSTANTIATE_TEST_CASE_P(Freetype_putText, LigatureTest, + testing::ValuesIn(drawing_list)); + +}} // namespace From 940d9104b03872574e7254504aac1ae596c3d5d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iago=20Su=C3=A1rez?= Date: Wed, 29 Jun 2022 23:56:33 +0200 Subject: [PATCH 25/45] Merge pull request #3277 from iago-suarez:4.x Add BAD descriptor to xfeatures2d module * Adding new BAD descriptor to xfeatures2d module * Changing BAD name by TEBLID and using int threshold again for BEBLID * Changing link to AKAZE tutorial and moved parameters initialization to ::create() * Adding f suffixes to floating-point parameters and using arrays again --- modules/xfeatures2d/README.md | 2 +- modules/xfeatures2d/doc/xfeatures2d.bib | 12 ++ .../include/opencv2/xfeatures2d.hpp | 43 ++++ modules/xfeatures2d/perf/perf_teblid.cpp | 36 ++++ modules/xfeatures2d/src/beblid.cpp | 119 ++++++++--- modules/xfeatures2d/src/beblid.p256.hpp | 4 +- modules/xfeatures2d/src/beblid.p512.hpp | 4 +- 
modules/xfeatures2d/src/teblid.p256.hpp | 104 ++++++++++ modules/xfeatures2d/src/teblid.p512.hpp | 189 ++++++++++++++++++ modules/xfeatures2d/test/test_features2d.cpp | 7 + .../test_rotation_and_scale_invariance.cpp | 4 + 11 files changed, 495 insertions(+), 29 deletions(-) create mode 100644 modules/xfeatures2d/perf/perf_teblid.cpp create mode 100644 modules/xfeatures2d/src/teblid.p256.hpp create mode 100644 modules/xfeatures2d/src/teblid.p512.hpp diff --git a/modules/xfeatures2d/README.md b/modules/xfeatures2d/README.md index f2cba30154e..29214fd6b14 100644 --- a/modules/xfeatures2d/README.md +++ b/modules/xfeatures2d/README.md @@ -5,4 +5,4 @@ Extra 2D Features Framework 2. Non-free 2D feature algorithms Extra 2D Features Framework containing experimental and non-free 2D feature detector/descriptor algorithms: - SURF, BRIEF, Censure, Freak, LUCID, Daisy, BEBLID, Self-similar. + SURF, BRIEF, Censure, Freak, LUCID, Daisy, BEBLID, TEBLID, Self-similar. diff --git a/modules/xfeatures2d/doc/xfeatures2d.bib b/modules/xfeatures2d/doc/xfeatures2d.bib index 2e950c46ae3..7d3f146cb94 100644 --- a/modules/xfeatures2d/doc/xfeatures2d.bib +++ b/modules/xfeatures2d/doc/xfeatures2d.bib @@ -154,6 +154,18 @@ @article{Suarez2020BEBLID author = {Iago Su\'arez and Ghesn Sfeir and Jos\'e M. Buenaposada and Luis Baumela}, } +@article{Suarez2021TEBLID, + title = {Revisiting Binary Local Image Description for Resource Limited Devices}, + journal = {IEEE Robotics and Automation Letters}, + volume = {6}, + pages = {8317--8324}, + year = {2021}, + number = {4}, + doi = {https://doi.org/10.1109/LRA.2021.3107024}, + url = {https://arxiv.org/pdf/2108.08380.pdf}, + author = {Iago Su\'arez and Jos\'e M. 
Buenaposada and Luis Baumela}, +} + @inproceedings{winder2007learning, title= {Learning Local Image Descriptors}, author= {Winder, Simon AJ and Brown, Matthew}, diff --git a/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp b/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp index fa7c449273b..59e62ad216d 100644 --- a/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp +++ b/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp @@ -224,6 +224,49 @@ class CV_EXPORTS_W BEBLID : public Feature2D CV_WRAP static Ptr create(float scale_factor, int n_bits = BEBLID::SIZE_512_BITS); }; +/** @brief Class implementing TEBLID (Triplet-based Efficient Binary Local Image Descriptor), + * described in @cite Suarez2021TEBLID. + +TEBLID stands for Triplet-based Efficient Binary Local Image Descriptor, although originally it was called BAD +\cite Suarez2021TEBLID. It is an improvement over BEBLID \cite Suarez2020BEBLID, that uses triplet loss, +hard negative mining, and anchor swap to improve the image matching results. +It is able to describe keypoints from any detector just by changing the scale_factor parameter. +TEBLID is as efficient as ORB, BEBLID or BRISK, but the triplet-based training objective selected more +discriminative features that explain the accuracy gain. It is also more compact than BEBLID, +when running the [AKAZE example](https://github.com/opencv/opencv/blob/4.x/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp) +with 10000 keypoints detected by ORB, BEBLID obtains 561 inliers (75%) with 512 bits, whereas +TEBLID obtains 621 (75.2%) with 256 bits. ORB obtains only 493 inliers (63%). + +If you find this code useful, please add a reference to the following paper: +
Iago Suárez, José M. Buenaposada, and Luis Baumela. +Revisiting Binary Local Image Description for Resource Limited Devices. +IEEE Robotics and Automation Letters, vol. 6, no. 4, pp. 8317-8324, Oct. 2021.
+ +The descriptor was trained in Liberty split of the UBC datasets \cite winder2007learning . +*/ +class CV_EXPORTS_W TEBLID : public Feature2D +{ +public: + /** + * @brief Descriptor number of bits, each bit is a box average difference. + * The user can choose between 256 or 512 bits. + */ + enum TeblidSize + { + SIZE_256_BITS = 102, SIZE_512_BITS = 103, + }; + /** @brief Creates the TEBLID descriptor. + @param scale_factor Adjust the sampling window around detected keypoints: + - 1.00f should be the scale for ORB keypoints + - 6.75f should be the scale for SIFT detected keypoints + - 6.25f is default and fits for KAZE, SURF detected keypoints + - 5.00f should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints + @param n_bits Determine the number of bits in the descriptor. Should be either + TEBLID::SIZE_256_BITS or TEBLID::SIZE_512_BITS. + */ + CV_WRAP static Ptr create(float scale_factor, int n_bits = TEBLID::SIZE_256_BITS); +}; + /** @brief Class implementing DAISY descriptor, described in @cite Tola10 @param radius radius of the descriptor at the initial scale diff --git a/modules/xfeatures2d/perf/perf_teblid.cpp b/modules/xfeatures2d/perf/perf_teblid.cpp new file mode 100644 index 00000000000..01a84336711 --- /dev/null +++ b/modules/xfeatures2d/perf/perf_teblid.cpp @@ -0,0 +1,36 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+#include "perf_precomp.hpp" + +namespace opencv_test { namespace { + +typedef perf::TestBaseWithParam teblid; + +#define TEBLID_IMAGES \ + "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\ + "stitching/a3.png" + +#ifdef OPENCV_ENABLE_NONFREE +PERF_TEST_P(teblid, extract, testing::Values(TEBLID_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat frame = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename; + + Mat mask; + declare.in(frame).time(90); + + Ptr detector = SURF::create(); + vector points; + detector->detect(frame, points, mask); + + Ptr descriptor = TEBLID::create(6.25f); + cv::Mat descriptors; + TEST_CYCLE() descriptor->compute(frame, points, descriptors); + + SANITY_CHECK_NOTHING(); +} +#endif // NONFREE + +}} // namespace diff --git a/modules/xfeatures2d/src/beblid.cpp b/modules/xfeatures2d/src/beblid.cpp index c0d38ed004e..8dec556a495 100644 --- a/modules/xfeatures2d/src/beblid.cpp +++ b/modules/xfeatures2d/src/beblid.cpp @@ -30,14 +30,21 @@ struct ABWLParams { int x1, y1, x2, y2, boxRadius, th; }; +// Same as previous with floating point threshold +struct ABWLParamsFloatTh +{ + int x1, y1, x2, y2, boxRadius; + float th; +}; // BEBLID implementation +template class BEBLID_Impl CV_FINAL: public BEBLID { public: // constructor - explicit BEBLID_Impl(float scale_factor, int n_bits = SIZE_512_BITS); + explicit BEBLID_Impl(float scale_factor, const std::vector& wl_params); // destructor ~BEBLID_Impl() CV_OVERRIDE = default; @@ -55,15 +62,65 @@ class BEBLID_Impl CV_FINAL: public BEBLID void compute(InputArray image, vector &keypoints, OutputArray descriptors) CV_OVERRIDE; private: - std::vector wl_params_; + std::vector wl_params_; float scale_factor_; cv::Size patch_size_; - void computeBEBLID(const cv::Mat &integralImg, - const std::vector &keypoints, - cv::Mat &descriptors); + void computeBoxDiffsDescriptor(const cv::Mat &integralImg, + const std::vector 
&keypoints, + cv::Mat &descriptors); }; // END BEBLID_Impl CLASS + +// TEBLID implementation +class TEBLID_Impl CV_FINAL: public TEBLID +{ +public: + + // constructor + explicit TEBLID_Impl(float scale_factor, const std::vector& wl_params) : + impl(scale_factor, wl_params){} + + // destructor + ~TEBLID_Impl() CV_OVERRIDE = default; + + // returns the descriptor length in bytes + int descriptorSize() const CV_OVERRIDE { return impl.descriptorSize(); } + + // returns the descriptor type + int descriptorType() const CV_OVERRIDE { return impl.descriptorType(); } + + // returns the default norm type + int defaultNorm() const CV_OVERRIDE { return impl.defaultNorm(); } + + // compute descriptors given keypoints + void compute(InputArray image, vector &keypoints, OutputArray descriptors) CV_OVERRIDE + { + impl.compute(image, keypoints, descriptors); + } + +private: + BEBLID_Impl impl; +}; // END TEBLID_Impl CLASS + +Ptr TEBLID::create(float scale_factor, int n_bits) +{ + if (n_bits == TEBLID::SIZE_512_BITS) + { + #include "teblid.p512.hpp" + return makePtr(scale_factor, teblid_wl_params_512); + } + else if(n_bits == TEBLID::SIZE_256_BITS) + { + #include "teblid.p256.hpp" + return makePtr(scale_factor, teblid_wl_params_256); + } + else + { + CV_Error(Error::StsBadArg, "n_bits should be either TEBLID::SIZE_512_BITS or TEBLID::SIZE_256_BITS"); + } +} + /** * @brief Function that determines if a keypoint is close to the image border. * @param kp The detected keypoint @@ -100,8 +157,9 @@ static inline bool isKeypointInTheBorder(const cv::KeyPoint &kp, * @param scaleFactor A scale factor that magnifies the measurement functions w.r.t. the keypoint. * @param patchSize The size of the normalized patch where the measurement functions were learnt. 
*/ -static inline void rectifyABWL(const std::vector &wlPatchParams, - std::vector &wlImageParams, +template< typename WeakLearnerT> +static inline void rectifyABWL(const std::vector &wlPatchParams, + std::vector &wlImageParams, const cv::KeyPoint &kp, float scaleFactor = 1, const cv::Size &patchSize = cv::Size(32, 32)) @@ -151,7 +209,8 @@ static inline void rectifyABWL(const std::vector &wlPatchParams, * @param integralImage The integral image used to compute the average gray value in the square regions. * @return The difference of gray level in the two squares defined by wlImageParams */ -static inline float computeABWLResponse(const ABWLParams &wlImageParams, +template +static inline float computeABWLResponse(const WeakLearnerT &wlImageParams, const cv::Mat &integralImage) { CV_DbgAssert(!integralImage.empty()); @@ -239,7 +298,8 @@ static inline float computeABWLResponse(const ABWLParams &wlImageParams, } // descriptor computation using keypoints -void BEBLID_Impl::compute(InputArray _image, vector &keypoints, OutputArray _descriptors) +template +void BEBLID_Impl::compute(InputArray _image, vector &keypoints, OutputArray _descriptors) { Mat image = _image.getMat(); @@ -281,27 +341,21 @@ void BEBLID_Impl::compute(InputArray _image, vector &keypoints, Output CV_DbgAssert(descriptors.type() == CV_8UC1); // Compute the BEBLID descriptors - computeBEBLID(integralImg, keypoints, descriptors); + computeBoxDiffsDescriptor(integralImg, keypoints, descriptors); } // constructor -BEBLID_Impl::BEBLID_Impl(float scale_factor, int n_bits) - : scale_factor_(scale_factor), patch_size_(32, 32) +template +BEBLID_Impl::BEBLID_Impl(float scale_factor, const std::vector& wl_params) + : wl_params_(wl_params), scale_factor_(scale_factor),patch_size_(32, 32) { - #include "beblid.p512.hpp" - #include "beblid.p256.hpp" - if (n_bits == SIZE_512_BITS) - wl_params_.assign(wl_params_512, wl_params_512 + sizeof(wl_params_512) / sizeof(wl_params_512[0])); - else if(n_bits == SIZE_256_BITS) - 
wl_params_.assign(wl_params_256, wl_params_256 + sizeof(wl_params_256) / sizeof(wl_params_256[0])); - else - CV_Error(Error::StsBadArg, "n_wls should be either SIZE_512_BITS or SIZE_256_BITS"); } // Internal function that implements the core of BEBLID descriptor -void BEBLID_Impl::computeBEBLID(const cv::Mat &integralImg, - const std::vector &keypoints, - cv::Mat &descriptors) +template +void BEBLID_Impl::computeBoxDiffsDescriptor(const cv::Mat &integralImg, + const std::vector &keypoints, + cv::Mat &descriptors) { CV_DbgAssert(!integralImg.empty()); CV_DbgAssert(size_t(descriptors.rows) == keypoints.size()); @@ -316,13 +370,13 @@ void BEBLID_Impl::computeBEBLID(const cv::Mat &integralImg, #endif { // Get a pointer to the first element in the range - ABWLParams *wl; + WeakLearnerT *wl; float responseFun; int areaResponseFun, kpIdx; size_t wlIdx; int box1x1, box1y1, box1x2, box1y2, box2x1, box2y1, box2x2, box2y2, bit_idx, side; uchar byte = 0; - std::vector imgWLParams(wl_params_.size()); + std::vector imgWLParams(wl_params_.size()); uchar *d = &descriptors.at(range.start, 0); for (kpIdx = range.start; kpIdx < range.end; kpIdx++) @@ -397,7 +451,20 @@ void BEBLID_Impl::computeBEBLID(const cv::Mat &integralImg, Ptr BEBLID::create(float scale_factor, int n_bits) { - return makePtr(scale_factor, n_bits); + if (n_bits == BEBLID::SIZE_512_BITS) + { + #include "beblid.p512.hpp" + return makePtr>(scale_factor, beblid_wl_params_512); + } + else if(n_bits == BEBLID::SIZE_256_BITS) + { + #include "beblid.p256.hpp" + return makePtr>(scale_factor, beblid_wl_params_256); + } + else + { + CV_Error(Error::StsBadArg, "n_bits should be either BEBLID::SIZE_512_BITS or BEBLID::SIZE_256_BITS"); + } } } // END NAMESPACE XFEATURES2D } // END NAMESPACE CV diff --git a/modules/xfeatures2d/src/beblid.p256.hpp b/modules/xfeatures2d/src/beblid.p256.hpp index 15be1ba2f9e..be95345e934 100644 --- a/modules/xfeatures2d/src/beblid.p256.hpp +++ b/modules/xfeatures2d/src/beblid.p256.hpp @@ -12,7 
+12,7 @@ // Pre-trained parameters of BEBLID-256 trained in Liberty data set with // a million of patch pairs, 20% positives and 80% negatives -static const ABWLParams wl_params_256[] = { +static const ABWLParams beblid_wl_params_256_[] = { {26, 20, 14, 16, 5, 16}, {17, 17, 15, 15, 2, 7}, {18, 16, 8, 13, 3, 18}, {19, 15, 13, 14, 3, 17}, {16, 16, 5, 15, 4, 10}, {25, 10, 16, 16, 6, 11}, {16, 15, 12, 15, 1, 12}, {18, 17, 14, 17, 1, 13}, {15, 14, 5, 21, 5, 6}, {14, 14, 11, 7, 4, 2}, @@ -79,3 +79,5 @@ static const ABWLParams wl_params_256[] = { {2, 14, 1, 9, 1, 1}, {6, 25, 6, 21, 1, 1}, {6, 2, 2, 1, 1, 1}, {30, 19, 29, 20, 1, 0}, {25, 21, 23, 20, 1, 0}, {16, 10, 16, 9, 1, 0} }; +static const std::vector beblid_wl_params_256(std::begin(beblid_wl_params_256_), + std::end(beblid_wl_params_256_)); diff --git a/modules/xfeatures2d/src/beblid.p512.hpp b/modules/xfeatures2d/src/beblid.p512.hpp index 496d0bd2024..57c97ecc025 100644 --- a/modules/xfeatures2d/src/beblid.p512.hpp +++ b/modules/xfeatures2d/src/beblid.p512.hpp @@ -12,7 +12,7 @@ // Pre-trained parameters of BEBLID-512 trained in Liberty data set with // a million of patch pairs, 20% positives and 80% negatives -static const ABWLParams wl_params_512[] = { +static const ABWLParams beblid_wl_params_512_[] = { {24, 18, 15, 17, 6, 13}, {19, 14, 13, 17, 2, 18}, {23, 19, 12, 15, 6, 19}, {24, 14, 16, 16, 6, 11}, {16, 15, 12, 16, 1, 12}, {16, 15, 7, 10, 4, 10}, {17, 12, 8, 17, 3, 16}, {24, 12, 11, 17, 7, 19}, {19, 17, 14, 11, 3, 13}, @@ -144,3 +144,5 @@ static const ABWLParams wl_params_512[] = { {26, 4, 26, 1, 1, 0}, {5, 21, 2, 20, 1, -1}, {14, 1, 13, 3, 1, 1}, {30, 9, 28, 8, 1, 0}, {13, 15, 12, 12, 1, 1}, {7, 23, 6, 25, 1, -1} }; +static const std::vector beblid_wl_params_512(std::begin(beblid_wl_params_512_), + std::end(beblid_wl_params_512_)); diff --git a/modules/xfeatures2d/src/teblid.p256.hpp b/modules/xfeatures2d/src/teblid.p256.hpp new file mode 100644 index 00000000000..a3c4eb69f5c --- /dev/null +++ 
b/modules/xfeatures2d/src/teblid.p256.hpp @@ -0,0 +1,104 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// Author: Iago Suarez + +// Implementation of the article: +// Iago Suarez, Jose M. Buenaposada, and Luis Baumela. +// Revisiting Binary Local Image Description for Resource Limited Devices. +// IEEE Robotics and Automation Letters, vol. 6, no. 4, pp. 8317-8324, Oct. 2021. + +// ABWLParams: x1, y1, x2, y2, boxRadius, th + +// Pre-trained parameters of TEBLID-256 trained in Liberty data set. 10K triplets are sampled per iteration. Each triplet +// contains an anchor patch, a positive and a negative, selected as the hardest among 256 random negatives. +static const ABWLParamsFloatTh teblid_wl_params_256_[] = { + {25, 14, 13, 15, 6, 21.65f}, {16, 15, 14, 11, 1, 5.65f}, {14, 14, 7, 8, 6, 4.95f}, + {10, 9, 6, 20, 6, 2.45f}, {13, 26, 13, 19, 5, 2.25f}, {19, 14, 19, 5, 4, 0.85f}, + {16, 19, 15, 13, 2, 3.35f}, {26, 26, 21, 12, 5, 1.75f}, {18, 23, 15, 20, 2, 4.55f}, + {12, 15, 10, 20, 1, -1.55f}, {26, 4, 18, 8, 3, 4.55f}, {8, 21, 2, 29, 2, -5.05f}, + {19, 16, 17, 19, 1, 3.15f}, {10, 3, 5, 13, 3, 4.85f}, {16, 10, 10, 14, 1, 9.95f}, + {19, 12, 18, 17, 1, 1.35f}, {21, 26, 21, 19, 5, -2.05f}, {6, 7, 5, 5, 5, -0.15f}, + {22, 12, 20, 14, 2, 1.55f}, {14, 12, 13, 17, 1, 3.35f}, {11, 16, 10, 13, 2, 0.25f}, + {7, 23, 7, 17, 3, 0.35f}, {27, 13, 25, 8, 4, 2.45f}, {20, 19, 16, 14, 1, 2.75f}, + {27, 10, 24, 16, 2, -1.65f}, {13, 12, 13, 6, 2, -0.05f}, {14, 18, 13, 23, 1, -0.75f}, + {14, 8, 11, 1, 1, 0.85f}, {14, 23, 12, 9, 2, 2.95f}, {6, 19, 2, 13, 2, -1.65f}, + {8, 19, 6, 19, 3, -0.05f}, {18, 28, 17, 25, 3, -0.25f}, {29, 28, 25, 22, 2, -3.85f}, + {15, 19, 15, 17, 3, -0.05f}, {23, 21, 19, 19, 1, 3.35f}, {20, 20, 20, 16, 3, 0.05f}, + {29, 4, 25, 8, 2, -3.55f}, {17, 6, 16, 25, 2, 2.65f}, {12, 21, 8, 29, 1, 1.95f}, + {14, 15, 9, 17, 2, 
6.35f}, {18, 5, 17, 3, 3, 0.85f}, {21, 12, 18, 10, 1, 2.65f}, + {17, 14, 14, 14, 2, 12.45f}, {5, 26, 3, 6, 3, 0.05f}, {16, 13, 15, 14, 1, 3.35f}, + {28, 21, 24, 22, 3, 1.75f}, {13, 12, 13, 10, 1, -1.05f}, {22, 3, 21, 11, 3, -1.05f}, + {27, 27, 4, 16, 4, 28.25f}, {12, 13, 7, 10, 1, 0.35f}, {15, 25, 15, 22, 2, -0.15f}, + {19, 10, 18, 12, 1, 2.05f}, {17, 16, 17, 9, 2, 2.55f}, {21, 17, 21, 14, 2, 0.85f}, + {13, 19, 12, 16, 1, 1.35f}, {11, 11, 9, 15, 1, 1.15f}, {15, 26, 14, 28, 3, 1.25f}, + {17, 22, 17, 20, 1, 1.35f}, {10, 26, 2, 27, 2, 1.85f}, {28, 12, 26, 23, 3, 3.95f}, + {4, 5, 3, 14, 3, 0.75f}, {17, 7, 17, 4, 3, 1.65f}, {19, 15, 17, 15, 1, -3.15f}, + {7, 8, 2, 5, 2, -6.35f}, {22, 15, 19, 14, 2, 2.05f}, {15, 16, 12, 20, 1, -5.15f}, + {13, 19, 12, 20, 1, 1.75f}, {17, 10, 17, 8, 2, -0.65f}, {26, 16, 19, 15, 4, -0.65f}, + {9, 14, 8, 20, 2, 1.05f}, {27, 14, 27, 4, 4, -0.85f}, {17, 14, 15, 9, 1, 0.85f}, + {5, 4, 5, 3, 3, -0.35f}, {15, 30, 9, 5, 1, 9.05f}, {7, 25, 7, 23, 6, 0.75f}, + {12, 24, 11, 16, 1, -1.75f}, {20, 29, 20, 20, 2, 0.75f}, {19, 18, 15, 19, 1, 16.05f}, + {9, 11, 7, 11, 7, 0.35f}, {27, 26, 26, 15, 4, 0.75f}, {10, 28, 10, 27, 3, 0.05f}, + {8, 12, 8, 6, 3, 0.05f}, {21, 23, 16, 22, 1, 3.75f}, {22, 7, 4, 25, 4, 14.15f}, + {17, 19, 16, 15, 1, -8.95f}, {28, 21, 11, 15, 3, 67.25f}, {15, 3, 15, 2, 2, -0.45f}, + {16, 16, 14, 17, 3, 1.65f}, {10, 17, 7, 18, 3, -1.95f}, {12, 18, 12, 15, 1, 1.15f}, + {18, 16, 16, 13, 1, 1.85f}, {20, 16, 19, 15, 1, 3.95f}, {16, 15, 11, 11, 1, -1.75f}, + {4, 14, 2, 13, 2, 0.45f}, {29, 18, 27, 17, 2, -1.55f}, {16, 18, 14, 16, 1, 1.05f}, + {23, 29, 22, 27, 2, -0.25f}, {18, 13, 18, 11, 1, -1.05f}, {26, 23, 21, 27, 4, 3.05f}, + {18, 22, 17, 18, 1, -1.05f}, {3, 11, 2, 21, 2, 1.95f}, {13, 18, 13, 9, 3, -0.05f}, + {15, 14, 14, 5, 2, 0.85f}, {1, 14, 1, 1, 1, 3.05f}, {29, 2, 5, 9, 2, 34.85f}, + {12, 17, 11, 17, 1, -0.15f}, {13, 10, 12, 25, 4, 4.35f}, {5, 13, 1, 25, 1, -10.65f}, + {13, 16, 13, 12, 1, 2.35f}, {16, 23, 16, 12, 1, -1.35f}, {27, 14, 22, 
14, 2, 0.05f}, + {29, 29, 27, 27, 2, 1.05f}, {23, 6, 22, 4, 4, 1.05f}, {22, 16, 22, 8, 3, -0.15f}, + {14, 1, 11, 9, 1, 0.45f}, {12, 11, 10, 8, 2, -0.55f}, {24, 19, 7, 16, 7, 10.45f}, + {5, 29, 2, 20, 2, 1.35f}, {19, 15, 19, 13, 1, -0.95f}, {15, 18, 8, 24, 2, 0.45f}, + {4, 24, 1, 30, 1, -0.85f}, {17, 30, 17, 26, 1, 1.45f}, {9, 8, 7, 5, 2, -1.85f}, + {15, 20, 15, 18, 1, 1.65f}, {27, 5, 14, 26, 4, 2.75f}, {18, 19, 18, 15, 1, 1.05f}, + {24, 14, 9, 12, 1, 81.45f}, {20, 6, 18, 10, 1, 3.35f}, {21, 23, 21, 21, 1, 0.85f}, + {19, 17, 6, 6, 6, 2.65f}, {10, 13, 6, 12, 3, 9.35f}, {30, 10, 27, 14, 1, 1.15f}, + {9, 5, 6, 3, 3, 1.35f}, {26, 21, 18, 19, 2, -1.55f}, {23, 5, 23, 4, 4, 0.85f}, + {14, 11, 11, 12, 1, 20.65f}, {18, 13, 16, 13, 1, 2.05f}, {7, 8, 3, 16, 3, 12.85f}, + {16, 15, 16, 12, 2, 7.95f}, {25, 20, 24, 25, 3, 2.25f}, {20, 14, 19, 14, 1, 0.05f}, + {12, 29, 12, 5, 1, 0.85f}, {23, 17, 13, 13, 5, 8.75f}, {27, 27, 23, 22, 4, -8.25f}, + {11, 4, 11, 3, 3, -0.35f}, {9, 18, 7, 15, 1, 1.65f}, {18, 17, 18, 14, 1, -3.95f}, + {28, 2, 6, 17, 2, 92.55f}, {5, 20, 3, 22, 3, 0.55f}, {30, 30, 30, 2, 1, 0.35f}, + {16, 8, 15, 13, 1, -0.75f}, {15, 16, 14, 13, 1, -12.25f}, {28, 5, 27, 5, 3, 0.55f}, + {13, 13, 12, 12, 1, 1.05f}, {7, 8, 6, 7, 6, 0.95f}, {10, 21, 10, 17, 1, 1.15f}, + {11, 17, 3, 30, 1, -43.25f}, {16, 17, 9, 14, 7, 3.05f}, {17, 16, 9, 14, 1, 4.35f}, + {14, 29, 13, 27, 2, 7.15f}, {19, 5, 19, 3, 2, 0.15f}, {18, 16, 14, 14, 1, 57.95f}, + {10, 23, 8, 25, 2, 4.35f}, {17, 17, 15, 18, 1, 0.75f}, {16, 22, 16, 16, 6, 0.05f}, + {29, 11, 27, 11, 2, 0.05f}, {13, 9, 7, 11, 1, 5.45f}, {18, 23, 17, 19, 4, 0.55f}, + {12, 14, 11, 17, 1, 0.95f}, {13, 23, 11, 18, 2, 20.55f}, {27, 8, 23, 20, 4, -4.45f}, + {18, 18, 18, 11, 4, 0.75f}, {8, 21, 5, 8, 5, 4.55f}, {23, 5, 21, 10, 1, -0.15f}, + {16, 16, 16, 12, 1, 8.65f}, {18, 17, 14, 19, 1, 42.65f}, {16, 27, 16, 24, 2, -0.45f}, + {21, 17, 15, 15, 1, -1.25f}, {16, 5, 15, 9, 2, -1.75f}, {24, 16, 1, 30, 1, 11.25f}, + {15, 14, 14, 19, 1, -8.15f}, {19, 12, 
12, 14, 2, 2.85f}, {5, 5, 3, 4, 3, -2.85f}, + {16, 11, 16, 9, 1, -5.05f}, {16, 9, 6, 18, 6, 44.65f}, {25, 24, 23, 14, 1, 1.45f}, + {5, 26, 5, 17, 5, -0.75f}, {9, 16, 6, 18, 1, 11.85f}, {29, 25, 9, 24, 2, 2.05f}, + {25, 22, 24, 30, 1, 1.25f}, {22, 2, 20, 5, 2, 4.45f}, {27, 1, 25, 11, 1, -1.35f}, + {15, 12, 14, 10, 1, 5.95f}, {17, 6, 16, 8, 1, 1.35f}, {28, 8, 23, 7, 3, -2.55f}, + {24, 24, 23, 22, 7, 5.05f}, {7, 18, 5, 20, 3, -2.85f}, {22, 15, 20, 20, 1, 7.35f}, + {30, 21, 28, 20, 1, -1.35f}, {3, 18, 2, 18, 2, -0.45f}, {6, 14, 5, 15, 1, 0.45f}, + {15, 18, 15, 16, 1, -11.85f}, {7, 11, 5, 2, 1, -39.65f}, {17, 17, 13, 15, 3, 1.65f}, + {12, 15, 7, 15, 5, -0.05f}, {16, 12, 15, 18, 1, 3.65f}, {14, 26, 14, 25, 5, -0.35f}, + {11, 17, 8, 18, 1, 0.05f}, {23, 13, 15, 21, 7, 1.85f}, {10, 9, 10, 2, 2, -0.45f}, + {17, 13, 12, 19, 1, -1.75f}, {20, 25, 19, 22, 1, 3.95f}, {9, 26, 8, 21, 1, 5.25f}, + {19, 22, 19, 18, 1, -1.05f}, {8, 15, 3, 12, 1, -11.95f}, {26, 13, 16, 19, 5, 37.05f}, + {24, 12, 21, 13, 1, -1.15f}, {12, 14, 12, 9, 1, 1.25f}, {3, 7, 1, 1, 1, 0.75f}, + {16, 9, 15, 3, 3, -6.05f}, {23, 20, 23, 8, 7, -1.55f}, {24, 16, 22, 15, 1, -1.65f}, + {20, 19, 20, 14, 1, 0.85f}, {30, 27, 29, 22, 1, 0.35f}, {27, 17, 4, 16, 4, 101.55f}, + {8, 13, 5, 13, 5, -5.05f}, {19, 8, 10, 16, 3, 3.65f}, {30, 11, 30, 4, 1, -2.35f}, + {14, 21, 14, 20, 1, -0.35f}, {14, 11, 13, 13, 1, -1.65f}, {30, 2, 28, 5, 1, 0.65f}, + {17, 29, 12, 24, 2, 6.35f}, {15, 25, 6, 30, 1, 2.85f}, {4, 1, 1, 1, 1, 5.25f}, + {12, 16, 5, 20, 5, 24.05f}, {16, 20, 14, 15, 1, 38.15f}, {6, 17, 6, 9, 3, -1.05f}, + {20, 17, 12, 20, 4, 3.05f}, {15, 15, 12, 4, 4, 0.35f}, {28, 20, 22, 21, 3, -16.05f}, + {14, 18, 9, 18, 5, -1.25f}, {26, 1, 23, 5, 1, 0.25f}, {21, 24, 11, 10, 7, 1.95f}, + {15, 19, 14, 12, 1, -0.85f}, {27, 29, 11, 16, 1, 107.65f}, {23, 19, 22, 29, 1, -1.55f}, + {2, 30, 2, 29, 1, -0.25f}, {14, 16, 6, 5, 3, 26.95f}, {17, 13, 14, 16, 1, 35.95f}, + {19, 14, 15, 16, 1, -4.85f}, {20, 25, 13, 15, 6, 1.55f}, {19, 18, 11, 12, 5, 
10.85f}, + {30, 30, 30, 13, 1, -7.15f}, {3, 14, 1, 9, 1, -4.25f}, {20, 17, 1, 18, 1, -25.15f}, + {16, 20, 12, 19, 1, 2.75f} +}; +static const std::vector teblid_wl_params_256(std::begin(teblid_wl_params_256_), + std::end(teblid_wl_params_256_)); diff --git a/modules/xfeatures2d/src/teblid.p512.hpp b/modules/xfeatures2d/src/teblid.p512.hpp new file mode 100644 index 00000000000..b1b1dabba70 --- /dev/null +++ b/modules/xfeatures2d/src/teblid.p512.hpp @@ -0,0 +1,189 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// Author: Iago Suarez + +// Implementation of the article: +// Iago Suarez, Jose M. Buenaposada, and Luis Baumela. +// Revisiting Binary Local Image Description for Resource Limited Devices. +// IEEE Robotics and Automation Letters, vol. 6, no. 4, pp. 8317-8324, Oct. 2021. + +// ABWLParams: x1, y1, x2, y2, boxRadius, th + +// Pre-trained parameters of TEBLID-512 trained in Liberty data set. 10K triplets are sampled per iteration. Each triplet +// contains an anchor patch, a positive and a negative, selected as the hardest among 256 random negatives. 
+static const ABWLParamsFloatTh teblid_wl_params_512_[] = { + {17, 18, 12, 15, 2, 14.45f}, {13, 14, 5, 7, 5, 4.15f}, {21, 16, 16, 14, 1, 7.75f}, + {27, 11, 18, 20, 3, 9.65f}, {17, 13, 16, 19, 2, 2.25f}, {18, 24, 18, 16, 5, 0.15f}, + {12, 11, 10, 25, 6, 0.45f}, {14, 17, 14, 13, 1, -0.95f}, {7, 4, 4, 15, 4, 3.65f}, + {27, 27, 23, 8, 4, -1.75f}, {19, 13, 19, 6, 6, 1.05f}, {14, 15, 10, 16, 1, 5.45f}, + {13, 15, 12, 22, 1, -0.05f}, {8, 22, 3, 27, 3, -2.65f}, {13, 19, 8, 13, 1, 3.35f}, + {18, 16, 17, 12, 1, 1.65f}, {27, 7, 25, 11, 4, -1.55f}, {24, 20, 20, 15, 2, 2.85f}, + {16, 24, 14, 3, 3, 3.05f}, {23, 18, 7, 18, 7, 22.05f}, {8, 7, 2, 1, 1, -3.65f}, + {17, 28, 17, 26, 3, -0.15f}, {17, 13, 17, 10, 2, -0.55f}, {10, 18, 10, 11, 1, -0.05f}, + {11, 28, 7, 22, 2, 3.25f}, {18, 13, 15, 15, 1, -2.85f}, {7, 14, 3, 20, 3, -1.25f}, + {17, 19, 14, 15, 1, 10.45f}, {14, 12, 14, 8, 2, -1.05f}, {14, 12, 13, 11, 1, 1.25f}, + {21, 9, 19, 19, 2, 3.15f}, {4, 28, 3, 10, 3, 2.05f}, {27, 27, 26, 26, 4, -0.55f}, + {19, 22, 19, 19, 2, -1.25f}, {12, 25, 12, 20, 1, 3.45f}, {19, 12, 15, 12, 1, 4.35f}, + {28, 21, 23, 21, 2, 2.45f}, {10, 15, 7, 18, 2, 2.55f}, {12, 7, 10, 3, 3, 1.35f}, + {21, 16, 19, 15, 1, 1.25f}, {19, 20, 18, 17, 1, 2.75f}, {26, 2, 19, 7, 2, -0.15f}, + {18, 2, 15, 22, 2, 8.35f}, {24, 26, 24, 22, 5, 0.35f}, {15, 26, 15, 19, 1, -1.25f}, + {13, 19, 11, 20, 1, 0.75f}, {5, 14, 4, 10, 4, -0.45f}, {15, 7, 15, 4, 2, -0.05f}, + {13, 16, 11, 7, 1, 0.85f}, {15, 22, 15, 18, 1, 3.65f}, {24, 8, 23, 4, 4, 1.55f}, + {13, 11, 11, 14, 1, 1.75f}, {4, 19, 3, 19, 3, -0.35f}, {22, 12, 19, 10, 1, 1.35f}, + {24, 27, 15, 22, 2, 4.85f}, {12, 13, 10, 10, 1, -2.25f}, {11, 25, 9, 29, 2, 0.25f}, + {15, 21, 15, 10, 1, -2.15f}, {19, 16, 18, 19, 1, 3.35f}, {29, 13, 24, 8, 2, 1.95f}, + {17, 16, 16, 20, 1, 6.25f}, {12, 17, 12, 15, 1, 0.35f}, {28, 4, 2, 11, 2, 24.45f}, + {7, 25, 5, 19, 3, -1.15f}, {22, 13, 20, 16, 1, 0.85f}, {14, 16, 13, 17, 1, -1.95f}, + {10, 3, 8, 11, 3, 5.15f}, {18, 7, 17, 11, 2, 1.35f}, {27, 11, 
25, 22, 2, 0.85f}, + {5, 26, 3, 28, 3, 0.35f}, {28, 13, 27, 13, 3, -0.45f}, {22, 20, 20, 28, 3, 4.95f}, + {12, 6, 5, 2, 2, -0.25f}, {14, 18, 13, 16, 1, 2.45f}, {17, 29, 3, 25, 2, 11.75f}, + {20, 20, 19, 19, 1, 0.85f}, {15, 12, 14, 15, 1, -1.65f}, {12, 14, 12, 13, 1, 0.05f}, + {17, 14, 10, 26, 3, 4.05f}, {11, 15, 6, 12, 6, -0.35f}, {9, 22, 9, 19, 1, -0.95f}, + {19, 18, 19, 14, 1, -0.25f}, {23, 15, 12, 18, 2, 49.35f}, {12, 15, 11, 14, 1, 0.85f}, + {28, 2, 27, 9, 2, 1.95f}, {11, 19, 11, 11, 7, 0.25f}, {13, 29, 13, 23, 2, 1.15f}, + {27, 19, 22, 17, 3, -2.65f}, {17, 3, 17, 2, 2, -0.25f}, {4, 6, 3, 3, 3, 0.85f}, + {19, 15, 16, 16, 1, -5.65f}, {22, 5, 20, 9, 2, 1.15f}, {14, 6, 13, 9, 2, 3.05f}, + {17, 16, 13, 16, 2, 4.05f}, {24, 18, 12, 6, 6, 7.35f}, {20, 14, 18, 15, 2, 9.05f}, + {20, 9, 18, 13, 1, 0.35f}, {18, 20, 17, 8, 2, 1.65f}, {10, 15, 9, 15, 2, 1.65f}, + {13, 7, 12, 26, 2, 2.55f}, {13, 12, 11, 19, 2, 6.95f}, {15, 2, 2, 29, 2, 1.75f}, + {15, 12, 14, 13, 1, 0.85f}, {20, 30, 19, 26, 1, -1.15f}, {28, 26, 28, 4, 3, 1.35f}, + {16, 13, 15, 12, 1, 5.45f}, {18, 11, 17, 25, 2, 1.35f}, {3, 17, 1, 24, 1, -2.35f}, + {21, 18, 19, 22, 1, -0.15f}, {9, 13, 9, 8, 2, 0.85f}, {19, 18, 16, 16, 1, -3.05f}, + {21, 22, 17, 20, 1, 0.05f}, {13, 4, 13, 3, 3, -0.35f}, {24, 15, 21, 9, 1, -0.65f}, + {24, 25, 19, 17, 6, 11.65f}, {4, 14, 3, 14, 2, -0.85f}, {17, 13, 14, 19, 1, 1.15f}, + {7, 19, 4, 16, 3, 2.35f}, {4, 20, 1, 5, 1, -9.25f}, {15, 13, 12, 14, 3, 16.05f}, + {19, 26, 19, 21, 2, -1.45f}, {11, 26, 10, 18, 5, 1.95f}, {17, 16, 17, 13, 1, 0.35f}, + {19, 16, 19, 11, 1, -0.35f}, {4, 26, 4, 23, 4, 0.15f}, {14, 19, 14, 13, 5, 0.25f}, + {10, 13, 8, 13, 2, -1.35f}, {14, 12, 14, 10, 1, 0.65f}, {29, 24, 26, 19, 2, -4.05f}, + {26, 9, 19, 19, 5, -2.25f}, {16, 23, 16, 17, 1, 1.05f}, {4, 13, 3, 4, 3, -0.05f}, + {13, 16, 7, 21, 2, -1.55f}, {17, 16, 16, 17, 1, 0.25f}, {29, 15, 5, 18, 2, 69.45f}, + {29, 2, 23, 5, 2, 0.15f}, {9, 17, 9, 14, 2, -1.25f}, {25, 26, 25, 22, 5, -1.85f}, + {13, 21, 13, 20, 1, 
-0.65f}, {23, 12, 7, 20, 6, 8.75f}, {6, 8, 6, 3, 3, -0.95f}, + {13, 19, 13, 17, 1, 1.95f}, {25, 21, 22, 20, 1, 1.05f}, {24, 17, 23, 15, 2, -1.45f}, + {20, 8, 17, 4, 1, 2.15f}, {11, 19, 10, 17, 1, -1.85f}, {9, 11, 6, 9, 1, -1.75f}, + {25, 9, 24, 14, 1, -2.95f}, {18, 20, 13, 14, 3, 2.65f}, {26, 23, 25, 23, 5, 0.65f}, + {14, 20, 11, 4, 4, -1.05f}, {28, 7, 25, 13, 3, 4.35f}, {13, 13, 12, 12, 1, 0.25f}, + {7, 29, 2, 2, 2, 19.65f}, {16, 17, 16, 8, 5, 0.35f}, {20, 6, 19, 12, 3, 1.65f}, + {19, 7, 19, 6, 6, 0.65f}, {20, 13, 19, 14, 1, 2.75f}, {19, 24, 16, 29, 2, 2.85f}, + {8, 15, 4, 13, 1, -10.95f}, {7, 9, 2, 10, 2, 3.65f}, {15, 14, 14, 13, 1, -4.15f}, + {18, 13, 18, 11, 1, 0.25f}, {8, 19, 5, 23, 2, -0.65f}, {3, 13, 1, 14, 1, -2.25f}, + {23, 20, 16, 14, 1, 3.75f}, {17, 15, 13, 18, 2, 35.75f}, {16, 16, 9, 14, 5, 3.15f}, + {15, 28, 15, 27, 3, -0.55f}, {18, 20, 16, 19, 1, 1.95f}, {16, 17, 16, 11, 2, -6.55f}, + {30, 1, 10, 19, 1, 88.35f}, {12, 19, 9, 23, 2, 7.25f}, {25, 13, 21, 13, 1, 1.75f}, + {9, 23, 5, 24, 5, -4.15f}, {13, 20, 13, 18, 1, 0.25f}, {13, 13, 12, 13, 3, 0.25f}, + {29, 18, 25, 2, 2, 0.65f}, {30, 30, 25, 26, 1, 3.75f}, {16, 20, 15, 11, 1, 1.65f}, + {18, 16, 18, 14, 1, 2.85f}, {15, 18, 5, 7, 4, 42.15f}, {16, 13, 15, 19, 1, 11.75f}, + {26, 24, 16, 9, 5, -1.25f}, {1, 28, 1, 5, 1, -8.25f}, {20, 17, 20, 16, 1, 0.05f}, + {15, 19, 10, 17, 4, 2.15f}, {12, 9, 10, 5, 1, 0.65f}, {30, 29, 28, 29, 1, -1.55f}, + {29, 17, 27, 18, 2, -2.75f}, {17, 29, 15, 27, 2, 1.15f}, {9, 29, 9, 28, 2, -0.15f}, + {23, 24, 21, 22, 1, -0.75f}, {22, 2, 1, 1, 1, 16.85f}, {20, 4, 20, 1, 1, 1.15f}, + {5, 30, 4, 25, 1, 1.45f}, {20, 8, 17, 12, 7, 15.35f}, {10, 7, 3, 17, 3, 19.45f}, + {21, 17, 14, 15, 5, 8.65f}, {14, 10, 13, 8, 1, -1.25f}, {4, 21, 4, 13, 3, 0.25f}, + {30, 1, 24, 10, 1, 2.15f}, {15, 17, 14, 16, 3, 0.15f}, {21, 23, 20, 15, 3, 2.85f}, + {17, 20, 17, 18, 3, -2.25f}, {12, 11, 12, 6, 5, 1.75f}, {15, 15, 12, 17, 1, -15.15f}, + {25, 9, 16, 25, 6, 4.15f}, {22, 28, 22, 27, 3, -0.45f}, {5, 8, 3, 3, 
3, -8.25f}, + {9, 5, 9, 1, 1, 1.25f}, {30, 12, 29, 23, 1, -0.85f}, {20, 21, 5, 9, 5, 3.55f}, + {15, 21, 15, 20, 1, 0.45f}, {11, 17, 10, 23, 2, -1.25f}, {16, 11, 15, 13, 1, 9.65f}, + {16, 12, 16, 10, 1, 1.25f}, {15, 6, 14, 3, 3, 3.15f}, {2, 4, 1, 1, 1, -2.05f}, + {15, 16, 11, 15, 1, -6.85f}, {24, 6, 24, 2, 2, -0.05f}, {8, 15, 6, 12, 1, 3.25f}, + {21, 27, 1, 30, 1, 7.05f}, {17, 10, 14, 16, 3, 2.35f}, {13, 9, 7, 7, 7, -1.45f}, + {22, 17, 19, 17, 1, -1.05f}, {16, 14, 14, 13, 2, 20.25f}, {14, 21, 13, 23, 1, 1.15f}, + {18, 2, 15, 7, 2, 1.35f}, {3, 25, 1, 24, 1, 1.05f}, {24, 20, 7, 14, 7, 5.15f}, + {26, 25, 24, 19, 2, 2.55f}, {6, 25, 6, 23, 6, 0.55f}, {15, 24, 15, 17, 7, 0.55f}, + {22, 14, 16, 15, 1, -0.15f}, {17, 25, 17, 23, 1, 0.65f}, {12, 18, 2, 26, 2, -28.35f}, + {30, 30, 26, 11, 1, 3.85f}, {22, 8, 16, 14, 5, 2.05f}, {9, 16, 8, 20, 1, -1.15f}, + {4, 14, 2, 13, 2, 3.05f}, {28, 7, 27, 8, 1, -0.95f}, {10, 22, 9, 24, 1, -1.55f}, + {14, 16, 13, 18, 3, -0.35f}, {28, 26, 3, 15, 2, 101.95f}, {12, 15, 10, 15, 1, 0.35f}, + {18, 17, 17, 15, 1, -2.05f}, {30, 10, 28, 14, 1, -2.15f}, {30, 14, 28, 30, 1, 4.25f}, + {30, 18, 7, 13, 1, 105.15f}, {3, 19, 2, 20, 1, 1.15f}, {16, 19, 14, 13, 2, 27.45f}, + {11, 9, 5, 27, 4, 30.15f}, {16, 19, 15, 15, 2, 18.45f}, {24, 22, 18, 19, 7, -2.45f}, + {12, 17, 12, 12, 1, 1.85f}, {28, 5, 28, 1, 1, 2.95f}, {4, 29, 2, 30, 1, -3.75f}, + {27, 11, 27, 8, 1, -0.65f}, {8, 3, 8, 1, 1, 0.05f}, {15, 10, 15, 8, 3, 0.25f}, + {12, 27, 11, 18, 4, 34.45f}, {25, 6, 22, 8, 6, -2.05f}, {15, 3, 15, 2, 2, 0.25f}, + {19, 22, 17, 19, 1, 3.25f}, {24, 21, 24, 16, 2, 2.05f}, {9, 7, 6, 6, 6, 2.85f}, + {13, 26, 11, 27, 2, 3.45f}, {24, 10, 19, 12, 4, 20.95f}, {22, 17, 22, 9, 2, 1.45f}, + {17, 14, 14, 11, 1, -1.95f}, {13, 4, 13, 3, 1, -0.35f}, {15, 18, 15, 17, 1, -6.85f}, + {29, 30, 29, 24, 1, 1.15f}, {29, 29, 20, 17, 2, -31.95f}, {6, 12, 2, 27, 2, 11.85f}, + {18, 17, 14, 13, 2, 2.75f}, {11, 27, 11, 26, 4, 0.15f}, {22, 12, 3, 18, 3, 8.35f}, + {15, 13, 13, 9, 1, 1.15f}, {12, 20, 
7, 18, 1, 2.15f}, {16, 6, 15, 9, 1, -1.15f}, + {3, 6, 1, 7, 1, -1.05f}, {12, 17, 11, 19, 1, 2.45f}, {15, 8, 8, 18, 7, 1.45f}, + {11, 19, 11, 5, 3, 0.15f}, {17, 20, 16, 23, 3, -1.65f}, {12, 6, 9, 13, 1, 41.85f}, + {2, 1, 1, 2, 1, 0.35f}, {14, 26, 13, 21, 3, 0.65f}, {25, 16, 16, 14, 3, -0.35f}, + {30, 14, 29, 14, 1, 0.25f}, {27, 25, 15, 22, 4, 56.75f}, {13, 10, 8, 7, 2, 23.55f}, + {18, 19, 13, 14, 1, 88.85f}, {28, 28, 28, 22, 3, -7.25f}, {8, 14, 8, 11, 1, 1.05f}, + {23, 28, 22, 24, 2, 0.75f}, {8, 2, 3, 18, 2, -1.05f}, {22, 24, 22, 23, 7, -1.65f}, + {20, 17, 15, 16, 1, -9.75f}, {8, 11, 6, 4, 4, -16.95f}, {25, 13, 23, 13, 2, -0.05f}, + {18, 18, 16, 15, 1, -10.05f}, {20, 16, 16, 15, 1, 34.15f}, {18, 20, 14, 26, 3, 14.75f}, + {17, 12, 17, 8, 1, -0.95f}, {1, 5, 1, 3, 1, 1.45f}, {22, 13, 13, 20, 2, 4.85f}, + {17, 16, 17, 14, 3, -0.05f}, {27, 17, 25, 17, 2, -0.35f}, {8, 23, 6, 29, 2, 0.75f}, + {15, 4, 14, 18, 1, 64.75f}, {10, 24, 10, 17, 4, 24.25f}, {25, 30, 25, 28, 1, -0.35f}, + {3, 22, 1, 29, 1, -13.65f}, {24, 8, 23, 17, 1, 2.85f}, {26, 3, 26, 1, 1, 1.75f}, + {18, 22, 18, 17, 2, -0.35f}, {9, 17, 8, 10, 2, 0.25f}, {29, 22, 29, 2, 2, -12.15f}, + {19, 4, 5, 10, 3, 108.05f}, {3, 28, 3, 27, 1, -0.35f}, {12, 15, 11, 18, 1, -3.35f}, + {30, 3, 28, 4, 1, 1.95f}, {7, 9, 7, 8, 1, -0.35f}, {24, 15, 8, 14, 7, 21.25f}, + {30, 6, 20, 16, 1, -20.05f}, {18, 18, 1, 10, 1, 95.85f}, {30, 20, 28, 21, 1, -1.05f}, + {15, 15, 13, 14, 1, -17.75f}, {6, 3, 5, 1, 1, -0.55f}, {3, 8, 1, 17, 1, 2.75f}, + {3, 2, 2, 2, 2, 0.65f}, {19, 28, 18, 20, 1, 0.75f}, {20, 20, 20, 17, 2, -1.85f}, + {21, 30, 19, 29, 1, 2.65f}, {12, 19, 12, 13, 1, -2.15f}, {29, 10, 29, 4, 2, 1.05f}, + {20, 16, 20, 14, 1, -0.05f}, {15, 9, 11, 16, 2, 4.25f}, {8, 13, 6, 26, 4, 3.75f}, + {13, 11, 12, 8, 2, -13.55f}, {17, 27, 17, 26, 4, -0.05f}, {29, 29, 14, 12, 1, 105.95f}, + {29, 2, 28, 3, 2, 0.45f}, {9, 15, 7, 9, 4, 3.05f}, {27, 28, 12, 30, 1, 9.35f}, + {14, 30, 2, 28, 1, 79.25f}, {19, 12, 18, 14, 1, 1.75f}, {26, 5, 24, 15, 5, 7.65f}, 
+ {2, 24, 2, 2, 2, -0.45f}, {6, 21, 5, 21, 1, 0.95f}, {22, 16, 9, 17, 2, 79.15f}, + {16, 19, 15, 17, 1, 28.05f}, {2, 29, 2, 28, 2, 0.05f}, {25, 11, 24, 1, 1, 0.65f}, + {16, 30, 16, 29, 1, -0.35f}, {14, 20, 14, 17, 3, 0.95f}, {15, 14, 11, 17, 3, 2.05f}, + {18, 17, 16, 21, 1, 1.15f}, {17, 8, 17, 4, 2, 2.75f}, {11, 4, 11, 3, 3, -1.65f}, + {25, 16, 9, 17, 6, 8.65f}, {18, 8, 18, 6, 6, 3.55f}, {17, 22, 17, 19, 1, -0.25f}, + {8, 20, 3, 11, 3, -28.95f}, {20, 17, 4, 17, 1, -7.35f}, {29, 12, 12, 19, 2, 122.25f}, + {14, 29, 14, 28, 2, 0.55f}, {12, 18, 10, 18, 1, 4.75f}, {13, 15, 13, 11, 2, 1.75f}, + {18, 15, 14, 15, 2, 11.15f}, {19, 17, 17, 19, 1, 0.15f}, {22, 17, 12, 16, 6, 2.55f}, + {30, 22, 29, 18, 1, 0.05f}, {30, 2, 29, 20, 1, -7.95f}, {12, 3, 1, 1, 1, 51.05f}, + {4, 7, 1, 7, 1, 6.25f}, {27, 10, 21, 13, 4, 0.05f}, {18, 21, 18, 13, 3, -1.15f}, + {12, 4, 3, 6, 2, 0.75f}, {12, 10, 9, 3, 2, 26.65f}, {3, 28, 2, 29, 2, -1.25f}, + {22, 2, 20, 5, 2, 1.15f}, {27, 18, 20, 3, 3, -0.55f}, {6, 24, 6, 23, 1, 0.05f}, + {27, 26, 9, 16, 4, 5.85f}, {5, 18, 5, 11, 5, 1.55f}, {20, 14, 15, 12, 3, 2.25f}, + {19, 16, 19, 15, 1, -1.65f}, {27, 4, 21, 9, 4, -16.85f}, {3, 19, 2, 29, 1, -62.65f}, + {20, 24, 18, 22, 1, -1.25f}, {18, 7, 18, 2, 1, -1.05f}, {28, 30, 28, 28, 1, -1.55f}, + {11, 24, 10, 9, 1, 1.35f}, {21, 18, 21, 14, 3, 0.25f}, {27, 19, 26, 18, 2, -0.65f}, + {16, 18, 10, 6, 6, 0.85f}, {11, 18, 5, 19, 1, 17.45f}, {24, 16, 22, 16, 1, 0.25f}, + {17, 15, 17, 9, 5, -9.35f}, {27, 29, 20, 11, 2, 34.35f}, {29, 25, 28, 22, 1, 0.95f}, + {21, 11, 21, 5, 1, -0.95f}, {12, 15, 8, 16, 2, -10.05f}, {2, 29, 1, 30, 1, 1.45f}, + {18, 12, 4, 21, 3, -6.05f}, {18, 9, 11, 13, 3, 93.25f}, {18, 3, 10, 21, 3, 3.15f}, + {17, 11, 16, 16, 1, -10.85f}, {15, 17, 13, 14, 1, -1.65f}, {7, 7, 7, 5, 5, -0.15f}, + {9, 29, 5, 18, 2, -2.45f}, {10, 11, 10, 6, 6, -0.35f}, {28, 26, 25, 26, 1, 0.15f}, + {19, 30, 8, 20, 1, 118.15f}, {8, 15, 7, 29, 2, -81.85f}, {21, 18, 19, 17, 1, 4.05f}, + {2, 22, 1, 22, 1, 1.05f}, {12, 20, 4, 17, 
1, -0.85f}, {27, 8, 4, 14, 2, 152.35f}, + {26, 10, 25, 13, 1, 0.65f}, {19, 13, 19, 8, 3, 1.35f}, {12, 16, 7, 18, 7, -12.35f}, + {20, 26, 12, 3, 3, 95.35f}, {6, 10, 3, 10, 2, -23.25f}, {25, 25, 25, 21, 2, -0.75f}, + {12, 3, 7, 16, 2, 75.65f}, {8, 4, 4, 17, 4, 50.85f}, {12, 20, 5, 8, 5, 47.85f}, + {22, 15, 8, 13, 7, 2.55f}, {12, 13, 12, 8, 2, 0.55f}, {20, 15, 19, 13, 1, 0.15f}, + {30, 5, 29, 8, 1, 0.25f}, {14, 29, 13, 23, 2, 38.15f}, {18, 19, 9, 10, 7, -2.65f}, + {2, 11, 1, 10, 1, -1.95f}, {12, 13, 12, 11, 1, -1.05f}, {27, 15, 9, 5, 4, 110.25f}, + {13, 12, 7, 17, 2, 35.25f}, {8, 17, 1, 26, 1, 2.65f}, {20, 24, 11, 12, 4, 3.65f}, + {12, 24, 10, 22, 6, 18.75f}, {19, 29, 14, 20, 1, 89.45f}, {20, 27, 20, 25, 2, -0.55f}, + {9, 25, 8, 27, 1, 0.35f}, {7, 11, 5, 11, 1, 0.25f}, {20, 11, 11, 8, 1, 105.05f}, + {9, 8, 9, 5, 1, 0.25f}, {27, 9, 25, 10, 1, 1.25f}, {30, 20, 22, 20, 1, -34.65f}, + {26, 21, 26, 20, 1, -0.55f}, {30, 14, 27, 16, 1, -0.15f}, {12, 16, 11, 19, 3, 0.75f}, + {7, 28, 6, 29, 1, -0.15f}, {17, 23, 17, 22, 2, -0.15f}, {12, 17, 2, 2, 1, -94.25f}, + {17, 14, 17, 13, 1, -12.05f}, {18, 12, 16, 16, 1, -15.35f}, {7, 23, 7, 17, 1, -1.75f}, + {25, 12, 9, 15, 4, 31.35f}, {16, 6, 16, 5, 5, 0.15f}, {8, 16, 7, 16, 7, -2.15f}, + {6, 7, 5, 7, 5, -0.15f}, {15, 13, 15, 12, 2, -12.05f}, {13, 15, 13, 13, 3, -0.35f}, + {16, 12, 16, 11, 1, -0.65f}, {18, 15, 15, 14, 3, -0.55f}, {17, 8, 14, 5, 4, 28.95f}, + {9, 26, 6, 22, 5, 39.05f}, {17, 16, 14, 17, 3, 7.05f}, {25, 1, 24, 2, 1, 0.65f}, + {14, 16, 14, 15, 1, -1.65f}, {24, 22, 4, 23, 4, 2.85f}, {30, 29, 27, 29, 1, 4.85f}, + {17, 18, 17, 17, 1, -1.85f}, {19, 30, 19, 28, 1, 1.75f}, {21, 27, 21, 23, 3, -31.65f}, + {16, 18, 15, 20, 1, 0.05f}, {27, 27, 13, 12, 4, 13.75f}, {30, 25, 27, 26, 1, -0.35f}, + {4, 21, 3, 7, 1, 0.35f}, {10, 5, 10, 4, 4, -5.75f}, {14, 14, 5, 3, 1, 83.85f}, + {23, 6, 21, 3, 3, 0.95f}, {9, 20, 2, 15, 2, 27.95f}, {23, 9, 20, 13, 1, -0.25f}, + {15, 14, 12, 3, 3, -19.05f}, {19, 25, 19, 18, 4, 2.45f}, {27, 25, 24, 22, 4, 
-13.25f}, + {15, 15, 15, 11, 1, 44.05f}, {17, 16, 14, 13, 1, 51.85f}, {12, 18, 12, 17, 1, -0.05f}, + {30, 3, 30, 2, 1, 0.05f}, {21, 20, 18, 28, 3, 79.35f}, {25, 25, 7, 14, 5, 111.25f}, + {3, 11, 2, 3, 2, -61.65f}, {25, 5, 9, 21, 4, 3.05f}, {6, 15, 4, 28, 3, -69.95f}, + {9, 9, 3, 3, 3, 35.65f}, {16, 19, 14, 16, 2, 62.05f}, {10, 25, 10, 20, 1, -0.25f}, + {2, 17, 2, 15, 1, -1.05f}, {17, 15, 15, 16, 1, -2.95f}, {20, 15, 19, 15, 1, 0.95f}, + {22, 2, 22, 1, 1, -0.15f}, {15, 19, 15, 18, 1, -18.15f}, {15, 16, 10, 12, 1, -18.65f}, + {28, 2, 23, 14, 2, 74.55f}, {11, 3, 9, 2, 1, 2.65f} +}; +static const std::vector teblid_wl_params_512(std::begin(teblid_wl_params_512_), + std::end(teblid_wl_params_512_)); diff --git a/modules/xfeatures2d/test/test_features2d.cpp b/modules/xfeatures2d/test/test_features2d.cpp index 325d5972f20..193ce5e4504 100644 --- a/modules/xfeatures2d/test/test_features2d.cpp +++ b/modules/xfeatures2d/test/test_features2d.cpp @@ -192,6 +192,13 @@ TEST(Features2d_DescriptorExtractor_BEBLID, regression ) test.safe_run(); } +TEST(Features2d_DescriptorExtractor_TEBLID, regression ) +{ + CV_DescriptorExtractorTest test("descriptor-teblid", 1, + TEBLID::create(6.75)); + test.safe_run(); +} + #ifdef OPENCV_XFEATURES2D_HAS_VGG_DATA TEST( Features2d_DescriptorExtractor_VGG, regression ) { diff --git a/modules/xfeatures2d/test/test_rotation_and_scale_invariance.cpp b/modules/xfeatures2d/test/test_rotation_and_scale_invariance.cpp index 707a0bab4da..a3dd1a98845 100644 --- a/modules/xfeatures2d/test/test_rotation_and_scale_invariance.cpp +++ b/modules/xfeatures2d/test/test_rotation_and_scale_invariance.cpp @@ -34,6 +34,10 @@ INSTANTIATE_TEST_CASE_P(BEBLID, DescriptorRotationInvariance, Values( make_tuple(IMAGE_TSUKUBA, SIFT::create(), BEBLID::create(6.75), 0.98f) )); +INSTANTIATE_TEST_CASE_P(TEBLID, DescriptorRotationInvariance, Values( + make_tuple(IMAGE_TSUKUBA, SIFT::create(), TEBLID::create(6.75), 0.98f) +)); + INSTANTIATE_TEST_CASE_P(DAISY, 
DescriptorRotationInvariance, Values( make_tuple(IMAGE_TSUKUBA, BRISK::create(), From 9d0a451bee4cdaf9d3f76912e5abac6000865f1a Mon Sep 17 00:00:00 2001 From: Gary Allen Date: Fri, 1 Jul 2022 00:01:52 +0200 Subject: [PATCH 26/45] Merge pull request #3296 from gvcallen:4.x Allow new glog in SFM CMake Project * Allow new glog in SFM CMakeLists.txt * Fix typo * Fixed missing bracket --- modules/sfm/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/sfm/CMakeLists.txt b/modules/sfm/CMakeLists.txt index 2a7869940bd..4377a61651e 100644 --- a/modules/sfm/CMakeLists.txt +++ b/modules/sfm/CMakeLists.txt @@ -9,17 +9,17 @@ find_package(Ceres QUIET) if(NOT Gflags_FOUND) # Ceres find gflags on the own, so separate search isn't necessary find_package(Gflags QUIET) endif() -if(NOT Glog_FOUND) # Ceres find glog on the own, so separate search isn't necessary +if(NOT (Glog_FOUND OR glog_FOUND)) # Ceres find glog on the own, so separate search isn't necessary find_package(Glog QUIET) endif() -if(NOT Gflags_FOUND OR NOT Glog_FOUND) +if(NOT Gflags_FOUND OR NOT (Glog_FOUND OR glog_FOUND)) # try local search scripts list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake") if(NOT Gflags_FOUND) find_package(Gflags QUIET) endif() - if(NOT Glog_FOUND) + if(NOT (Glog_FOUND OR glog_FOUND)) find_package(Glog QUIET) endif() endif() From a96a930f1241f3e3a2de477cda22cf30da1f9759 Mon Sep 17 00:00:00 2001 From: Tomoaki Teshima Date: Wed, 6 Jul 2022 23:27:45 +0900 Subject: [PATCH 27/45] suppress warning on GCC 7 and later --- modules/aruco/CMakeLists.txt | 4 ++++ modules/ccalib/CMakeLists.txt | 4 ++++ modules/datasets/CMakeLists.txt | 4 ++++ modules/dnn_objdetect/CMakeLists.txt | 5 ++++- modules/dpm/CMakeLists.txt | 4 ++++ modules/face/CMakeLists.txt | 4 ++++ modules/line_descriptor/CMakeLists.txt | 4 ++++ modules/optflow/CMakeLists.txt | 4 ++++ modules/rgbd/CMakeLists.txt | 4 ++++ modules/text/CMakeLists.txt | 4 ++++ modules/tracking/CMakeLists.txt 
| 4 ++++ modules/xfeatures2d/CMakeLists.txt | 4 ++++ modules/ximgproc/CMakeLists.txt | 4 ++++ modules/xobjdetect/CMakeLists.txt | 4 ++++ modules/xphoto/CMakeLists.txt | 4 ++++ 15 files changed, 60 insertions(+), 1 deletion(-) diff --git a/modules/aruco/CMakeLists.txt b/modules/aruco/CMakeLists.txt index 12467e88f4f..17f1da666a8 100644 --- a/modules/aruco/CMakeLists.txt +++ b/modules/aruco/CMakeLists.txt @@ -1,4 +1,8 @@ set(the_description "ArUco Marker Detection") +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(aruco opencv_core opencv_imgproc opencv_calib3d WRAP python java) ocv_include_directories(${CMAKE_CURRENT_BINARY_DIR}) diff --git a/modules/ccalib/CMakeLists.txt b/modules/ccalib/CMakeLists.txt index f803322ba9b..446e7f433c1 100644 --- a/modules/ccalib/CMakeLists.txt +++ b/modules/ccalib/CMakeLists.txt @@ -1,2 +1,6 @@ set(the_description "Custom Calibration Pattern") +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(ccalib opencv_core opencv_imgproc opencv_calib3d opencv_features2d opencv_highgui WRAP python) diff --git a/modules/datasets/CMakeLists.txt b/modules/datasets/CMakeLists.txt index 56ca9e3100c..97606bb4053 100644 --- a/modules/datasets/CMakeLists.txt +++ b/modules/datasets/CMakeLists.txt @@ -13,3 +13,7 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4267 # flann, Win64 -Wimplicit-fallthrough # tinyxml2.cpp ) +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() diff --git a/modules/dnn_objdetect/CMakeLists.txt b/modules/dnn_objdetect/CMakeLists.txt index 895bffbeddb..c6aa4540b28 100644 --- 
a/modules/dnn_objdetect/CMakeLists.txt +++ b/modules/dnn_objdetect/CMakeLists.txt @@ -1,5 +1,8 @@ set(the_description "Object Detection using CNNs") - +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(dnn_objdetect opencv_core opencv_imgproc opencv_dnn OPTIONAL opencv_highgui opencv_imgcodecs # samples ) diff --git a/modules/dpm/CMakeLists.txt b/modules/dpm/CMakeLists.txt index 4d6a302b73b..090f90366d6 100644 --- a/modules/dpm/CMakeLists.txt +++ b/modules/dpm/CMakeLists.txt @@ -3,3 +3,7 @@ set(the_description "Object Detection") ocv_define_module(dpm opencv_core opencv_imgproc opencv_objdetect OPTIONAL opencv_highgui WRAP python) ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4512) # disable warning on Win64 +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() diff --git a/modules/face/CMakeLists.txt b/modules/face/CMakeLists.txt index 2d5f8075a68..f7e5374d6b4 100644 --- a/modules/face/CMakeLists.txt +++ b/modules/face/CMakeLists.txt @@ -1,4 +1,8 @@ set(the_description "Face recognition etc") +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(face opencv_core opencv_imgproc opencv_objdetect diff --git a/modules/line_descriptor/CMakeLists.txt b/modules/line_descriptor/CMakeLists.txt index 0c18edf7d4a..fc3b3171183 100644 --- a/modules/line_descriptor/CMakeLists.txt +++ b/modules/line_descriptor/CMakeLists.txt @@ -1,2 +1,6 @@ set(the_description "Line descriptor") +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() 
ocv_define_module(line_descriptor opencv_imgproc OPTIONAL opencv_features2d WRAP python) diff --git a/modules/optflow/CMakeLists.txt b/modules/optflow/CMakeLists.txt index 39a37f143dd..beee733840f 100644 --- a/modules/optflow/CMakeLists.txt +++ b/modules/optflow/CMakeLists.txt @@ -1,2 +1,6 @@ set(the_description "Optical Flow Algorithms") +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(optflow opencv_core opencv_imgproc opencv_video opencv_ximgproc opencv_imgcodecs opencv_flann WRAP python) diff --git a/modules/rgbd/CMakeLists.txt b/modules/rgbd/CMakeLists.txt index f2e022fe8a7..643be62c754 100644 --- a/modules/rgbd/CMakeLists.txt +++ b/modules/rgbd/CMakeLists.txt @@ -1,2 +1,6 @@ set(the_description "RGBD algorithms") +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(rgbd opencv_core opencv_calib3d opencv_imgproc WRAP python) diff --git a/modules/text/CMakeLists.txt b/modules/text/CMakeLists.txt index 82e1e2a7e73..36caffb5dd5 100644 --- a/modules/text/CMakeLists.txt +++ b/modules/text/CMakeLists.txt @@ -3,6 +3,10 @@ set(__extra_deps "") if(DEBUG_opencv_text) list(APPEND __extra_deps PRIVATE_REQUIRED opencv_highgui) endif() +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(text opencv_ml opencv_imgproc opencv_core opencv_features2d opencv_dnn diff --git a/modules/tracking/CMakeLists.txt b/modules/tracking/CMakeLists.txt index 6fa88af29cb..50aae65a3db 100644 --- a/modules/tracking/CMakeLists.txt +++ b/modules/tracking/CMakeLists.txt @@ -1,3 +1,7 @@ set(the_description "Tracking API") ocv_define_module(tracking 
opencv_imgproc opencv_core opencv_video opencv_plot OPTIONAL opencv_dnn opencv_datasets WRAP java python) ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-shadow /wd4458) +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() diff --git a/modules/xfeatures2d/CMakeLists.txt b/modules/xfeatures2d/CMakeLists.txt index bbc540e278c..80b8c64caab 100644 --- a/modules/xfeatures2d/CMakeLists.txt +++ b/modules/xfeatures2d/CMakeLists.txt @@ -3,6 +3,10 @@ set(the_description "Contributed/Experimental Algorithms for Salient 2D Features if(HAVE_CUDA) ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef) endif() +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(xfeatures2d opencv_core opencv_imgproc opencv_features2d opencv_calib3d OPTIONAL opencv_shape opencv_ml opencv_cudaarithm WRAP python java) if(NOT OPENCV_SKIP_FEATURES2D_DOWNLOADING) diff --git a/modules/ximgproc/CMakeLists.txt b/modules/ximgproc/CMakeLists.txt index f6f88bec66d..b6699eeb4ff 100644 --- a/modules/ximgproc/CMakeLists.txt +++ b/modules/ximgproc/CMakeLists.txt @@ -1,2 +1,6 @@ set(the_description "Extended image processing module. 
It includes edge-aware filters and etc.") +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(ximgproc opencv_core opencv_imgproc opencv_calib3d opencv_imgcodecs WRAP python java) diff --git a/modules/xobjdetect/CMakeLists.txt b/modules/xobjdetect/CMakeLists.txt index 1727a42b75b..56b1749bb59 100644 --- a/modules/xobjdetect/CMakeLists.txt +++ b/modules/xobjdetect/CMakeLists.txt @@ -1,4 +1,8 @@ set(the_description "Object detection algorithms") +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(xobjdetect opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs WRAP python) if (BUILD_opencv_apps AND NOT APPLE_FRAMEWORK) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools ${CMAKE_CURRENT_BINARY_DIR}/tools) diff --git a/modules/xphoto/CMakeLists.txt b/modules/xphoto/CMakeLists.txt index a05848d389f..877384378cc 100644 --- a/modules/xphoto/CMakeLists.txt +++ b/modules/xphoto/CMakeLists.txt @@ -1,2 +1,6 @@ set(the_description "Addon to basic photo module") +if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + # suppress warnings from GCC only on 7.1 and later + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) +endif() ocv_define_module(xphoto opencv_core opencv_imgproc opencv_photo WRAP python java) From f4712664ea6ab44cd2cb7b87a8678b20d716bbd4 Mon Sep 17 00:00:00 2001 From: huangqinjin Date: Sat, 23 Jul 2022 21:42:04 +0800 Subject: [PATCH 28/45] ximgproc: fix mismatched new delete --- modules/ximgproc/src/edge_drawing.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ximgproc/src/edge_drawing.cpp b/modules/ximgproc/src/edge_drawing.cpp index 675738643df..6e6be14c31d 100644 --- a/modules/ximgproc/src/edge_drawing.cpp +++ 
b/modules/ximgproc/src/edge_drawing.cpp @@ -5603,8 +5603,8 @@ int EdgeDrawingImpl::inverse(double** TB, double** InvB, int N) void EdgeDrawingImpl::DeallocateMatrix(double** m, int noRows) { for (int i = 0; i < noRows; i++) - delete m[i]; - delete m; + delete[] m[i]; + delete[] m; } void EdgeDrawingImpl::AperB_T(double** A_, double** B_, double** _res, int _righA, int _colA, int _righB, int _colB) From 3a41fd6e68c1d6d3d7c07a3a2e3ff69b6db8aa77 Mon Sep 17 00:00:00 2001 From: AleksandrPanov Date: Fri, 29 Apr 2022 02:07:23 +0300 Subject: [PATCH 29/45] move april module create aruco_utils.hpp move Board, GridBoard, CharucoBoard to board.hpp/board.cpp refactoring _getSingleMarkerObjectPoints() refactoring _extractBits() refactoring _findMarkerContours() fix _copyVector2Output() in detectMarkers() move testCharucoCornersCollinear() to board.hpp/board.cpp move poseEstimate()/calibAruco() to aruco_calib_pose.hpp reduce include files move detectMarkers() to class ArucoDetector move refineDetectedMarkers() to class ArucoDetector add C API wrapper to detectMarkers(), refineDetectedMarkers() update tests and samples to class API add py tests: test_aruco_detector, test_aruco_detector_refine refactoring, fix docs add java tests: testArucoIssue3133, testArucoDetector add readWriteParameter(), update readParameter() implemented cv::Algorithm - read/write, added read/write to RefineParameters, added write to DetectorParameters merge PatternPos/EstimateParameters after rebase remove empty docstring for private function fixes fixes license --- modules/aruco/include/opencv2/aruco.hpp | 679 +----- .../opencv2/aruco/aruco_calib_pose.hpp | 286 +++ modules/aruco/include/opencv2/aruco/board.hpp | 229 ++ .../aruco/include/opencv2/aruco/charuco.hpp | 222 +- .../include/opencv2/aruco/dictionary.hpp | 56 +- .../aruco/include/opencv2/aruco_detector.hpp | 408 ++++ modules/aruco/misc/java/test/ArucoTest.java | 58 + modules/aruco/misc/python/test/test_aruco.py | 50 + 
modules/aruco/perf/perf_aruco.cpp | 9 +- modules/aruco/perf/perf_precomp.hpp | 2 +- modules/aruco/samples/aruco_dict_utils.cpp | 2 +- .../aruco/samples/aruco_samples_utility.hpp | 2 +- modules/aruco/samples/calibrate_camera.cpp | 9 +- modules/aruco/samples/create_board.cpp | 2 +- modules/aruco/samples/create_marker.cpp | 2 +- modules/aruco/samples/detect_board.cpp | 11 +- modules/aruco/samples/detect_markers.cpp | 7 +- .../{ => apriltag}/apriltag_quad_thresh.cpp | 150 +- .../{ => apriltag}/apriltag_quad_thresh.hpp | 24 +- .../predefined_dictionaries_apriltag.hpp | 0 .../aruco/src/{ => apriltag}/unionfind.hpp | 0 modules/aruco/src/{ => apriltag}/zarray.hpp | 0 modules/aruco/src/{ => apriltag}/zmaxheap.cpp | 2 +- modules/aruco/src/{ => apriltag}/zmaxheap.hpp | 0 modules/aruco/src/aruco.cpp | 1890 +---------------- modules/aruco/src/aruco_calib_pose.cpp | 257 +++ modules/aruco/src/aruco_detector.cpp | 1259 +++++++++++ modules/aruco/src/aruco_utils.cpp | 50 + modules/aruco/src/aruco_utils.hpp | 44 + modules/aruco/src/board.cpp | 381 ++++ modules/aruco/src/charuco.cpp | 418 +--- modules/aruco/src/dictionary.cpp | 113 +- modules/aruco/src/precomp.hpp | 43 +- modules/aruco/src/predefined_dictionaries.hpp | 40 +- modules/aruco/test/test_arucodetection.cpp | 96 +- modules/aruco/test/test_boarddetection.cpp | 30 +- modules/aruco/test/test_charucodetection.cpp | 63 +- modules/aruco/test/test_precomp.hpp | 1 - modules/ovis/samples/aruco_ar_demo.cpp | 2 +- 39 files changed, 3347 insertions(+), 3550 deletions(-) create mode 100644 modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp create mode 100644 modules/aruco/include/opencv2/aruco/board.hpp create mode 100644 modules/aruco/include/opencv2/aruco_detector.hpp create mode 100644 modules/aruco/misc/java/test/ArucoTest.java rename modules/aruco/src/{ => apriltag}/apriltag_quad_thresh.cpp (91%) rename modules/aruco/src/{ => apriltag}/apriltag_quad_thresh.hpp (91%) rename modules/aruco/src/{ => 
apriltag}/predefined_dictionaries_apriltag.hpp (100%) rename modules/aruco/src/{ => apriltag}/unionfind.hpp (100%) rename modules/aruco/src/{ => apriltag}/zarray.hpp (100%) rename modules/aruco/src/{ => apriltag}/zmaxheap.cpp (99%) rename modules/aruco/src/{ => apriltag}/zmaxheap.hpp (100%) create mode 100644 modules/aruco/src/aruco_calib_pose.cpp create mode 100644 modules/aruco/src/aruco_detector.cpp create mode 100644 modules/aruco/src/aruco_utils.cpp create mode 100644 modules/aruco/src/aruco_utils.hpp create mode 100644 modules/aruco/src/board.cpp diff --git a/modules/aruco/include/opencv2/aruco.hpp b/modules/aruco/include/opencv2/aruco.hpp index fac4dee81d4..3213c595089 100644 --- a/modules/aruco/include/opencv2/aruco.hpp +++ b/modules/aruco/include/opencv2/aruco.hpp @@ -1,683 +1,34 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. 
- -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ - +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html #ifndef __OPENCV_ARUCO_HPP__ #define __OPENCV_ARUCO_HPP__ -#include -#include -#include -#include "opencv2/aruco/dictionary.hpp" - -/** - * @defgroup aruco ArUco Marker Detection - * This module is dedicated to square fiducial markers (also known as Augmented Reality Markers) - * These markers are useful for easy, fast and robust camera pose estimation.ç - * - * The main functionalities are: - * - Detection of markers in an image - * - Pose estimation from a single marker or from a board/set of markers - * - Detection of ChArUco board for high subpixel accuracy - * - Camera calibration from both, ArUco boards and ChArUco boards. - * - Detection of ChArUco diamond markers - * The samples directory includes easy examples of how to use the module. - * - * The implementation is based on the ArUco Library by R. Muñoz-Salinas and S. Garrido-Jurado @cite Aruco2014. - * - * Markers can also be detected based on the AprilTag 2 @cite wang2016iros fiducial detection method. - * - * @sa S. Garrido-Jurado, R. Muñoz-Salinas, F. J. 
Madrid-Cuevas, and M. J. Marín-Jiménez. 2014. - * "Automatic generation and detection of highly reliable fiducial markers under occlusion". - * Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005 - * - * @sa http://www.uco.es/investiga/grupos/ava/node/26 - * - * This module has been originally developed by Sergio Garrido-Jurado as a project - * for Google Summer of Code 2015 (GSoC 15). - * - * -*/ +#include "opencv2/aruco_detector.hpp" +#include "opencv2/aruco/aruco_calib_pose.hpp" namespace cv { namespace aruco { -//! @addtogroup aruco -//! @{ - -enum CornerRefineMethod{ - CORNER_REFINE_NONE, ///< Tag and corners detection based on the ArUco approach - CORNER_REFINE_SUBPIX, ///< ArUco approach and refine the corners locations using corner subpixel accuracy - CORNER_REFINE_CONTOUR, ///< ArUco approach and refine the corners locations using the contour-points line fitting - CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros -}; - -/** - * @brief Parameters for the detectMarker process: - * - adaptiveThreshWinSizeMin: minimum window size for adaptive thresholding before finding - * contours (default 3). - * - adaptiveThreshWinSizeMax: maximum window size for adaptive thresholding before finding - * contours (default 23). - * - adaptiveThreshWinSizeStep: increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax - * during the thresholding (default 10). - * - adaptiveThreshConstant: constant for adaptive thresholding before finding contours (default 7) - * - minMarkerPerimeterRate: determine minimum perimeter for marker contour to be detected. This - * is defined as a rate respect to the maximum dimension of the input image (default 0.03). - * - maxMarkerPerimeterRate: determine maximum perimeter for marker contour to be detected. This - * is defined as a rate respect to the maximum dimension of the input image (default 4.0). 
- * - polygonalApproxAccuracyRate: minimum accuracy during the polygonal approximation process to - * determine which contours are squares. (default 0.03) - * - minCornerDistanceRate: minimum distance between corners for detected markers relative to its - * perimeter (default 0.05) - * - minDistanceToBorder: minimum distance of any corner to the image border for detected markers - * (in pixels) (default 3) - * - minMarkerDistanceRate: minimum mean distance beetween two marker corners to be considered - * similar, so that the smaller one is removed. The rate is relative to the smaller perimeter - * of the two markers (default 0.05). - * - cornerRefinementMethod: corner refinement method. (CORNER_REFINE_NONE, no refinement. - * CORNER_REFINE_SUBPIX, do subpixel refinement. CORNER_REFINE_CONTOUR use contour-Points, - * CORNER_REFINE_APRILTAG use the AprilTag2 approach). (default CORNER_REFINE_NONE) - * - cornerRefinementWinSize: window size for the corner refinement process (in pixels) (default 5). - * - cornerRefinementMaxIterations: maximum number of iterations for stop criteria of the corner - * refinement process (default 30). - * - cornerRefinementMinAccuracy: minimum error for the stop cristeria of the corner refinement - * process (default: 0.1) - * - markerBorderBits: number of bits of the marker border, i.e. marker border width (default 1). - * - perspectiveRemovePixelPerCell: number of bits (per dimension) for each cell of the marker - * when removing the perspective (default 4). - * - perspectiveRemoveIgnoredMarginPerCell: width of the margin of pixels on each cell not - * considered for the determination of the cell bit. Represents the rate respect to the total - * size of the cell, i.e. perspectiveRemovePixelPerCell (default 0.13) - * - maxErroneousBitsInBorderRate: maximum number of accepted erroneous bits in the border (i.e. - * number of allowed white bits in the border). 
Represented as a rate respect to the total - * number of bits per marker (default 0.35). - * - minOtsuStdDev: minimun standard deviation in pixels values during the decodification step to - * apply Otsu thresholding (otherwise, all the bits are set to 0 or 1 depending on mean higher - * than 128 or not) (default 5.0) - * - errorCorrectionRate error correction rate respect to the maximun error correction capability - * for each dictionary. (default 0.6). - * - aprilTagMinClusterPixels: reject quads containing too few pixels. (default 5) - * - aprilTagMaxNmaxima: how many corner candidates to consider when segmenting a group of pixels into a quad. (default 10) - * - aprilTagCriticalRad: Reject quads where pairs of edges have angles that are close to straight or close to - * 180 degrees. Zero means that no quads are rejected. (In radians) (default 10*PI/180) - * - aprilTagMaxLineFitMse: When fitting lines to the contours, what is the maximum mean squared error - * allowed? This is useful in rejecting contours that are far from being quad shaped; rejecting - * these quads "early" saves expensive decoding processing. (default 10.0) - * - aprilTagMinWhiteBlackDiff: When we build our model of black & white pixels, we add an extra check that - * the white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]). (default 5) - * - aprilTagDeglitch: should the thresholded image be deglitched? Only useful for very noisy images. (default 0) - * - aprilTagQuadDecimate: Detection of quads can be done on a lower-resolution image, improving speed at a - * cost of pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still - * done at full resolution. (default 0.0) - * - aprilTagQuadSigma: What Gaussian blur should be applied to the segmented image (used for quad detection?) - * Parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8). 
(default 0.0) - * - detectInvertedMarker: to check if there is a white marker. In order to generate a "white" marker just - * invert a normal marker by using a tilde, ~markerImage. (default false) - * - useAruco3Detection: to enable the new and faster Aruco detection strategy. The most important observation from the authors of - * Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018) is, that the binary - * code of a marker can be reliably detected if the canonical image (that is used to extract the binary code) - * has a size of minSideLengthCanonicalImg (in practice tau_c=16-32 pixels). - * Link to article: https://www.researchgate.net/publication/325787310_Speeded_Up_Detection_of_Squared_Fiducial_Markers - * In addition, very small markers are barely useful for pose estimation and thus a we can define a minimum marker size that we - * still want to be able to detect (e.g. 50x50 pixel). - * To decouple this from the initial image size they propose to resize the input image - * to (I_w_r, I_h_r) = (tau_c / tau_dot_i) * (I_w, I_h), with tau_dot_i = tau_c + max(I_w,I_h) * tau_i. - * Here tau_i (parameter: minMarkerLengthRatioOriginalImg) is a ratio in the range [0,1]. - * If we set this to 0, the smallest marker we can detect - * has a side length of tau_c. If we set it to 1 the marker would fill the entire image. - * For a FullHD video a good value to start with is 0.1. - * - minSideLengthCanonicalImg: minimum side length of a marker in the canonical image. - * Latter is the binarized image in which contours are searched. - * So all contours with a size smaller than minSideLengthCanonicalImg*minSideLengthCanonicalImg will omitted from the search. - * - minMarkerLengthRatioOriginalImg: range [0,1], eq (2) from paper - * The parameter tau_i has a direct influence on the processing speed. 
- */ -struct CV_EXPORTS_W DetectorParameters { - - DetectorParameters(); - CV_WRAP static Ptr create(); - CV_WRAP bool readDetectorParameters(const FileNode& fn); - - CV_PROP_RW int adaptiveThreshWinSizeMin; - CV_PROP_RW int adaptiveThreshWinSizeMax; - CV_PROP_RW int adaptiveThreshWinSizeStep; - CV_PROP_RW double adaptiveThreshConstant; - CV_PROP_RW double minMarkerPerimeterRate; - CV_PROP_RW double maxMarkerPerimeterRate; - CV_PROP_RW double polygonalApproxAccuracyRate; - CV_PROP_RW double minCornerDistanceRate; - CV_PROP_RW int minDistanceToBorder; - CV_PROP_RW double minMarkerDistanceRate; - CV_PROP_RW int cornerRefinementMethod; - CV_PROP_RW int cornerRefinementWinSize; - CV_PROP_RW int cornerRefinementMaxIterations; - CV_PROP_RW double cornerRefinementMinAccuracy; - CV_PROP_RW int markerBorderBits; - CV_PROP_RW int perspectiveRemovePixelPerCell; - CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell; - CV_PROP_RW double maxErroneousBitsInBorderRate; - CV_PROP_RW double minOtsuStdDev; - CV_PROP_RW double errorCorrectionRate; - - // April :: User-configurable parameters. 
- CV_PROP_RW float aprilTagQuadDecimate; - CV_PROP_RW float aprilTagQuadSigma; - - // April :: Internal variables - CV_PROP_RW int aprilTagMinClusterPixels; - CV_PROP_RW int aprilTagMaxNmaxima; - CV_PROP_RW float aprilTagCriticalRad; - CV_PROP_RW float aprilTagMaxLineFitMse; - CV_PROP_RW int aprilTagMinWhiteBlackDiff; - CV_PROP_RW int aprilTagDeglitch; - - // to detect white (inverted) markers - CV_PROP_RW bool detectInvertedMarker; - - // New Aruco functionality proposed in the paper: - // Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018) - CV_PROP_RW bool useAruco3Detection; - CV_PROP_RW int minSideLengthCanonicalImg; - CV_PROP_RW float minMarkerLengthRatioOriginalImg; -}; - - /** - * @brief Basic marker detection - * - * @param image input image - * @param dictionary indicates the type of markers that will be searched - * @param corners vector of detected marker corners. For each marker, its four corners - * are provided, (e.g std::vector > ). For N detected markers, - * the dimensions of this array is Nx4. The order of the corners is clockwise. - * @param ids vector of identifiers of the detected markers. The identifier is of type int - * (e.g. std::vector). For N detected markers, the size of ids is also N. - * The identifiers have the same order than the markers in the imgPoints array. - * @param parameters marker detection parameters - * @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a - * correct codification. Useful for debugging purposes. - * - * Performs marker detection in the input image. Only markers included in the specific dictionary - * are searched. For each detected marker, it returns the 2D position of its corner in the image - * and its corresponding identifier. - * Note that this function does not perform pose estimation. - * @note The function does not correct lens distortion or takes it into account. 
It's recommended to undistort - * input image with corresponging camera model, if camera parameters are known - * @sa undistort, estimatePoseSingleMarkers, estimatePoseBoard - * - */ +@deprecated Use class ArucoDetector +*/ CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr &dictionary, OutputArrayOfArrays corners, OutputArray ids, const Ptr ¶meters = DetectorParameters::create(), OutputArrayOfArrays rejectedImgPoints = noArray()); -/** @brief - * rvec/tvec define the right handed coordinate system of the marker. - * PatternPos defines center this system and axes direction. - * Axis X (red color) - first coordinate, axis Y (green color) - second coordinate, - * axis Z (blue color) - third coordinate. - * @sa estimatePoseSingleMarkers(), @ref tutorial_aruco_detection - */ -enum PatternPos { - /** @brief The marker coordinate system is centered on the middle of the marker. - * The coordinates of the four corners (CCW order) of the marker in its own coordinate system are: - * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), - * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0). - * - * These pattern points define this coordinate system: - * ![Image with axes drawn](images/singlemarkersaxes.jpg) - */ - CCW_center, - /** @brief The marker coordinate system is centered on the top-left corner of the marker. - * The coordinates of the four corners (CW order) of the marker in its own coordinate system are: - * (0, 0, 0), (markerLength, 0, 0), - * (markerLength, markerLength, 0), (0, markerLength, 0). - * - * These pattern points define this coordinate system: - * ![Image with axes drawn](images/singlemarkersaxes2.jpg) - */ - CW_top_left_corner -}; - -/** @brief - * Pose estimation parameters - * @param pattern Defines center this system and axes direction (default PatternPos::CCW_center). - * @param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. 
If true (1), the function uses the provided - * rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further - * optimizes them (default false). - * @param solvePnPMethod Method for solving a PnP problem: see @ref calib3d_solvePnP_flags (default SOLVEPNP_ITERATIVE). - * @sa PatternPos, solvePnP(), @ref tutorial_aruco_detection - */ -struct CV_EXPORTS_W EstimateParameters { - CV_PROP_RW PatternPos pattern; - CV_PROP_RW bool useExtrinsicGuess; - CV_PROP_RW SolvePnPMethod solvePnPMethod; - - EstimateParameters(): pattern(CCW_center), useExtrinsicGuess(false), - solvePnPMethod(SOLVEPNP_ITERATIVE) {} - - CV_WRAP static Ptr create() { - return makePtr(); - } -}; - - -/** - * @brief Pose estimation for single markers - * - * @param corners vector of already detected markers corners. For each marker, its four corners - * are provided, (e.g std::vector > ). For N detected markers, - * the dimensions of this array should be Nx4. The order of the corners should be clockwise. - * @sa detectMarkers - * @param markerLength the length of the markers' side. The returning translation vectors will - * be in the same unit. Normally, unit is meters. - * @param cameraMatrix input 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ - * @param distCoeffs vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector). - * Each element in rvecs corresponds to the specific marker in imgPoints. - * @param tvecs array of output translation vectors (e.g. std::vector). - * Each element in tvecs corresponds to the specific marker in imgPoints. 
- * @param _objPoints array of object points of all the marker corners - * @param estimateParameters set the origin of coordinate system and the coordinates of the four corners of the marker - * (default estimateParameters.pattern = PatternPos::CCW_center, estimateParameters.useExtrinsicGuess = false, - * estimateParameters.solvePnPMethod = SOLVEPNP_ITERATIVE). - * - * This function receives the detected markers and returns their pose estimation respect to - * the camera individually. So for each marker, one rotation and translation vector is returned. - * The returned transformation is the one that transforms points from each marker coordinate system - * to the camera coordinate system. - * The marker coordinate system is centered on the middle (by default) or on the top-left corner of the marker, - * with the Z axis perpendicular to the marker plane. - * estimateParameters defines the coordinates of the four corners of the marker in its own coordinate system (by default) are: - * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), - * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0) - * @sa use cv::drawFrameAxes to get world coordinate system axis for object points - * @sa @ref tutorial_aruco_detection - * @sa EstimateParameters - * @sa PatternPos - */ -CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength, - InputArray cameraMatrix, InputArray distCoeffs, - OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray(), - Ptr estimateParameters = EstimateParameters::create()); - - - -/** - * @brief Board of markers - * - * A board is a set of markers in the 3D space with a common coordinate system. - * The common form of a board of marker is a planar (2D) board, however any 3D layout can be used. - * A Board object is composed by: - * - The object points of the marker corners, i.e. their coordinates respect to the board system. 
- * - The dictionary which indicates the type of markers of the board - * - The identifier of all the markers in the board. - */ -class CV_EXPORTS_W Board { - - public: - /** - * @brief Provide way to create Board by passing necessary data. Specially needed in Python. - * - * @param objPoints array of object points of all the marker corners in the board - * @param dictionary the dictionary of markers employed for this board - * @param ids vector of the identifiers of the markers in the board - * - */ - CV_WRAP static Ptr create(InputArrayOfArrays objPoints, const Ptr &dictionary, InputArray ids); - - /** - * @brief Set ids vector - * - * @param ids vector of the identifiers of the markers in the board (should be the same size - * as objPoints) - * - * Recommended way to set ids vector, which will fail if the size of ids does not match size - * of objPoints. - */ - CV_WRAP void setIds(InputArray ids); - - /// array of object points of all the marker corners in the board - /// each marker include its 4 corners in this order: - ///- objPoints[i][0] - left-top point of i-th marker - ///- objPoints[i][1] - right-top point of i-th marker - ///- objPoints[i][2] - right-bottom point of i-th marker - ///- objPoints[i][3] - left-bottom point of i-th marker - /// - /// Markers are placed in a certain order - row by row, left to right in every row. - /// For M markers, the size is Mx4. - CV_PROP std::vector< std::vector< Point3f > > objPoints; - - /// the dictionary of markers employed for this board - CV_PROP Ptr dictionary; - - /// vector of the identifiers of the markers in the board (same size than objPoints) - /// The identifiers refers to the board dictionary - CV_PROP_RW std::vector< int > ids; - - /// coordinate of the bottom right corner of the board, is set when calling the function create() - CV_PROP Point3f rightBottomBorder; -}; - - /** - * @brief Planar board with grid arrangement of markers - * More common type of board. 
All markers are placed in the same plane in a grid arrangement. - * The board can be drawn using drawPlanarBoard() function (@sa drawPlanarBoard) - */ -class CV_EXPORTS_W GridBoard : public Board { - - public: - /** - * @brief Draw a GridBoard - * - * @param outSize size of the output image in pixels. - * @param img output image with the board. The size of this image will be outSize - * and the board will be on the center, keeping the board proportions. - * @param marginSize minimum margins (in pixels) of the board in the output image - * @param borderBits width of the marker borders. - * - * This function return the image of the GridBoard, ready to be printed. - */ - CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1); - - - /** - * @brief Create a GridBoard object - * - * @param markersX number of markers in X direction - * @param markersY number of markers in Y direction - * @param markerLength marker side length (normally in meters) - * @param markerSeparation separation between two markers (same unit as markerLength) - * @param dictionary dictionary of markers indicating the type of markers - * @param firstMarker id of first marker in dictionary to use on board. - * @return the output GridBoard object - * - * This functions creates a GridBoard object given the number of markers in each direction and - * the marker size and marker separation. 
- */ - CV_WRAP static Ptr create(int markersX, int markersY, float markerLength, - float markerSeparation, const Ptr &dictionary, int firstMarker = 0); - - /** - * - */ - CV_WRAP Size getGridSize() const { return Size(_markersX, _markersY); } - - /** - * - */ - CV_WRAP float getMarkerLength() const { return _markerLength; } - - /** - * - */ - CV_WRAP float getMarkerSeparation() const { return _markerSeparation; } - - - private: - // number of markers in X and Y directions - int _markersX, _markersY; - - // marker side length (normally in meters) - float _markerLength; - - // separation between markers in the grid - float _markerSeparation; -}; - - - -/** - * @brief Pose estimation for a board of markers - * - * @param corners vector of already detected markers corners. For each marker, its four corners - * are provided, (e.g std::vector > ). For N detected markers, the - * dimensions of this array should be Nx4. The order of the corners should be clockwise. - * @param ids list of identifiers for each marker in corners - * @param board layout of markers in the board. The layout is composed by the marker identifiers - * and the positions of each marker corner in the board reference system. - * @param cameraMatrix input 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ - * @param distCoeffs vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board - * (see cv::Rodrigues). Used as initial guess if not empty. - * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board. - * @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not. - * Used as initial guess if not empty. 
- * - * This function receives the detected markers and returns the pose of a marker board composed - * by those markers. - * A Board of marker has a single world coordinate system which is defined by the board layout. - * The returned transformation is the one that transforms points from the board coordinate system - * to the camera coordinate system. - * Input markers that are not included in the board layout are ignored. - * The function returns the number of markers from the input employed for the board pose estimation. - * Note that returning a 0 means the pose has not been estimated. - * @sa use cv::drawFrameAxes to get world coordinate system axis for object points - */ -CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr &board, - InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec, - InputOutputArray tvec, bool useExtrinsicGuess = false); - - - - -/** - * @brief Refind not detected markers based on the already detected and the board layout - * - * @param image input image - * @param board layout of markers in the board. - * @param detectedCorners vector of already detected marker corners. - * @param detectedIds vector of already detected marker identifiers. - * @param rejectedCorners vector of rejected candidates during the marker detection process. - * @param cameraMatrix optional input 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ - * @param distCoeffs optional vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param minRepDistance minimum distance between the corners of the rejected candidate and the - * reprojected marker in order to consider it as a correspondence. - * @param errorCorrectionRate rate of allowed erroneous bits respect to the error correction - * capability of the used dictionary. -1 ignores the error correction step. 
- * @param checkAllOrders Consider the four posible corner orders in the rejectedCorners array. - * If it set to false, only the provided corner order is considered (default true). - * @param recoveredIdxs Optional array to returns the indexes of the recovered candidates in the - * original rejectedCorners array. - * @param parameters marker detection parameters - * - * This function tries to find markers that were not detected in the basic detecMarkers function. - * First, based on the current detected marker and the board layout, the function interpolates - * the position of the missing markers. Then it tries to find correspondence between the reprojected - * markers and the rejected candidates based on the minRepDistance and errorCorrectionRate - * parameters. - * If camera parameters and distortion coefficients are provided, missing markers are reprojected - * using projectPoint function. If not, missing marker projections are interpolated using global - * homography, and all the marker corners in the board must have the same Z coordinate. - */ -CV_EXPORTS_W void refineDetectedMarkers( - InputArray image,const Ptr &board, InputOutputArrayOfArrays detectedCorners, - InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners, - InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(), - float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true, - OutputArray recoveredIdxs = noArray(), const Ptr ¶meters = DetectorParameters::create()); - - - -/** - * @brief Draw detected markers in image - * - * @param image input/output image. It must have 1 or 3 channels. The number of channels is not - * altered. - * @param corners positions of marker corners on input image. - * (e.g std::vector > ). For N detected markers, the dimensions of - * this array should be Nx4. The order of the corners should be clockwise. - * @param ids vector of identifiers for markers in markersCorners . 
- * Optional, if not provided, ids are not painted. - * @param borderColor color of marker borders. Rest of colors (text color and first corner color) - * are calculated based on this one to improve visualization. - * - * Given an array of detected marker corners and its corresponding ids, this functions draws - * the markers in the image. The marker borders are painted and the markers identifiers if provided. - * Useful for debugging purposes. - * - */ -CV_EXPORTS_W void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners, - InputArray ids = noArray(), - Scalar borderColor = Scalar(0, 255, 0)); - - - -/** - * @brief Draw a canonical marker image - * - * @param dictionary dictionary of markers indicating the type of markers - * @param id identifier of the marker that will be returned. It has to be a valid id - * in the specified dictionary. - * @param sidePixels size of the image in pixels - * @param img output image with the marker - * @param borderBits width of the marker border. - * - * This function returns a marker image in its canonical form (i.e. ready to be printed) - */ -CV_EXPORTS_W void drawMarker(const Ptr &dictionary, int id, int sidePixels, OutputArray img, - int borderBits = 1); - - - -/** - * @brief Draw a planar board - * @sa _drawPlanarBoardImpl - * - * @param board layout of the board that will be drawn. The board should be planar, - * z coordinate is ignored - * @param outSize size of the output image in pixels. - * @param img output image with the board. The size of this image will be outSize - * and the board will be on the center, keeping the board proportions. - * @param marginSize minimum margins (in pixels) of the board in the output image - * @param borderBits width of the marker borders. - * - * This function return the image of a planar board, ready to be printed. It assumes - * the Board layout specified is planar by ignoring the z coordinates of the object points. 
- */ -CV_EXPORTS_W void drawPlanarBoard(const Ptr &board, Size outSize, OutputArray img, - int marginSize = 0, int borderBits = 1); - - - -/** - * @brief Implementation of drawPlanarBoard that accepts a raw Board pointer. - */ -void _drawPlanarBoardImpl(Board *board, Size outSize, OutputArray img, - int marginSize = 0, int borderBits = 1); - - - -/** - * @brief Calibrate a camera using aruco markers - * - * @param corners vector of detected marker corners in all frames. - * The corners should have the same format returned by detectMarkers (see #detectMarkers). - * @param ids list of identifiers for each marker in corners - * @param counter number of markers in each frame so that corners and ids can be split - * @param board Marker Board layout - * @param imageSize Size of the image used only to initialize the intrinsic camera matrix. - * @param cameraMatrix Output 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS - * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be - * initialized before calling the function. - * @param distCoeffs Output vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view - * (e.g. std::vector>). That is, each k-th rotation vector together with the corresponding - * k-th translation vector (see the next output parameter description) brings the board pattern - * from the model coordinate space (in which object points are specified) to the world coordinate - * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1). - * @param tvecs Output vector of translation vectors estimated for each pattern view. - * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters. 
- * Order of deviations values: - * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3, - * s_4, \tau_x, \tau_y)\f$ If one of parameters is not estimated, it's deviation is equals to zero. - * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters. - * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views, - * \f$R_i, T_i\f$ are concatenated 1x3 vectors. - * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view. - * @param flags flags Different flags for the calibration process (see #calibrateCamera for details). - * @param criteria Termination criteria for the iterative optimization algorithm. - * - * This function calibrates a camera using an Aruco Board. The function receives a list of - * detected markers from several views of the Board. The process is similar to the chessboard - * calibration in calibrateCamera(). The function returns the final re-projection error. - */ -CV_EXPORTS_AS(calibrateCameraArucoExtended) double calibrateCameraAruco( - InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr &board, - Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, - OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, - OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, - OutputArray perViewErrors, int flags = 0, - TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); - - -/** @brief It's the same function as #calibrateCameraAruco but without calibration error estimation. 
- */ -CV_EXPORTS_W double calibrateCameraAruco( - InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr &board, - Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, - OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0, - TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); - - -/** - * @brief Given a board configuration and a set of detected markers, returns the corresponding - * image points and object points to call solvePnP - * - * @param board Marker board layout. - * @param detectedCorners List of detected marker corners of the board. - * @param detectedIds List of identifiers for each marker. - * @param objPoints Vector of vectors of board marker points in the board coordinate space. - * @param imgPoints Vector of vectors of the projections of board marker corner points. +@deprecated Use class ArucoDetector */ -CV_EXPORTS_W void getBoardObjectAndImagePoints(const Ptr &board, InputArrayOfArrays detectedCorners, - InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints); - +CV_EXPORTS_W void refineDetectedMarkers(InputArray image,const Ptr &board, + InputOutputArrayOfArrays detectedCorners, + InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners, + InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(), + float minRepDistance = 10.f, float errorCorrectionRate = 3.f, + bool checkAllOrders = true, OutputArray recoveredIdxs = noArray(), + const Ptr ¶meters = DetectorParameters::create()); -//! @} } } diff --git a/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp b/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp new file mode 100644 index 00000000000..23257ff766d --- /dev/null +++ b/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp @@ -0,0 +1,286 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html +#ifndef __OPENCV_ARUCO_CALIB_POSE_HPP__ +#define __OPENCV_ARUCO_CALIB_POSE_HPP__ +#include +#include + +namespace cv { +namespace aruco { + +//! @addtogroup aruco +//! @{ + +/** @brief + * rvec/tvec define the right handed coordinate system of the marker. + * PatternPos defines center this system and axes direction. + * Axis X (red color) - first coordinate, axis Y (green color) - second coordinate, + * axis Z (blue color) - third coordinate. + * @sa estimatePoseSingleMarkers(), @ref tutorial_aruco_detection + */ +enum PatternPos { + /** @brief The marker coordinate system is centered on the middle of the marker. + * The coordinates of the four corners (CCW order) of the marker in its own coordinate system are: + * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), + * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0). + * + * These pattern points define this coordinate system: + * ![Image with axes drawn](images/singlemarkersaxes.jpg) + */ + CCW_center, + /** @brief The marker coordinate system is centered on the top-left corner of the marker. + * The coordinates of the four corners (CW order) of the marker in its own coordinate system are: + * (0, 0, 0), (markerLength, 0, 0), + * (markerLength, markerLength, 0), (0, markerLength, 0). + * + * These pattern points define this coordinate system: + * ![Image with axes drawn](images/singlemarkersaxes2.jpg) + * + * These pattern dots are convenient to use with a chessboard/ChArUco board. + */ + CW_top_left_corner +}; + +/** @brief + * Pose estimation parameters + * @param pattern Defines center this system and axes direction (default PatternPos::CCW_center). + * @param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. 
If true (1), the function uses the provided + * rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further + * optimizes them (default false). + * @param solvePnPMethod Method for solving a PnP problem: see @ref calib3d_solvePnP_flags (default SOLVEPNP_ITERATIVE). + * @sa PatternPos, solvePnP(), @ref tutorial_aruco_detection + */ +struct CV_EXPORTS_W EstimateParameters { + CV_PROP_RW PatternPos pattern; + CV_PROP_RW bool useExtrinsicGuess; + CV_PROP_RW SolvePnPMethod solvePnPMethod; + + EstimateParameters(): pattern(CCW_center), useExtrinsicGuess(false), + solvePnPMethod(SOLVEPNP_ITERATIVE) {} + + CV_WRAP static Ptr create() { + return makePtr(); + } +}; + + +/** + * @brief Pose estimation for single markers + * + * @param corners vector of already detected markers corners. For each marker, its four corners + * are provided, (e.g std::vector > ). For N detected markers, + * the dimensions of this array should be Nx4. The order of the corners should be clockwise. + * @sa detectMarkers + * @param markerLength the length of the markers' side. The returning translation vectors will + * be in the same unit. Normally, unit is meters. + * @param cameraMatrix input 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ + * @param distCoeffs vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector). + * Each element in rvecs corresponds to the specific marker in imgPoints. + * @param tvecs array of output translation vectors (e.g. std::vector). + * Each element in tvecs corresponds to the specific marker in imgPoints. 
+ * @param _objPoints array of object points of all the marker corners + * @param estimateParameters set the origin of coordinate system and the coordinates of the four corners of the marker + * (default estimateParameters.pattern = PatternPos::CCW_center, estimateParameters.useExtrinsicGuess = false, + * estimateParameters.solvePnPMethod = SOLVEPNP_ITERATIVE). + * + * This function receives the detected markers and returns their pose estimation respect to + * the camera individually. So for each marker, one rotation and translation vector is returned. + * The returned transformation is the one that transforms points from each marker coordinate system + * to the camera coordinate system. + * The marker coordinate system is centered on the middle (by default) or on the top-left corner of the marker, + * with the Z axis perpendicular to the marker plane. + * estimateParameters defines the coordinates of the four corners of the marker in its own coordinate system (by default) are: + * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), + * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0) + * @sa use cv::drawFrameAxes to get world coordinate system axis for object points + * @sa @ref tutorial_aruco_detection + * @sa EstimateParameters + * @sa PatternPos + */ +CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray(), + Ptr estimateParameters = EstimateParameters::create()); + +/** + * @brief Pose estimation for a board of markers + * + * @param corners vector of already detected markers corners. For each marker, its four corners + * are provided, (e.g std::vector > ). For N detected markers, the + * dimensions of this array should be Nx4. The order of the corners should be clockwise. 
+ * @param ids list of identifiers for each marker in corners + * @param board layout of markers in the board. The layout is composed by the marker identifiers + * and the positions of each marker corner in the board reference system. + * @param cameraMatrix input 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ + * @param distCoeffs vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board + * (see cv::Rodrigues). Used as initial guess if not empty. + * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board. + * @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not. + * Used as initial guess if not empty. + * + * This function receives the detected markers and returns the pose of a marker board composed + * by those markers. + * A Board of marker has a single world coordinate system which is defined by the board layout. + * The returned transformation is the one that transforms points from the board coordinate system + * to the camera coordinate system. + * Input markers that are not included in the board layout are ignored. + * The function returns the number of markers from the input employed for the board pose estimation. + * Note that returning a 0 means the pose has not been estimated. 
+ * @sa use cv::drawFrameAxes to get world coordinate system axis for object points + */ +CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr<Board> &board, + InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec, + InputOutputArray tvec, bool useExtrinsicGuess = false); + +/** + * @brief Given a board configuration and a set of detected markers, returns the corresponding + * image points and object points to call solvePnP + * + * @param board Marker board layout. + * @param detectedCorners List of detected marker corners of the board. + * @param detectedIds List of identifiers for each marker. + * @param objPoints Vector of vectors of board marker points in the board coordinate space. + * @param imgPoints Vector of vectors of the projections of board marker corner points. +*/ +CV_EXPORTS_W void getBoardObjectAndImagePoints(const Ptr<Board> &board, InputArrayOfArrays detectedCorners, + InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints); + +/** + * @brief Calibrate a camera using aruco markers + * + * @param corners vector of detected marker corners in all frames. + * The corners should have the same format returned by detectMarkers (see #detectMarkers). + * @param ids list of identifiers for each marker in corners + * @param counter number of markers in each frame so that corners and ids can be split + * @param board Marker Board layout + * @param imageSize Size of the image used only to initialize the intrinsic camera matrix. + * @param cameraMatrix Output 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS + * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be + * initialized before calling the function.
+ * @param distCoeffs Output vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view + * (e.g. std::vector>). That is, each k-th rotation vector together with the corresponding + * k-th translation vector (see the next output parameter description) brings the board pattern + * from the model coordinate space (in which object points are specified) to the world coordinate + * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1). + * @param tvecs Output vector of translation vectors estimated for each pattern view. + * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters. + * Order of deviations values: + * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3, + * s_4, \tau_x, \tau_y)\f$ If one of parameters is not estimated, it's deviation is equals to zero. + * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters. + * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views, + * \f$R_i, T_i\f$ are concatenated 1x3 vectors. + * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view. + * @param flags flags Different flags for the calibration process (see #calibrateCamera for details). + * @param criteria Termination criteria for the iterative optimization algorithm. + * + * This function calibrates a camera using an Aruco Board. The function receives a list of + * detected markers from several views of the Board. The process is similar to the chessboard + * calibration in calibrateCamera(). The function returns the final re-projection error. 
+ */ +CV_EXPORTS_AS(calibrateCameraArucoExtended) +double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr &board, + Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, + OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray stdDeviationsIntrinsics, + OutputArray stdDeviationsExtrinsics, OutputArray perViewErrors, int flags = 0, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); + +/** @brief It's the same function as #calibrateCameraAruco but without calibration error estimation. + */ +CV_EXPORTS_W double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArray counter, + const Ptr &board, Size imageSize, InputOutputArray cameraMatrix, + InputOutputArray distCoeffs, OutputArrayOfArrays rvecs = noArray(), + OutputArrayOfArrays tvecs = noArray(), int flags = 0, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, + 30, DBL_EPSILON)); + +/** + * @brief Pose estimation for a ChArUco board given some of their corners + * @param charucoCorners vector of detected charuco corners + * @param charucoIds list of identifiers for each corner in charucoCorners + * @param board layout of ChArUco board. + * @param cameraMatrix input 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ + * @param distCoeffs vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board + * (see cv::Rodrigues). + * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board. + * @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not. + * + * This function estimates a Charuco board pose from some detected corners. 
+ * The function checks if the input corners are enough and valid to perform pose estimation. + * If pose estimation is valid, returns true, else returns false. + * @sa use cv::drawFrameAxes to get world coordinate system axis for object points + */ +CV_EXPORTS_W bool estimatePoseCharucoBoard(InputArray charucoCorners, InputArray charucoIds, + const Ptr &board, InputArray cameraMatrix, + InputArray distCoeffs, InputOutputArray rvec, + InputOutputArray tvec, bool useExtrinsicGuess = false); + +/** + * @brief Calibrate a camera using Charuco corners + * + * @param charucoCorners vector of detected charuco corners per frame + * @param charucoIds list of identifiers for each corner in charucoCorners per frame + * @param board Marker Board layout + * @param imageSize input image size + * @param cameraMatrix Output 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS + * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be + * initialized before calling the function. + * @param distCoeffs Output vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view + * (e.g. std::vector>). That is, each k-th rotation vector together with the corresponding + * k-th translation vector (see the next output parameter description) brings the board pattern + * from the model coordinate space (in which object points are specified) to the world coordinate + * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1). + * @param tvecs Output vector of translation vectors estimated for each pattern view. + * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters. 
+ * Order of deviations values: + * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3, + * s_4, \tau_x, \tau_y)\f$ If one of parameters is not estimated, it's deviation is equals to zero. + * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters. + * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views, + * \f$R_i, T_i\f$ are concatenated 1x3 vectors. + * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view. + * @param flags flags Different flags for the calibration process (see #calibrateCamera for details). + * @param criteria Termination criteria for the iterative optimization algorithm. + * + * This function calibrates a camera using a set of corners of a Charuco Board. The function + * receives a list of detected corners and its identifiers from several views of the Board. + * The function returns the final re-projection error. + */ +CV_EXPORTS_AS(calibrateCameraCharucoExtended) +double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, + const Ptr &board, Size imageSize, InputOutputArray cameraMatrix, + InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, + OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, + OutputArray perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( + TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); + +/** @brief It's the same function as #calibrateCameraCharuco but without calibration error estimation. 
+*/ +CV_EXPORTS_W double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, + const Ptr &board, Size imageSize, + InputOutputArray cameraMatrix, InputOutputArray distCoeffs, + OutputArrayOfArrays rvecs = noArray(), + OutputArrayOfArrays tvecs = noArray(), int flags = 0, + TermCriteria criteria=TermCriteria(TermCriteria::COUNT + + TermCriteria::EPS, 30, DBL_EPSILON)); +//! @} + +} +} +#endif diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp new file mode 100644 index 00000000000..254bbe0902a --- /dev/null +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -0,0 +1,229 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html +#ifndef __OPENCV_ARUCO_BOARD_HPP__ +#define __OPENCV_ARUCO_BOARD_HPP__ + +#include +#include + +namespace cv { +namespace aruco { +//! @addtogroup aruco +//! @{ + +class Dictionary; + +/** + * @brief Board of markers + * + * A board is a set of markers in the 3D space with a common coordinate system. + * The common form of a board of marker is a planar (2D) board, however any 3D layout can be used. + * A Board object is composed by: + * - The object points of the marker corners, i.e. their coordinates respect to the board system. + * - The dictionary which indicates the type of markers of the board + * - The identifier of all the markers in the board. + */ +class CV_EXPORTS_W Board { + public: + /** + * @brief Provide way to create Board by passing necessary data. Specially needed in Python. 
+ * + * @param objPoints array of object points of all the marker corners in the board + * @param dictionary the dictionary of markers employed for this board + * @param ids vector of the identifiers of the markers in the board + * + */ + CV_WRAP static Ptr<Board> create(InputArrayOfArrays objPoints, const Ptr<Dictionary> &dictionary, InputArray ids); + + /** + * @brief Set ids vector + * + * @param ids vector of the identifiers of the markers in the board (should be the same size + * as objPoints) + * + * Recommended way to set ids vector, which will fail if the size of ids does not match size + * of objPoints. + */ + CV_WRAP void setIds(InputArray ids); + + /// array of object points of all the marker corners in the board + /// each marker include its 4 corners in this order: + ///- objPoints[i][0] - left-top point of i-th marker + ///- objPoints[i][1] - right-top point of i-th marker + ///- objPoints[i][2] - right-bottom point of i-th marker + ///- objPoints[i][3] - left-bottom point of i-th marker + /// + /// Markers are placed in a certain order - row by row, left to right in every row. + /// For M markers, the size is Mx4. + CV_PROP std::vector< std::vector< Point3f > > objPoints; + + /// the dictionary of markers employed for this board + CV_PROP Ptr<Dictionary> dictionary; + + /// vector of the identifiers of the markers in the board (same size than objPoints) + /// The identifiers refers to the board dictionary + CV_PROP_RW std::vector< int > ids; + + /// coordinate of the bottom right corner of the board, is set when calling the function create() + CV_PROP Point3f rightBottomBorder; +}; + +/** + * @brief Draw a planar board + * @sa drawPlanarBoard + * + * @param board layout of the board that will be drawn. The board should be planar, + * z coordinate is ignored + * @param outSize size of the output image in pixels. + * @param img output image with the board. The size of this image will be outSize + * and the board will be on the center, keeping the board proportions.
+ * @param marginSize minimum margins (in pixels) of the board in the output image + * @param borderBits width of the marker borders. + * + * This function return the image of a planar board, ready to be printed. It assumes + * the Board layout specified is planar by ignoring the z coordinates of the object points. + */ +CV_EXPORTS_W void drawPlanarBoard(const Ptr &board, Size outSize, OutputArray img, + int marginSize = 0, int borderBits = 1); + +/** + * @brief Planar board with grid arrangement of markers + * More common type of board. All markers are placed in the same plane in a grid arrangement. + * The board can be drawn using drawPlanarBoard() function (@sa drawPlanarBoard) + */ + +class CV_EXPORTS_W GridBoard : public Board { + public: + /** + * @brief Draw a GridBoard + * + * @param outSize size of the output image in pixels. + * @param img output image with the board. The size of this image will be outSize + * and the board will be on the center, keeping the board proportions. + * @param marginSize minimum margins (in pixels) of the board in the output image + * @param borderBits width of the marker borders. + * + * This function return the image of the GridBoard, ready to be printed. + */ + CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1); + + /** + * @brief Create a GridBoard object + * + * @param markersX number of markers in X direction + * @param markersY number of markers in Y direction + * @param markerLength marker side length (normally in meters) + * @param markerSeparation separation between two markers (same unit as markerLength) + * @param dictionary dictionary of markers indicating the type of markers + * @param firstMarker id of first marker in dictionary to use on board. + * @return the output GridBoard object + * + * This functions creates a GridBoard object given the number of markers in each direction and + * the marker size and marker separation. 
+ */ + CV_WRAP static Ptr<GridBoard> create(int markersX, int markersY, float markerLength, float markerSeparation, + const Ptr<Dictionary> &dictionary, int firstMarker = 0); + + CV_WRAP Size getGridSize() const { return Size(_markersX, _markersY); } + + CV_WRAP float getMarkerLength() const { return _markerLength; } + + CV_WRAP float getMarkerSeparation() const { return _markerSeparation; } + + private: + // number of markers in X and Y directions + int _markersX, _markersY; + + // marker side length (normally in meters) + float _markerLength; + + // separation between markers in the grid + float _markerSeparation; +}; + +/** + * @brief ChArUco board + * Specific class for ChArUco boards. A ChArUco board is a planar board where the markers are placed + * inside the white squares of a chessboard. The benefits of ChArUco boards is that they provide + * both, ArUco markers versatility and chessboard corner precision, which is important for + * calibration and pose estimation. + * This class also allows the easy creation and drawing of ChArUco boards. + */ +class CV_EXPORTS_W CharucoBoard : public Board { + public: + // vector of chessboard 3D corners precalculated + CV_PROP std::vector< Point3f > chessboardCorners; + + // for each charuco corner, nearest marker id and nearest marker corner id of each marker + CV_PROP std::vector< std::vector< int > > nearestMarkerIdx; + CV_PROP std::vector< std::vector< int > > nearestMarkerCorners; + + /** + * @brief Draw a ChArUco board + * + * @param outSize size of the output image in pixels. + * @param img output image with the board. The size of this image will be outSize + * and the board will be on the center, keeping the board proportions. + * @param marginSize minimum margins (in pixels) of the board in the output image + * @param borderBits width of the marker borders. + * + * This function return the image of the ChArUco board, ready to be printed.
+ */ + CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1); + + + /** + * @brief Create a CharucoBoard object + * + * @param squaresX number of chessboard squares in X direction + * @param squaresY number of chessboard squares in Y direction + * @param squareLength chessboard square side length (normally in meters) + * @param markerLength marker side length (same unit than squareLength) + * @param dictionary dictionary of markers indicating the type of markers. + * The first markers in the dictionary are used to fill the white chessboard squares. + * @return the output CharucoBoard object + * + * This functions creates a CharucoBoard object given the number of squares in each direction + * and the size of the markers and chessboard squares. + */ + CV_WRAP static Ptr<CharucoBoard> create(int squaresX, int squaresY, float squareLength, + float markerLength, const Ptr<Dictionary> &dictionary); + + CV_WRAP Size getChessboardSize() const { return Size(_squaresX, _squaresY); } + + CV_WRAP float getSquareLength() const { return _squareLength; } + + CV_WRAP float getMarkerLength() const { return _markerLength; } + + private: + void _getNearestMarkerCorners(); + + // number of markers in X and Y directions + int _squaresX, _squaresY; + + // size of chessboard squares side (normally in meters) + float _squareLength; + + // marker side length (normally in meters) + float _markerLength; +}; + +/** + * @brief test whether the ChArUco markers are collinear + * + * @param board layout of ChArUco board. + * @param charucoIds list of identifiers for each corner in charucoCorners per frame. + * @return bool value, 1 (true) if detected corners form a line, 0 (false) if they do not. + solvePnP, calibration functions will fail if the corners are collinear (true). + * + * The number of ids in charucoIDs should be <= the number of chessboard corners in the board. This functions checks whether the charuco corners are on a straight line (returns true, if so), or not (false).
Axis parallel, as well as diagonal and other straight lines detected. Degenerate cases: for number of charucoIDs <= 2, the function returns true. + */ +CV_EXPORTS_W bool testCharucoCornersCollinear(const Ptr &board, InputArray charucoIds); + +//! @} + +} +} + +#endif diff --git a/modules/aruco/include/opencv2/aruco/charuco.hpp b/modules/aruco/include/opencv2/aruco/charuco.hpp index 2417b2fd895..d961c7ef00e 100644 --- a/modules/aruco/include/opencv2/aruco/charuco.hpp +++ b/modules/aruco/include/opencv2/aruco/charuco.hpp @@ -1,47 +1,14 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. 
In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ - +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html #ifndef __OPENCV_CHARUCO_HPP__ #define __OPENCV_CHARUCO_HPP__ #include #include #include +#include +#include namespace cv { @@ -50,87 +17,6 @@ namespace aruco { //! @addtogroup aruco //! @{ - -/** - * @brief ChArUco board - * Specific class for ChArUco boards. A ChArUco board is a planar board where the markers are placed - * inside the white squares of a chessboard. The benefits of ChArUco boards is that they provide - * both, ArUco markers versatility and chessboard corner precision, which is important for - * calibration and pose estimation. - * This class also allows the easy creation and drawing of ChArUco boards. - */ -class CV_EXPORTS_W CharucoBoard : public Board { - - public: - // vector of chessboard 3D corners precalculated - CV_PROP std::vector< Point3f > chessboardCorners; - - // for each charuco corner, nearest marker id and nearest marker corner id of each marker - CV_PROP std::vector< std::vector< int > > nearestMarkerIdx; - CV_PROP std::vector< std::vector< int > > nearestMarkerCorners; - - /** - * @brief Draw a ChArUco board - * - * @param outSize size of the output image in pixels. - * @param img output image with the board. The size of this image will be outSize - * and the board will be on the center, keeping the board proportions. 
- * @param marginSize minimum margins (in pixels) of the board in the output image - * @param borderBits width of the marker borders. - * - * This function return the image of the ChArUco board, ready to be printed. - */ - CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1); - - - /** - * @brief Create a CharucoBoard object - * - * @param squaresX number of chessboard squares in X direction - * @param squaresY number of chessboard squares in Y direction - * @param squareLength chessboard square side length (normally in meters) - * @param markerLength marker side length (same unit than squareLength) - * @param dictionary dictionary of markers indicating the type of markers. - * The first markers in the dictionary are used to fill the white chessboard squares. - * @return the output CharucoBoard object - * - * This functions creates a CharucoBoard object given the number of squares in each direction - * and the size of the markers and chessboard squares. - */ - CV_WRAP static Ptr create(int squaresX, int squaresY, float squareLength, - float markerLength, const Ptr &dictionary); - - /** - * - */ - CV_WRAP Size getChessboardSize() const { return Size(_squaresX, _squaresY); } - - /** - * - */ - CV_WRAP float getSquareLength() const { return _squareLength; } - - /** - * - */ - CV_WRAP float getMarkerLength() const { return _markerLength; } - - private: - void _getNearestMarkerCorners(); - - // number of markers in X and Y directions - int _squaresX, _squaresY; - - // size of chessboard squares side (normally in meters) - float _squareLength; - - // marker side length (normally in meters) - float _markerLength; -}; - - - - /** * @brief Interpolate position of ChArUco board corners * @param markerCorners vector of already detected markers corners. 
For each marker, its four @@ -161,36 +47,6 @@ CV_EXPORTS_W int interpolateCornersCharuco(InputArrayOfArrays markerCorners, Inp InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(), int minMarkers = 2); - - - -/** - * @brief Pose estimation for a ChArUco board given some of their corners - * @param charucoCorners vector of detected charuco corners - * @param charucoIds list of identifiers for each corner in charucoCorners - * @param board layout of ChArUco board. - * @param cameraMatrix input 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ - * @param distCoeffs vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board - * (see cv::Rodrigues). - * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board. - * @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not. - * - * This function estimates a Charuco board pose from some detected corners. - * The function checks if the input corners are enough and valid to perform pose estimation. - * If pose estimation is valid, returns true, else returns false. - * @sa use cv::drawFrameAxes to get world coordinate system axis for object points - */ -CV_EXPORTS_W bool estimatePoseCharucoBoard(InputArray charucoCorners, InputArray charucoIds, - const Ptr &board, InputArray cameraMatrix, - InputArray distCoeffs, InputOutputArray rvec, - InputOutputArray tvec, bool useExtrinsicGuess = false); - - - - /** * @brief Draws a set of Charuco corners * @param image input/output image. It must have 1 or 3 channels. 
The number of channels is not @@ -206,60 +62,6 @@ CV_EXPORTS_W void drawDetectedCornersCharuco(InputOutputArray image, InputArray InputArray charucoIds = noArray(), Scalar cornerColor = Scalar(255, 0, 0)); - - -/** - * @brief Calibrate a camera using Charuco corners - * - * @param charucoCorners vector of detected charuco corners per frame - * @param charucoIds list of identifiers for each corner in charucoCorners per frame - * @param board Marker Board layout - * @param imageSize input image size - * @param cameraMatrix Output 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS - * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be - * initialized before calling the function. - * @param distCoeffs Output vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view - * (e.g. std::vector>). That is, each k-th rotation vector together with the corresponding - * k-th translation vector (see the next output parameter description) brings the board pattern - * from the model coordinate space (in which object points are specified) to the world coordinate - * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1). - * @param tvecs Output vector of translation vectors estimated for each pattern view. - * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters. - * Order of deviations values: - * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3, - * s_4, \tau_x, \tau_y)\f$ If one of parameters is not estimated, it's deviation is equals to zero. - * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters. 
- * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views, - * \f$R_i, T_i\f$ are concatenated 1x3 vectors. - * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view. - * @param flags flags Different flags for the calibration process (see #calibrateCamera for details). - * @param criteria Termination criteria for the iterative optimization algorithm. - * - * This function calibrates a camera using a set of corners of a Charuco Board. The function - * receives a list of detected corners and its identifiers from several views of the Board. - * The function returns the final re-projection error. - */ -CV_EXPORTS_AS(calibrateCameraCharucoExtended) double calibrateCameraCharuco( - InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, const Ptr &board, - Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, - OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, - OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, - OutputArray perViewErrors, int flags = 0, - TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); - -/** @brief It's the same function as #calibrateCameraCharuco but without calibration error estimation. 
-*/ -CV_EXPORTS_W double calibrateCameraCharuco( - InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, const Ptr &board, - Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, - OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0, - TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); - - - /** * @brief Detect ChArUco Diamond markers * @@ -335,20 +137,6 @@ CV_EXPORTS_W void drawCharucoDiamond(const Ptr &dictionary, Vec4i id int markerLength, OutputArray img, int marginSize = 0, int borderBits = 1); - -/** - * @brief test whether the ChArUco markers are collinear - * - * @param _board layout of ChArUco board. - * @param _charucoIds list of identifiers for each corner in charucoCorners per frame. - * @return bool value, 1 (true) if detected corners form a line, 0 (false) if they do not. - solvePnP, calibration functions will fail if the corners are collinear (true). - * - * The number of ids in charucoIDs should be <= the number of chessboard corners in the board. This functions checks whether the charuco corners are on a straight line (returns true, if so), or not (false). Axis parallel, as well as diagonal and other straight lines detected. Degenerate cases: for number of charucoIDs <= 2, the function returns true. - */ -CV_EXPORTS_W bool testCharucoCornersCollinear(const Ptr &_board, - InputArray _charucoIds); - //! @} } } diff --git a/modules/aruco/include/opencv2/aruco/dictionary.hpp b/modules/aruco/include/opencv2/aruco/dictionary.hpp index ca9d0b4a0fa..cc692cf4890 100644 --- a/modules/aruco/include/opencv2/aruco/dictionary.hpp +++ b/modules/aruco/include/opencv2/aruco/dictionary.hpp @@ -1,41 +1,6 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. 
- - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ - +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html #ifndef __OPENCV_DICTIONARY_HPP__ #define __OPENCV_DICTIONARY_HPP__ @@ -66,28 +31,22 @@ class CV_EXPORTS_W Dictionary { CV_PROP_RW int maxCorrectionBits; // maximum number of bits that can be corrected - /** - */ Dictionary(const Mat &_bytesList = Mat(), int _markerSize = 0, int _maxcorr = 0); /** - Dictionary(const Dictionary &_dictionary); + * Dictionary(const Dictionary &_dictionary); */ + Dictionary(const Ptr &dictionary); - /** - */ - Dictionary(const Ptr &_dictionary); - - - /** + /** @brief returns generateCustomDictionary(nMarkers, markerSize, randomSeed) * @see generateCustomDictionary */ CV_WRAP_AS(create) static Ptr create(int nMarkers, int markerSize, int randomSeed=0); - /** + /** @brief returns generateCustomDictionary(nMarkers, markerSize, baseDictionary, randomSeed) * @see generateCustomDictionary */ CV_WRAP_AS(create_from) static Ptr create(int nMarkers, int markerSize, @@ -108,6 +67,7 @@ class CV_EXPORTS_W Dictionary { * @brief Write a dictionary to FileStorage. Format is the same as in readDictionary(). */ CV_WRAP void writeDictionary(Ptr& fs); + /** * @see getPredefinedDictionary */ diff --git a/modules/aruco/include/opencv2/aruco_detector.hpp b/modules/aruco/include/opencv2/aruco_detector.hpp new file mode 100644 index 00000000000..52eebeb139d --- /dev/null +++ b/modules/aruco/include/opencv2/aruco_detector.hpp @@ -0,0 +1,408 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html +#ifndef __OPENCV_ARUCO_DETECTOR_HPP__ +#define __OPENCV_ARUCO_DETECTOR_HPP__ +#include +#include + +/** + * @defgroup aruco ArUco Marker Detection + * This module is dedicated to square fiducial markers (also known as Augmented Reality Markers) + * These markers are useful for easy, fast and robust camera pose estimation.ç + * + * The main functionalities are: + * - Detection of markers in an image + * - Pose estimation from a single marker or from a board/set of markers + * - Detection of ChArUco board for high subpixel accuracy + * - Camera calibration from both, ArUco boards and ChArUco boards. + * - Detection of ChArUco diamond markers + * The samples directory includes easy examples of how to use the module. + * + * The implementation is based on the ArUco Library by R. Muñoz-Salinas and S. Garrido-Jurado @cite Aruco2014. + * + * Markers can also be detected based on the AprilTag 2 @cite wang2016iros fiducial detection method. + * + * @sa S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014. + * "Automatic generation and detection of highly reliable fiducial markers under occlusion". + * Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005 + * + * @sa http://www.uco.es/investiga/grupos/ava/node/26 + * + * This module has been originally developed by Sergio Garrido-Jurado as a project + * for Google Summer of Code 2015 (GSoC 15). + * + * +*/ + +namespace cv { +namespace aruco { + +//! @addtogroup aruco +//! 
@{ + +enum CornerRefineMethod{ + CORNER_REFINE_NONE, ///< Tag and corners detection based on the ArUco approach + CORNER_REFINE_SUBPIX, ///< ArUco approach and refine the corners locations using corner subpixel accuracy + CORNER_REFINE_CONTOUR, ///< ArUco approach and refine the corners locations using the contour-points line fitting + CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros +}; + +struct CV_EXPORTS_W DetectorParameters { + DetectorParameters() { + adaptiveThreshWinSizeMin = 3; + adaptiveThreshWinSizeMax = 23; + adaptiveThreshWinSizeStep = 10; + adaptiveThreshConstant = 7; + minMarkerPerimeterRate = 0.03; + maxMarkerPerimeterRate = 4.; + polygonalApproxAccuracyRate = 0.03; + minCornerDistanceRate = 0.05; + minDistanceToBorder = 3; + minMarkerDistanceRate = 0.05; + cornerRefinementMethod = CORNER_REFINE_NONE; + cornerRefinementWinSize = 5; + cornerRefinementMaxIterations = 30; + cornerRefinementMinAccuracy = 0.1; + markerBorderBits = 1; + perspectiveRemovePixelPerCell = 4; + perspectiveRemoveIgnoredMarginPerCell = 0.13; + maxErroneousBitsInBorderRate = 0.35; + minOtsuStdDev = 5.0; + errorCorrectionRate = 0.6; + aprilTagQuadDecimate = 0.0; + aprilTagQuadSigma = 0.0; + aprilTagMinClusterPixels = 5; + aprilTagMaxNmaxima = 10; + aprilTagCriticalRad = (float)(10* CV_PI /180); + aprilTagMaxLineFitMse = 10.0; + aprilTagMinWhiteBlackDiff = 5; + aprilTagDeglitch = 0; + detectInvertedMarker = false; + useAruco3Detection = false; + minSideLengthCanonicalImg = 32; + minMarkerLengthRatioOriginalImg = 0.0; + }; + + /** + * @brief Create a new set of DetectorParameters with default values. + */ + CV_WRAP static Ptr create() { + Ptr params = makePtr(); + return params; + } + + /** + * @brief Read a new set of DetectorParameters from FileNode (use FileStorage.root()). 
+ */ + CV_WRAP bool readDetectorParameters(const FileNode& fn); + + /** + * @brief Write a set of DetectorParameters to FileStorage + */ + CV_WRAP bool writeDetectorParameters(const Ptr& fs); + + /// minimum window size for adaptive thresholding before finding contours (default 3). + CV_PROP_RW int adaptiveThreshWinSizeMin; + + /// maximum window size for adaptive thresholding before finding contours (default 23). + CV_PROP_RW int adaptiveThreshWinSizeMax; + + /// increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax during the thresholding (default 10). + CV_PROP_RW int adaptiveThreshWinSizeStep; + + /// constant for adaptive thresholding before finding contours (default 7) + CV_PROP_RW double adaptiveThreshConstant; + + /// determine minimum perimeter for marker contour to be detected. This is defined as a rate respect to the + /// maximum dimension of the input image (default 0.03). + CV_PROP_RW double minMarkerPerimeterRate; + + /// determine maximum perimeter for marker contour to be detected. This is defined as a rate respect to + /// the maximum dimension of the input image (default 4.0). + CV_PROP_RW double maxMarkerPerimeterRate; + + /// minimum accuracy during the polygonal approximation process to determine which contours are squares. (default 0.03) + CV_PROP_RW double polygonalApproxAccuracyRate; + + /// minimum distance between corners for detected markers relative to its perimeter (default 0.05) + CV_PROP_RW double minCornerDistanceRate; + + /// minimum distance of any corner to the image border for detected markers (in pixels) (default 3) + CV_PROP_RW int minDistanceToBorder; + + /// minimum mean distance beetween two marker corners to be considered imilar, so that the + /// smaller one is removed. The rate is relative to the smaller perimeter of the two markers (default 0.05). + CV_PROP_RW double minMarkerDistanceRate; + + /// corner refinement method (default CORNER_REFINE_NONE). + /// 0:CORNER_REFINE_NONE, no refinement. 
+ /// 1: CORNER_REFINE_SUBPIX, do subpixel refinement. + /// 2: CORNER_REFINE_CONTOUR use contour-Points, + /// 3: CORNER_REFINE_APRILTAG use the AprilTag2 approach). + CV_PROP_RW int cornerRefinementMethod; + + /// window size for the corner refinement process (in pixels) (default 5). + CV_PROP_RW int cornerRefinementWinSize; + + /// maximum number of iterations for stop criteria of the corner refinement process (default 30). + CV_PROP_RW int cornerRefinementMaxIterations; + + /// minimum error for the stop cristeria of the corner refinement process (default: 0.1) + CV_PROP_RW double cornerRefinementMinAccuracy; + + /// number of bits of the marker border, i.e. marker border width (default 1). + CV_PROP_RW int markerBorderBits; + + /// number of bits (per dimension) for each cell of the marker when removing the perspective (default 4). + CV_PROP_RW int perspectiveRemovePixelPerCell; + + /// width of the margin of pixels on each cell not considered for the + /// determination of the cell bit. Represents the rate respect to the total size of the cell, i.e. + /// perspectiveRemovePixelPerCell (default 0.13) + CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell; + + /// maximum number of accepted erroneous bits in the border (i.e. number of allowed + /// white bits in the border). Represented as a rate respect to the total number of bits per marker (default 0.35). + CV_PROP_RW double maxErroneousBitsInBorderRate; + + /// minimun standard deviation in pixels values during the decodification step to apply Otsu + /// thresholding (otherwise, all the bits are set to 0 or 1 depending on mean higher than 128 or not) (default 5.0) + CV_PROP_RW double minOtsuStdDev; + + /// error correction rate respect to the maximun error correction capability for each dictionary (default 0.6). + CV_PROP_RW double errorCorrectionRate; + + // April :: User-configurable parameters. 
+ /// detection of quads can be done on a lower-resolution image, improving speed at a cost of + /// pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still + CV_PROP_RW float aprilTagQuadDecimate; + + /// what Gaussian blur should be applied to the segmented image (used for quad detection?) + CV_PROP_RW float aprilTagQuadSigma; + + // April :: Internal variables + /// reject quads containing too few pixels (default 5). + CV_PROP_RW int aprilTagMinClusterPixels; + + /// how many corner candidates to consider when segmenting a group of pixels into a quad (default 10). + CV_PROP_RW int aprilTagMaxNmaxima; + + /// reject quads where pairs of edges have angles that are close to straight or close to 180 degrees. + /// Zero means that no quads are rejected. (In radians) (default 10*PI/180) + CV_PROP_RW float aprilTagCriticalRad; + + /// when fitting lines to the contours, what is the maximum mean squared error + CV_PROP_RW float aprilTagMaxLineFitMse; + + /// when we build our model of black & white pixels, we add an extra check that the + /// white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]). + /// (default 5) + CV_PROP_RW int aprilTagMinWhiteBlackDiff; + + /// should the thresholded image be deglitched? Only useful for very noisy images (default 0). + CV_PROP_RW int aprilTagDeglitch; + + /// to check if there is a white marker. In order to generate a "white" marker just invert a + /// normal marker by using a tilde, ~markerImage. (default false) + CV_PROP_RW bool detectInvertedMarker; + + /// new Aruco functionality proposed in the paper: + /// Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018) + /// https://www.researchgate.net/publication/325787310_Speeded_Up_Detection_of_Squared_Fiducial_Markers + + /// to enable the new and faster Aruco detection strategy. 
+ CV_PROP_RW bool useAruco3Detection; + + /// minimum side length of a marker in the canonical image. Latter is the binarized image in which contours are searched. + CV_PROP_RW int minSideLengthCanonicalImg; + + /// range [0,1], eq (2) from paper. The parameter tau_i has a direct influence on the processing speed. + CV_PROP_RW float minMarkerLengthRatioOriginalImg; + +private: + bool readWrite(const Ptr& readNode = nullptr, const Ptr& writeStorage = nullptr); +}; + +struct CV_EXPORTS_W RefineParameters { + RefineParameters() { + minRepDistance = 10.f; + errorCorrectionRate = 3.f; + checkAllOrders = true; + } + + RefineParameters(float _minRepDistance, float _errorCorrectionRate, bool _checkAllOrders): + minRepDistance(_minRepDistance), errorCorrectionRate(_errorCorrectionRate), checkAllOrders(_checkAllOrders) {} + + CV_WRAP static Ptr create(float _minRepDistance = 10.f, float _errorCorrectionRate = 3.f, + bool _checkAllOrders = true) { + return makePtr(_minRepDistance, _errorCorrectionRate, _checkAllOrders); + } + + + /** + * @brief Read a new set of RefineParameters from FileNode (use FileStorage.root()). + */ + CV_WRAP bool readRefineParameters(const FileNode& fn); + + /** + * @brief Write a set of RefineParameters to FileStorage + */ + CV_WRAP bool writeRefineParameters(const Ptr& fs); + + /// minRepDistance minimum distance between the corners of the rejected candidate and the reprojected marker in + /// order to consider it as a correspondence. + CV_PROP_RW float minRepDistance; + /// minRepDistance rate of allowed erroneous bits respect to the error correction + /// capability of the used dictionary. -1 ignores the error correction step. + CV_PROP_RW float errorCorrectionRate; + /// checkAllOrders consider the four posible corner orders in the rejectedCorners array. + // * If it set to false, only the provided corner order is considered (default true). 
+ CV_PROP_RW bool checkAllOrders; +private: + bool readWrite(const Ptr& readNode = nullptr, const Ptr& writeStorage = nullptr); +}; + +class CV_EXPORTS_W ArucoDetector : public Algorithm +{ +public: + /// dictionary indicates the type of markers that will be searched + CV_PROP_RW Ptr dictionary; + + /// marker detection parameters + CV_PROP_RW Ptr params; + + /// marker refine parameters + CV_PROP_RW Ptr refineParams; + + ArucoDetector(const Ptr &_dictionary = getPredefinedDictionary(DICT_4X4_50), const Ptr &_params = DetectorParameters::create(), + const Ptr &_refineParams = RefineParameters::create()): + dictionary(_dictionary), params(_params), refineParams(_refineParams) {} + + CV_WRAP static Ptr create(const Ptr &_dictionary, const Ptr &_params) { + return makePtr(_dictionary, _params); + } + + /** + * @brief Basic marker detection + * + * @param image input image + * @param corners vector of detected marker corners. For each marker, its four corners + * are provided, (e.g std::vector > ). For N detected markers, + * the dimensions of this array is Nx4. The order of the corners is clockwise. + * @param ids vector of identifiers of the detected markers. The identifier is of type int + * (e.g. std::vector). For N detected markers, the size of ids is also N. + * The identifiers have the same order than the markers in the imgPoints array. + * @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a + * correct codification. Useful for debugging purposes. + * + * Performs marker detection in the input image. Only markers included in the specific dictionary + * are searched. For each detected marker, it returns the 2D position of its corner in the image + * and its corresponding identifier. + * Note that this function does not perform pose estimation. + * @note The function does not correct lens distortion or takes it into account. 
It's recommended to undistort + * input image with corresponging camera model, if camera parameters are known + * @sa undistort, estimatePoseSingleMarkers, estimatePoseBoard + * + */ + CV_WRAP void detectMarkers(InputArray image, OutputArrayOfArrays corners, OutputArray ids, + OutputArrayOfArrays rejectedImgPoints = noArray()); + + /** + * @brief Refind not detected markers based on the already detected and the board layout + * + * @param image input image + * @param board layout of markers in the board. + * @param detectedCorners vector of already detected marker corners. + * @param detectedIds vector of already detected marker identifiers. + * @param rejectedCorners vector of rejected candidates during the marker detection process. + * @param cameraMatrix optional input 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ + * @param distCoeffs optional vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param recoveredIdxs Optional array to returns the indexes of the recovered candidates in the + * original rejectedCorners array. + * + * This function tries to find markers that were not detected in the basic detecMarkers function. + * First, based on the current detected marker and the board layout, the function interpolates + * the position of the missing markers. Then it tries to find correspondence between the reprojected + * markers and the rejected candidates based on the minRepDistance and errorCorrectionRate + * parameters. + * If camera parameters and distortion coefficients are provided, missing markers are reprojected + * using projectPoint function. If not, missing marker projections are interpolated using global + * homography, and all the marker corners in the board must have the same Z coordinate. 
+ */ + CV_WRAP void refineDetectedMarkers(InputArray image, const Ptr &board, + InputOutputArrayOfArrays detectedCorners, + InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners, + InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(), + OutputArray recoveredIdxs = noArray()); + + /** @brief Stores algorithm parameters in a file storage + */ + virtual void write(FileStorage& fs) const override { + Ptr pfs = makePtr(fs); + dictionary->writeDictionary(pfs); + params->writeDetectorParameters(pfs); + refineParams->writeRefineParameters(pfs); + } + + /** @brief simplified API for language bindings + * @overload + */ + CV_WRAP void write(const String& fileName) const { + FileStorage fs(fileName, FileStorage::WRITE); + write(fs); + } + + /** @brief Reads algorithm parameters from a file storage + */ + CV_WRAP virtual void read(const FileNode& fn) override { + dictionary->readDictionary(fn); + params->readDetectorParameters(fn); + refineParams->readRefineParameters(fn); + } +}; + +/** + * @brief Draw detected markers in image + * + * @param image input/output image. It must have 1 or 3 channels. The number of channels is not + * altered. + * @param corners positions of marker corners on input image. + * (e.g std::vector > ). For N detected markers, the dimensions of + * this array should be Nx4. The order of the corners should be clockwise. + * @param ids vector of identifiers for markers in markersCorners . + * Optional, if not provided, ids are not painted. + * @param borderColor color of marker borders. Rest of colors (text color and first corner color) + * are calculated based on this one to improve visualization. + * + * Given an array of detected marker corners and its corresponding ids, this functions draws + * the markers in the image. The marker borders are painted and the markers identifiers if provided. + * Useful for debugging purposes. 
+ * + */ +CV_EXPORTS_W void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners, + InputArray ids = noArray(), Scalar borderColor = Scalar(0, 255, 0)); + +/** + * @brief Draw a canonical marker image + * + * @param dictionary dictionary of markers indicating the type of markers + * @param id identifier of the marker that will be returned. It has to be a valid id + * in the specified dictionary. + * @param sidePixels size of the image in pixels + * @param img output image with the marker + * @param borderBits width of the marker border. + * + * This function returns a marker image in its canonical form (i.e. ready to be printed) + */ +CV_EXPORTS_W void drawMarker(const Ptr &dictionary, int id, int sidePixels, OutputArray img, + int borderBits = 1); + +//! @} + +} +} + +#endif diff --git a/modules/aruco/misc/java/test/ArucoTest.java b/modules/aruco/misc/java/test/ArucoTest.java new file mode 100644 index 00000000000..6e70eab1752 --- /dev/null +++ b/modules/aruco/misc/java/test/ArucoTest.java @@ -0,0 +1,58 @@ +package org.opencv.test.aruco; + +import java.util.ArrayList; +import java.util.List; + +import org.opencv.test.OpenCVTestCase; +import org.opencv.core.Scalar; +import org.opencv.core.Mat; +import org.opencv.core.CvType; +import org.opencv.aruco.*; + + +public class ArucoTest extends OpenCVTestCase { + + public void testArucoIssue3133() { + byte[][] marker = {{0,1,1},{1,1,1},{0,1,1}}; + Dictionary dictionary = Dictionary.create(1, 3); + dictionary.set_maxCorrectionBits(0); + Mat markerBits = new Mat(3, 3, CvType.CV_8UC1); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + markerBits.put(i, j, marker[i][j]); + } + } + + Mat markerCompressed = Dictionary.getByteListFromBits(markerBits); + assertMatNotEqual(markerCompressed, dictionary.get_bytesList()); + + dictionary.set_bytesList(markerCompressed); + assertMatEqual(markerCompressed, dictionary.get_bytesList()); + } + + public void testArucoDetector() { + Dictionary dictionary = 
Dictionary.get(0); + DetectorParameters detectorParameters = DetectorParameters.create(); + ArucoDetector detector = ArucoDetector.create(dictionary, detectorParameters); + + Mat markerImage = new Mat(); + int id = 1, offset = 5, size = 40; + Aruco.drawMarker(dictionary, id, size, markerImage, detectorParameters.get_markerBorderBits()); + + Mat image = new Mat(markerImage.rows() + 2*offset, markerImage.cols() + 2*offset, + CvType.CV_8UC1, new Scalar(255)); + Mat m = image.submat(offset, size+offset, offset, size+offset); + markerImage.copyTo(m); + + List corners = new ArrayList(); + Mat ids = new Mat(); + detector.detectMarkers(image, corners, ids); + + assertEquals(1, corners.size()); + Mat res = corners.get(0); + assertArrayEquals(new double[]{offset, offset}, res.get(0, 0), 0.0); + assertArrayEquals(new double[]{size + offset - 1, offset}, res.get(0, 1), 0.0); + assertArrayEquals(new double[]{size + offset - 1, size + offset - 1}, res.get(0, 2), 0.0); + assertArrayEquals(new double[]{offset, size + offset - 1}, res.get(0, 3), 0.0); + } +} diff --git a/modules/aruco/misc/python/test/test_aruco.py b/modules/aruco/misc/python/test/test_aruco.py index 6c76fb5ed92..349d7b73524 100644 --- a/modules/aruco/misc/python/test/test_aruco.py +++ b/modules/aruco/misc/python/test/test_aruco.py @@ -85,5 +85,55 @@ def test_getDistanceToId(self): self.assertEqual(dist, 0) + def test_aruco_detector(self): + aruco_params = cv.aruco.DetectorParameters_create() + aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_250) + aruco_detector = cv.aruco.ArucoDetector_create(aruco_dict, aruco_params) + id = 2 + marker_size = 100 + offset = 10 + img_marker = cv.aruco.drawMarker(aruco_dict, id, marker_size, aruco_params.markerBorderBits) + img_marker = np.pad(img_marker, pad_width=offset, mode='constant', constant_values=255) + gold_corners = np.array([[offset, offset],[marker_size+offset-1.0,offset], + [marker_size+offset-1.0,marker_size+offset-1.0], + [offset, marker_size+offset-1.0]], 
dtype=np.float32) + expected_corners, expected_ids, expected_rejected = cv.aruco.detectMarkers(img_marker, aruco_dict, + parameters=aruco_params) + + corners, ids, rejected = aruco_detector.detectMarkers(img_marker) + + self.assertEqual(1, len(ids)) + self.assertEqual(id, ids[0]) + for i in range(0, len(ids)): + np.testing.assert_array_equal(expected_corners[i], corners[i]) + np.testing.assert_array_equal(gold_corners, corners[i].reshape(4, 2)) + + def test_aruco_detector_refine(self): + aruco_params = cv.aruco.DetectorParameters_create() + aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_250) + aruco_detector = cv.aruco.ArucoDetector_create(aruco_dict, aruco_params) + board_size = (3, 4) + board = cv.aruco.GridBoard_create(board_size[0], board_size[1], 5.0, 1.0, aruco_dict) + board_image = board.draw((board_size[0]*50, board_size[1]*50), marginSize=10) + + corners, ids, rejected = aruco_detector.detectMarkers(board_image) + self.assertEqual(board_size[0]*board_size[1], len(ids)) + + part_corners, part_ids, part_rejected = corners[:-1], ids[:-1], list(rejected) + part_rejected.append(corners[-1]) + + refine_corners, refine_ids, refine_rejected, recovered_ids = aruco_detector.refineDetectedMarkers(board_image, board, part_corners, part_ids, part_rejected) + refine_corners_c, _, _, _ = cv.aruco.refineDetectedMarkers(board_image, board, part_corners, part_ids, part_rejected) + + self.assertEqual(board_size[0] * board_size[1], len(refine_ids)) + self.assertEqual(1, len(recovered_ids)) + + for i in range(0, len(ids)): + np.testing.assert_array_equal(refine_corners_c[i], refine_corners[i]) + #self.assertEqual(ids[-1], recovered_ids[0]) + self.assertEqual(ids[-1], refine_ids[-1]) + self.assertEqual((1, 4, 2), refine_corners[0].shape) + np.testing.assert_array_equal(corners, refine_corners) + if __name__ == '__main__': NewOpenCVTests.bootstrap() diff --git a/modules/aruco/perf/perf_aruco.cpp b/modules/aruco/perf/perf_aruco.cpp index 6ab830fa280..6cde729ff2e 100644 
--- a/modules/aruco/perf/perf_aruco.cpp +++ b/modules/aruco/perf/perf_aruco.cpp @@ -190,6 +190,7 @@ PERF_TEST_P(EstimateAruco, ArucoFirst, ESTIMATE_PARAMS) detectorParams->minSideLengthCanonicalImg = 32; detectorParams->minMarkerLengthRatioOriginalImg = 0.04f / numMarkersInRow; } + aruco::ArucoDetector detector(dictionary, detectorParams); MarkerPainter painter(markerSize); auto image_map = painter.getProjectMarkersTile(numMarkersInRow, detectorParams, dictionary); @@ -198,7 +199,7 @@ PERF_TEST_P(EstimateAruco, ArucoFirst, ESTIMATE_PARAMS) vector ids; TEST_CYCLE() { - aruco::detectMarkers(image_map.first, dictionary, corners, ids, detectorParams); + detector.detectMarkers(image_map.first, corners, ids); } ASSERT_EQ(numMarkersInRow*numMarkersInRow, static_cast(ids.size())); double maxDistance = getMaxDistance(image_map.second, ids, corners); @@ -221,6 +222,7 @@ PERF_TEST_P(EstimateAruco, ArucoSecond, ESTIMATE_PARAMS) detectorParams->minSideLengthCanonicalImg = 64; detectorParams->minMarkerLengthRatioOriginalImg = 0.f; } + aruco::ArucoDetector detector(dictionary, detectorParams); const int markerSize = 200; const int numMarkersInRow = 11; MarkerPainter painter(markerSize); @@ -231,7 +233,7 @@ PERF_TEST_P(EstimateAruco, ArucoSecond, ESTIMATE_PARAMS) vector ids; TEST_CYCLE() { - aruco::detectMarkers(image_map.first, dictionary, corners, ids, detectorParams); + detector.detectMarkers(image_map.first, corners, ids); } ASSERT_EQ(numMarkersInRow*numMarkersInRow, static_cast(ids.size())); double maxDistance = getMaxDistance(image_map.second, ids, corners); @@ -276,6 +278,7 @@ PERF_TEST_P(EstimateLargeAruco, ArucoFHD, ESTIMATE_FHD_PARAMS) detectorParams->minSideLengthCanonicalImg = get<0>(testParams).minSideLengthCanonicalImg; detectorParams->minMarkerLengthRatioOriginalImg = get<0>(testParams).minMarkerLengthRatioOriginalImg; } + aruco::ArucoDetector detector(dictionary, detectorParams); const int markerSize = get<1>(testParams).first; // 1440 or 480 or 144 const int 
numMarkersInRow = get<1>(testParams).second; // 1 or 3 or 144 MarkerPainter painter(markerSize); // num pixels is 1440x1440 as in FHD 1920x1080 @@ -286,7 +289,7 @@ PERF_TEST_P(EstimateLargeAruco, ArucoFHD, ESTIMATE_FHD_PARAMS) vector ids; TEST_CYCLE() { - aruco::detectMarkers(image_map.first, dictionary, corners, ids, detectorParams); + detector.detectMarkers(image_map.first, corners, ids); } ASSERT_EQ(numMarkersInRow*numMarkersInRow, static_cast(ids.size())); double maxDistance = getMaxDistance(image_map.second, ids, corners); diff --git a/modules/aruco/perf/perf_precomp.hpp b/modules/aruco/perf/perf_precomp.hpp index e4df3216e0c..a72903624c8 100644 --- a/modules/aruco/perf/perf_precomp.hpp +++ b/modules/aruco/perf/perf_precomp.hpp @@ -5,7 +5,7 @@ #define __OPENCV_PERF_PRECOMP_HPP__ #include "opencv2/ts.hpp" -#include "opencv2/aruco.hpp" +#include "opencv2/aruco_detector.hpp" #include "opencv2/calib3d.hpp" #endif diff --git a/modules/aruco/samples/aruco_dict_utils.cpp b/modules/aruco/samples/aruco_dict_utils.cpp index 465513b3410..ab32f4f58a9 100644 --- a/modules/aruco/samples/aruco_dict_utils.cpp +++ b/modules/aruco/samples/aruco_dict_utils.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include using namespace cv; diff --git a/modules/aruco/samples/aruco_samples_utility.hpp b/modules/aruco/samples/aruco_samples_utility.hpp index c1cfe626cb7..ebdbcc1d7ec 100644 --- a/modules/aruco/samples/aruco_samples_utility.hpp +++ b/modules/aruco/samples/aruco_samples_utility.hpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include diff --git a/modules/aruco/samples/calibrate_camera.cpp b/modules/aruco/samples/calibrate_camera.cpp index a52bab5b161..bf056b15270 100644 --- a/modules/aruco/samples/calibrate_camera.cpp +++ b/modules/aruco/samples/calibrate_camera.cpp @@ -39,7 +39,8 @@ the use of this software, even if advised of the possibility of such damage. 
#include #include -#include +#include +#include #include #include #include @@ -162,6 +163,8 @@ int main(int argc, char *argv[]) { vector< vector< int > > allIds; Size imgSize; + aruco::ArucoDetector detector(dictionary, detectorParams); + while(inputVideo.grab()) { Mat image, imageCopy; inputVideo.retrieve(image); @@ -170,10 +173,10 @@ int main(int argc, char *argv[]) { vector< vector< Point2f > > corners, rejected; // detect markers - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); // refind strategy to detect more markers - if(refindStrategy) aruco::refineDetectedMarkers(image, board, corners, ids, rejected); + if(refindStrategy) detector.refineDetectedMarkers(image, board, corners, ids, rejected); // draw results image.copyTo(imageCopy); diff --git a/modules/aruco/samples/create_board.cpp b/modules/aruco/samples/create_board.cpp index d2482bce853..242688e5cb7 100644 --- a/modules/aruco/samples/create_board.cpp +++ b/modules/aruco/samples/create_board.cpp @@ -38,7 +38,7 @@ the use of this software, even if advised of the possibility of such damage. #include -#include +#include #include #include "aruco_samples_utility.hpp" diff --git a/modules/aruco/samples/create_marker.cpp b/modules/aruco/samples/create_marker.cpp index fe31ec2972f..73ce21880f6 100644 --- a/modules/aruco/samples/create_marker.cpp +++ b/modules/aruco/samples/create_marker.cpp @@ -38,7 +38,7 @@ the use of this software, even if advised of the possibility of such damage. #include -#include +#include #include #include "aruco_samples_utility.hpp" diff --git a/modules/aruco/samples/detect_board.cpp b/modules/aruco/samples/detect_board.cpp index 883be8dd2e3..7f9c3d1ebc7 100644 --- a/modules/aruco/samples/detect_board.cpp +++ b/modules/aruco/samples/detect_board.cpp @@ -38,7 +38,8 @@ the use of this software, even if advised of the possibility of such damage. 
#include -#include +#include +#include #include #include #include "aruco_samples_utility.hpp" @@ -135,7 +136,7 @@ int main(int argc, char *argv[]) { cerr << "Dictionary not specified" << endl; return 0; } - + aruco::ArucoDetector detector(dictionary, detectorParams); VideoCapture inputVideo; int waitTime; if(!video.empty()) { @@ -168,12 +169,12 @@ int main(int argc, char *argv[]) { Vec3d rvec, tvec; // detect markers - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); // refind strategy to detect more markers if(refindStrategy) - aruco::refineDetectedMarkers(image, board, corners, ids, rejected, camMatrix, - distCoeffs); + detector.refineDetectedMarkers(image, board, corners, ids, rejected, camMatrix, + distCoeffs); // estimate board pose int markersOfBoardDetected = 0; diff --git a/modules/aruco/samples/detect_markers.cpp b/modules/aruco/samples/detect_markers.cpp index f7d17b9f8a8..de38c2a1b2c 100644 --- a/modules/aruco/samples/detect_markers.cpp +++ b/modules/aruco/samples/detect_markers.cpp @@ -38,7 +38,8 @@ the use of this software, even if advised of the possibility of such damage. 
#include -#include +#include +#include #include #include "aruco_samples_utility.hpp" @@ -134,7 +135,7 @@ int main(int argc, char *argv[]) { return 0; } } - + aruco::ArucoDetector detector(dictionary, detectorParams); VideoCapture inputVideo; int waitTime; if(!video.empty()) { @@ -159,7 +160,7 @@ int main(int argc, char *argv[]) { vector< Vec3d > rvecs, tvecs; // detect markers and estimate pose - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); if(estimatePose && ids.size() > 0) aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); diff --git a/modules/aruco/src/apriltag_quad_thresh.cpp b/modules/aruco/src/apriltag/apriltag_quad_thresh.cpp similarity index 91% rename from modules/aruco/src/apriltag_quad_thresh.cpp rename to modules/aruco/src/apriltag/apriltag_quad_thresh.cpp index 20c193725fa..fcc038b25fd 100644 --- a/modules/aruco/src/apriltag_quad_thresh.cpp +++ b/modules/aruco/src/apriltag/apriltag_quad_thresh.cpp @@ -16,7 +16,7 @@ // because we use a fixed-point 16 bit integer representation with one // fractional bit. 
-#include "precomp.hpp" +#include "../precomp.hpp" #include "apriltag_quad_thresh.hpp" //#define APRIL_DEBUG @@ -1028,17 +1028,7 @@ int fit_quad(const Ptr &_params, const Mat im, zarray_t *clu return res; } -/** - * - * @param nCidx0 - * @param nCidx1 - * @param nClusters - * @param nW - * @param nH - * @param nquads - * @param td - * @param im - */ + static void do_quad(int nCidx0, int nCidx1, zarray_t &nClusters, int nW, int nH, zarray_t *nquads, const Ptr &td, const Mat im){ CV_Assert(nquads != NULL); @@ -1078,12 +1068,6 @@ static void do_quad(int nCidx0, int nCidx1, zarray_t &nClusters, int nW, int nH, } } -/** - * - * @param mIm - * @param parameters - * @param mThresh - */ void threshold(const Mat mIm, const Ptr ¶meters, Mat& mThresh){ int w = mIm.cols, h = mIm.rows; int s = (unsigned) mIm.step; @@ -1306,14 +1290,7 @@ static void _darken(const Mat &im){ } #endif -/** - * - * @param parameters - * @param mImg - * @param contours - * @return - */ -zarray_t *apriltag_quad_thresh(const Ptr ¶meters, const Mat & mImg, std::vector< std::vector< Point > > &contours){ +zarray_t *apriltag_quad_thresh(const Ptr ¶meters, const Mat & mImg, std::vector > &contours){ //////////////////////////////////////////////////////// // step 1. threshold the image, creating the edge image. @@ -1499,7 +1476,7 @@ out = Mat::zeros(h, w, CV_8UC3); zarray_t *cluster; _zarray_get(clusters, i, &cluster); - std::vector< Point > cnt; + std::vector cnt; for (int j = 0; j < _zarray_size(cluster); j++) { struct pt *p; _zarray_get_volatile(cluster, j, &p); @@ -1567,4 +1544,123 @@ imwrite("2.5 debug_lines.pnm", out); return quads; } +void _apriltag(Mat im_orig, const Ptr & _params, std::vector > &candidates, + std::vector > &contours){ + + /////////////////////////////////////////////////////////// + /// Step 1. Detect quads according to requested image decimation + /// and blurring parameters. 
+ Mat quad_im; + + if (_params->aprilTagQuadDecimate > 1){ + resize(im_orig, quad_im, Size(), 1/_params->aprilTagQuadDecimate, 1/_params->aprilTagQuadDecimate, INTER_AREA); + } + else { + im_orig.copyTo(quad_im); + } + + // Apply a Blur + if (_params->aprilTagQuadSigma != 0) { + // compute a reasonable kernel width by figuring that the + // kernel should go out 2 std devs. + // + // max sigma ksz + // 0.499 1 (disabled) + // 0.999 3 + // 1.499 5 + // 1.999 7 + + float sigma = fabsf((float) _params->aprilTagQuadSigma); + + int ksz = cvFloor(4 * sigma); // 2 std devs in each direction + ksz |= 1; // make odd number + + if (ksz > 1) { + if (_params->aprilTagQuadSigma > 0) + GaussianBlur(quad_im, quad_im, Size(ksz, ksz), sigma, sigma, BORDER_REPLICATE); + else { + Mat orig; + quad_im.copyTo(orig); + GaussianBlur(quad_im, quad_im, Size(ksz, ksz), sigma, sigma, BORDER_REPLICATE); + + // SHARPEN the image by subtracting the low frequency components. + for (int y = 0; y < orig.rows; y++) { + for (int x = 0; x < orig.cols; x++) { + int vorig = orig.data[y*orig.step + x]; + int vblur = quad_im.data[y*quad_im.step + x]; + + int v = 2*vorig - vblur; + if (v < 0) + v = 0; + if (v > 255) + v = 255; + + quad_im.data[y*quad_im.step + x] = (uint8_t) v; + } + } + } + } + } + +#ifdef APRIL_DEBUG + imwrite("1.1 debug_preprocess.pnm", quad_im); +#endif + + /////////////////////////////////////////////////////////// + /// Step 2. do the Threshold :: get the set of candidate quads + zarray_t *quads = apriltag_quad_thresh(_params, quad_im, contours); + + CV_Assert(quads != NULL); + + // adjust centers of pixels so that they correspond to the + // original full-resolution image. 
+ if (_params->aprilTagQuadDecimate > 1) { + for (int i = 0; i < _zarray_size(quads); i++) { + struct sQuad *q; + _zarray_get_volatile(quads, i, &q); + for (int j = 0; j < 4; j++) { + q->p[j][0] *= _params->aprilTagQuadDecimate; + q->p[j][1] *= _params->aprilTagQuadDecimate; + } + } + } + +#ifdef APRIL_DEBUG + Mat im_quads = im_orig.clone(); + im_quads = im_quads*0.5; + srandom(0); + + for (int i = 0; i < _zarray_size(quads); i++) { + struct sQuad *quad; + _zarray_get_volatile(quads, i, &quad); + + const int bias = 100; + int color = bias + (random() % (255-bias)); + + line(im_quads, Point(quad->p[0][0], quad->p[0][1]), Point(quad->p[1][0], quad->p[1][1]), color, 1); + line(im_quads, Point(quad->p[1][0], quad->p[1][1]), Point(quad->p[2][0], quad->p[2][1]), color, 1); + line(im_quads, Point(quad->p[2][0], quad->p[2][1]), Point(quad->p[3][0], quad->p[3][1]), color, 1); + line(im_quads, Point(quad->p[3][0], quad->p[3][1]), Point(quad->p[0][0], quad->p[0][1]), color, 1); + } + imwrite("1.2 debug_quads_raw.pnm", im_quads); +#endif + + //////////////////////////////////////////////////////////////// + /// Step 3. 
Save the output :: candidate corners + for (int i = 0; i < _zarray_size(quads); i++) { + struct sQuad *quad; + _zarray_get_volatile(quads, i, &quad); + + std::vector corners; + corners.push_back(Point2f(quad->p[3][0], quad->p[3][1])); //pA + corners.push_back(Point2f(quad->p[0][0], quad->p[0][1])); //pB + corners.push_back(Point2f(quad->p[1][0], quad->p[1][1])); //pC + corners.push_back(Point2f(quad->p[2][0], quad->p[2][1])); //pD + + candidates.push_back(corners); + } + + _zarray_destroy(quads); +} + }} diff --git a/modules/aruco/src/apriltag_quad_thresh.hpp b/modules/aruco/src/apriltag/apriltag_quad_thresh.hpp similarity index 91% rename from modules/aruco/src/apriltag_quad_thresh.hpp rename to modules/aruco/src/apriltag/apriltag_quad_thresh.hpp index fd392c84477..2b8647ca15d 100644 --- a/modules/aruco/src/apriltag_quad_thresh.hpp +++ b/modules/aruco/src/apriltag/apriltag_quad_thresh.hpp @@ -19,7 +19,8 @@ #ifndef _OPENCV_APRIL_QUAD_THRESH_HPP_ #define _OPENCV_APRIL_QUAD_THRESH_HPP_ -#include "opencv2/aruco.hpp" +#include +#include "opencv2/aruco_detector.hpp" #include "unionfind.hpp" #include "zmaxheap.hpp" #include "zarray.hpp" @@ -104,22 +105,15 @@ int quad_segment_agg(int sz, struct line_fit_pt *lfps, int indices[4]); **/ int fit_quad(const Ptr &_params, const Mat im, zarray_t *cluster, struct sQuad *quad); -/** - * - * @param mIm - * @param parameters - * @param mThresh - */ + void threshold(const Mat mIm, const Ptr ¶meters, Mat& mThresh); -/** - * - * @param parameters - * @param mImg - * @param contours - * @return - */ -zarray_t *apriltag_quad_thresh(const Ptr ¶meters, const Mat & mImg, std::vector< std::vector< Point > > &contours); + +zarray_t *apriltag_quad_thresh(const Ptr ¶meters, const Mat & mImg, + std::vector > &contours); + +void _apriltag(Mat im_orig, const Ptr & _params, std::vector > &candidates, + std::vector > &contours); }} #endif diff --git a/modules/aruco/src/predefined_dictionaries_apriltag.hpp 
b/modules/aruco/src/apriltag/predefined_dictionaries_apriltag.hpp similarity index 100% rename from modules/aruco/src/predefined_dictionaries_apriltag.hpp rename to modules/aruco/src/apriltag/predefined_dictionaries_apriltag.hpp diff --git a/modules/aruco/src/unionfind.hpp b/modules/aruco/src/apriltag/unionfind.hpp similarity index 100% rename from modules/aruco/src/unionfind.hpp rename to modules/aruco/src/apriltag/unionfind.hpp diff --git a/modules/aruco/src/zarray.hpp b/modules/aruco/src/apriltag/zarray.hpp similarity index 100% rename from modules/aruco/src/zarray.hpp rename to modules/aruco/src/apriltag/zarray.hpp diff --git a/modules/aruco/src/zmaxheap.cpp b/modules/aruco/src/apriltag/zmaxheap.cpp similarity index 99% rename from modules/aruco/src/zmaxheap.cpp rename to modules/aruco/src/apriltag/zmaxheap.cpp index fe17821936a..f3b315e7317 100644 --- a/modules/aruco/src/zmaxheap.cpp +++ b/modules/aruco/src/apriltag/zmaxheap.cpp @@ -12,7 +12,7 @@ // of the authors and should not be interpreted as representing official policies, // either expressed or implied, of the Regents of The University of Michigan. -#include "precomp.hpp" +#include "../precomp.hpp" #include "zmaxheap.hpp" diff --git a/modules/aruco/src/zmaxheap.hpp b/modules/aruco/src/apriltag/zmaxheap.hpp similarity index 100% rename from modules/aruco/src/zmaxheap.hpp rename to modules/aruco/src/apriltag/zmaxheap.hpp diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index fccb3d965ad..16b23a75569 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -1,1904 +1,32 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. 
-*/ - -#include "precomp.hpp" #include "opencv2/aruco.hpp" -#include -#include - -#include "apriltag_quad_thresh.hpp" -#include "zarray.hpp" - -#include - -//#define APRIL_DEBUG -#ifdef APRIL_DEBUG -#include "opencv2/imgcodecs.hpp" -#endif namespace cv { namespace aruco { using namespace std; - -/** - * - */ -DetectorParameters::DetectorParameters() - : adaptiveThreshWinSizeMin(3), - adaptiveThreshWinSizeMax(23), - adaptiveThreshWinSizeStep(10), - adaptiveThreshConstant(7), - minMarkerPerimeterRate(0.03), - maxMarkerPerimeterRate(4.), - polygonalApproxAccuracyRate(0.03), - minCornerDistanceRate(0.05), - minDistanceToBorder(3), - minMarkerDistanceRate(0.05), - cornerRefinementMethod(CORNER_REFINE_NONE), - cornerRefinementWinSize(5), - cornerRefinementMaxIterations(30), - cornerRefinementMinAccuracy(0.1), - markerBorderBits(1), - perspectiveRemovePixelPerCell(4), - perspectiveRemoveIgnoredMarginPerCell(0.13), - maxErroneousBitsInBorderRate(0.35), - minOtsuStdDev(5.0), - errorCorrectionRate(0.6), - aprilTagQuadDecimate(0.0), - aprilTagQuadSigma(0.0), - aprilTagMinClusterPixels(5), - aprilTagMaxNmaxima(10), - aprilTagCriticalRad( (float)(10* CV_PI /180) ), - aprilTagMaxLineFitMse(10.0), - aprilTagMinWhiteBlackDiff(5), - aprilTagDeglitch(0), - detectInvertedMarker(false), - useAruco3Detection(false), - minSideLengthCanonicalImg(32), - minMarkerLengthRatioOriginalImg(0.0) -{} - - -/** - * @brief Create a new set of DetectorParameters with default values. - */ -Ptr DetectorParameters::create() { - Ptr params = makePtr(); - return params; -} - -template -static inline bool readParameter(const FileNode& node, T& parameter) -{ - if (!node.empty()) { - node >> parameter; - return true; - } - return false; -} - -/** - * @brief Read a new set of DetectorParameters from FileStorage. 
- */ -bool DetectorParameters::readDetectorParameters(const FileNode& fn) -{ - if(fn.empty()) - return true; - bool checkRead = false; - checkRead |= readParameter(fn["adaptiveThreshWinSizeMin"], this->adaptiveThreshWinSizeMin); - checkRead |= readParameter(fn["adaptiveThreshWinSizeMax"], this->adaptiveThreshWinSizeMax); - checkRead |= readParameter(fn["adaptiveThreshWinSizeStep"], this->adaptiveThreshWinSizeStep); - checkRead |= readParameter(fn["adaptiveThreshConstant"], this->adaptiveThreshConstant); - checkRead |= readParameter(fn["minMarkerPerimeterRate"], this->minMarkerPerimeterRate); - checkRead |= readParameter(fn["maxMarkerPerimeterRate"], this->maxMarkerPerimeterRate); - checkRead |= readParameter(fn["polygonalApproxAccuracyRate"], this->polygonalApproxAccuracyRate); - checkRead |= readParameter(fn["minCornerDistanceRate"], this->minCornerDistanceRate); - checkRead |= readParameter(fn["minDistanceToBorder"], this->minDistanceToBorder); - checkRead |= readParameter(fn["minMarkerDistanceRate"], this->minMarkerDistanceRate); - checkRead |= readParameter(fn["cornerRefinementMethod"], this->cornerRefinementMethod); - checkRead |= readParameter(fn["cornerRefinementWinSize"], this->cornerRefinementWinSize); - checkRead |= readParameter(fn["cornerRefinementMaxIterations"], this->cornerRefinementMaxIterations); - checkRead |= readParameter(fn["cornerRefinementMinAccuracy"], this->cornerRefinementMinAccuracy); - checkRead |= readParameter(fn["markerBorderBits"], this->markerBorderBits); - checkRead |= readParameter(fn["perspectiveRemovePixelPerCell"], this->perspectiveRemovePixelPerCell); - checkRead |= readParameter(fn["perspectiveRemoveIgnoredMarginPerCell"], this->perspectiveRemoveIgnoredMarginPerCell); - checkRead |= readParameter(fn["maxErroneousBitsInBorderRate"], this->maxErroneousBitsInBorderRate); - checkRead |= readParameter(fn["minOtsuStdDev"], this->minOtsuStdDev); - checkRead |= readParameter(fn["errorCorrectionRate"], this->errorCorrectionRate); - // 
new aruco 3 functionality - checkRead |= readParameter(fn["useAruco3Detection"], this->useAruco3Detection); - checkRead |= readParameter(fn["minSideLengthCanonicalImg"], this->minSideLengthCanonicalImg); - checkRead |= readParameter(fn["minMarkerLengthRatioOriginalImg"], this->minMarkerLengthRatioOriginalImg); - return checkRead; -} - - -/** - * @brief Convert input image to gray if it is a 3-channels image - */ -static void _convertToGrey(InputArray _in, OutputArray _out) { - - CV_Assert(_in.type() == CV_8UC1 || _in.type() == CV_8UC3); - - if(_in.type() == CV_8UC3) - cvtColor(_in, _out, COLOR_BGR2GRAY); - else - _in.copyTo(_out); -} - - -/** - * @brief Threshold input image using adaptive thresholding - */ -static void _threshold(InputArray _in, OutputArray _out, int winSize, double constant) { - - CV_Assert(winSize >= 3); - if(winSize % 2 == 0) winSize++; // win size must be odd - adaptiveThreshold(_in, _out, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, winSize, constant); -} - - -/** - * @brief Given a tresholded image, find the contours, calculate their polygonal approximation - * and take those that accomplish some conditions - */ -static void _findMarkerContours(InputArray _in, vector< vector< Point2f > > &candidates, - vector< vector< Point > > &contoursOut, double minPerimeterRate, - double maxPerimeterRate, double accuracyRate, - double minCornerDistanceRate, int minDistanceToBorder, int minSize) { - - CV_Assert(minPerimeterRate > 0 && maxPerimeterRate > 0 && accuracyRate > 0 && - minCornerDistanceRate >= 0 && minDistanceToBorder >= 0); - - // calculate maximum and minimum sizes in pixels - unsigned int minPerimeterPixels = - (unsigned int)(minPerimeterRate * max(_in.getMat().cols, _in.getMat().rows)); - unsigned int maxPerimeterPixels = - (unsigned int)(maxPerimeterRate * max(_in.getMat().cols, _in.getMat().rows)); - - // for aruco3 functionality - if (minSize != 0) { - minPerimeterPixels = 4*minSize; - } - - Mat contoursImg; - 
_in.getMat().copyTo(contoursImg); - vector< vector< Point > > contours; - findContours(contoursImg, contours, RETR_LIST, CHAIN_APPROX_NONE); - // now filter list of contours - for(unsigned int i = 0; i < contours.size(); i++) { - // check perimeter - if(contours[i].size() < minPerimeterPixels || contours[i].size() > maxPerimeterPixels) - continue; - - // check is square and is convex - vector< Point > approxCurve; - approxPolyDP(contours[i], approxCurve, double(contours[i].size()) * accuracyRate, true); - if(approxCurve.size() != 4 || !isContourConvex(approxCurve)) continue; - - // check min distance between corners - double minDistSq = - max(contoursImg.cols, contoursImg.rows) * max(contoursImg.cols, contoursImg.rows); - for(int j = 0; j < 4; j++) { - double d = (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) * - (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) + - (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y) * - (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y); - minDistSq = min(minDistSq, d); - } - double minCornerDistancePixels = double(contours[i].size()) * minCornerDistanceRate; - if(minDistSq < minCornerDistancePixels * minCornerDistancePixels) continue; - - // check if it is too near to the image border - bool tooNearBorder = false; - for(int j = 0; j < 4; j++) { - if(approxCurve[j].x < minDistanceToBorder || approxCurve[j].y < minDistanceToBorder || - approxCurve[j].x > contoursImg.cols - 1 - minDistanceToBorder || - approxCurve[j].y > contoursImg.rows - 1 - minDistanceToBorder) - tooNearBorder = true; - } - if(tooNearBorder) continue; - - // if it passes all the test, add to candidates vector - vector< Point2f > currentCandidate; - currentCandidate.resize(4); - for(int j = 0; j < 4; j++) { - currentCandidate[j] = Point2f((float)approxCurve[j].x, (float)approxCurve[j].y); - } - candidates.push_back(currentCandidate); - contoursOut.push_back(contours[i]); - } -} - - -/** - * @brief Assure order of candidate corners is 
clockwise direction - */ -static void _reorderCandidatesCorners(vector< vector< Point2f > > &candidates) { - - for(unsigned int i = 0; i < candidates.size(); i++) { - double dx1 = candidates[i][1].x - candidates[i][0].x; - double dy1 = candidates[i][1].y - candidates[i][0].y; - double dx2 = candidates[i][2].x - candidates[i][0].x; - double dy2 = candidates[i][2].y - candidates[i][0].y; - double crossProduct = (dx1 * dy2) - (dy1 * dx2); - - if(crossProduct < 0.0) { // not clockwise direction - swap(candidates[i][1], candidates[i][3]); - } - } -} - -/** - * @brief to make sure that the corner's order of both candidates (default/white) is the same - */ -static vector< Point2f > alignContourOrder( Point2f corner, vector< Point2f > candidate){ - uint8_t r=0; - double min = cv::norm( Vec2f( corner - candidate[0] ), NORM_L2SQR); - for(uint8_t pos=1; pos < 4; pos++) { - double nDiff = cv::norm( Vec2f( corner - candidate[pos] ), NORM_L2SQR); - if(nDiff < min){ - r = pos; - min =nDiff; - } - } - std::rotate(candidate.begin(), candidate.begin() + r, candidate.end()); - return candidate; -} - -/** - * @brief Check candidates that are too close to each other, save the potential candidates - * (i.e. 
biggest/smallest contour) and remove the rest - */ -static void _filterTooCloseCandidates(const vector< vector< Point2f > > &candidatesIn, - vector< vector< vector< Point2f > > > &candidatesSetOut, - const vector< vector< Point > > &contoursIn, - vector< vector< vector< Point > > > &contoursSetOut, - double minMarkerDistanceRate, bool detectInvertedMarker) { - - CV_Assert(minMarkerDistanceRate >= 0); - vector candGroup; - candGroup.resize(candidatesIn.size(), -1); - vector< vector > groupedCandidates; - for(unsigned int i = 0; i < candidatesIn.size(); i++) { - bool isSingleContour = true; - for(unsigned int j = i + 1; j < candidatesIn.size(); j++) { - - int minimumPerimeter = min((int)contoursIn[i].size(), (int)contoursIn[j].size() ); - - // fc is the first corner considered on one of the markers, 4 combinations are possible - for(int fc = 0; fc < 4; fc++) { - double distSq = 0; - for(int c = 0; c < 4; c++) { - // modC is the corner considering first corner is fc - int modC = (c + fc) % 4; - distSq += (candidatesIn[i][modC].x - candidatesIn[j][c].x) * - (candidatesIn[i][modC].x - candidatesIn[j][c].x) + - (candidatesIn[i][modC].y - candidatesIn[j][c].y) * - (candidatesIn[i][modC].y - candidatesIn[j][c].y); - } - distSq /= 4.; - - // if mean square distance is too low, remove the smaller one of the two markers - double minMarkerDistancePixels = double(minimumPerimeter) * minMarkerDistanceRate; - if(distSq < minMarkerDistancePixels * minMarkerDistancePixels) { - isSingleContour = false; - // i and j are not related to a group - if(candGroup[i]<0 && candGroup[j]<0){ - // mark candidates with their corresponding group number - candGroup[i] = candGroup[j] = (int)groupedCandidates.size(); - - // create group - vector grouped; - grouped.push_back(i); - grouped.push_back(j); - groupedCandidates.push_back( grouped ); - } - // i is related to a group - else if(candGroup[i] > -1 && candGroup[j] == -1){ - int group = candGroup[i]; - candGroup[j] = group; - - // add to group - 
groupedCandidates[group].push_back( j ); - } - // j is related to a group - else if(candGroup[j] > -1 && candGroup[i] == -1){ - int group = candGroup[j]; - candGroup[i] = group; - - // add to group - groupedCandidates[group].push_back( i ); - } - } - } - } - if (isSingleContour && candGroup[i] < 0) - { - candGroup[i] = (int)groupedCandidates.size(); - vector grouped; - grouped.push_back(i); - grouped.push_back(i); // step "save possible candidates" require minimum 2 elements - groupedCandidates.push_back(grouped); - } - } - - // save possible candidates - candidatesSetOut.clear(); - contoursSetOut.clear(); - - vector< vector< Point2f > > biggerCandidates; - vector< vector< Point > > biggerContours; - vector< vector< Point2f > > smallerCandidates; - vector< vector< Point > > smallerContours; - - // save possible candidates - for(unsigned int i = 0; i < groupedCandidates.size(); i++) { - unsigned int smallerIdx = groupedCandidates[i][0]; - unsigned int biggerIdx = smallerIdx; - double smallerArea = contourArea(candidatesIn[smallerIdx]); - double biggerArea = smallerArea; - - // evaluate group elements - for(unsigned int j = 1; j < groupedCandidates[i].size(); j++) { - unsigned int currIdx = groupedCandidates[i][j]; - double currArea = contourArea(candidatesIn[currIdx]); - - // check if current contour is bigger - if(currArea >= biggerArea) { - biggerIdx = currIdx; - biggerArea = currArea; - } - - // check if current contour is smaller - if(currArea < smallerArea && detectInvertedMarker) { - smallerIdx = currIdx; - smallerArea = currArea; - } - } - - // add contours and candidates - biggerCandidates.push_back(candidatesIn[biggerIdx]); - biggerContours.push_back(contoursIn[biggerIdx]); - if(detectInvertedMarker) { - smallerCandidates.push_back(alignContourOrder(candidatesIn[biggerIdx][0], candidatesIn[smallerIdx])); - smallerContours.push_back(contoursIn[smallerIdx]); - } - } - // to preserve the structure :: candidateSet< defaultCandidates, whiteCandidates > - // 
default candidates - candidatesSetOut.push_back(biggerCandidates); - contoursSetOut.push_back(biggerContours); - // white candidates - candidatesSetOut.push_back(smallerCandidates); - contoursSetOut.push_back(smallerContours); -} - -/** - * @brief Initial steps on finding square candidates - */ -static void _detectInitialCandidates(const Mat &grey, vector< vector< Point2f > > &candidates, - vector< vector< Point > > &contours, - const Ptr ¶ms) { - - CV_Assert(params->adaptiveThreshWinSizeMin >= 3 && params->adaptiveThreshWinSizeMax >= 3); - CV_Assert(params->adaptiveThreshWinSizeMax >= params->adaptiveThreshWinSizeMin); - CV_Assert(params->adaptiveThreshWinSizeStep > 0); - - // number of window sizes (scales) to apply adaptive thresholding - int nScales = (params->adaptiveThreshWinSizeMax - params->adaptiveThreshWinSizeMin) / - params->adaptiveThreshWinSizeStep + 1; - - vector< vector< vector< Point2f > > > candidatesArrays((size_t) nScales); - vector< vector< vector< Point > > > contoursArrays((size_t) nScales); - - ////for each value in the interval of thresholding window sizes - parallel_for_(Range(0, nScales), [&](const Range& range) { - const int begin = range.start; - const int end = range.end; - - for (int i = begin; i < end; i++) { - int currScale = params->adaptiveThreshWinSizeMin + i * params->adaptiveThreshWinSizeStep; - // threshold - Mat thresh; - _threshold(grey, thresh, currScale, params->adaptiveThreshConstant); - - // detect rectangles - _findMarkerContours(thresh, candidatesArrays[i], contoursArrays[i], - params->minMarkerPerimeterRate, params->maxMarkerPerimeterRate, - params->polygonalApproxAccuracyRate, params->minCornerDistanceRate, - params->minDistanceToBorder, params->minSideLengthCanonicalImg); - } - }); - // join candidates - for(int i = 0; i < nScales; i++) { - for(unsigned int j = 0; j < candidatesArrays[i].size(); j++) { - candidates.push_back(candidatesArrays[i][j]); - contours.push_back(contoursArrays[i][j]); - } - } -} - - -/** - * 
@brief Detect square candidates in the input image - */ -static void _detectCandidates(InputArray _grayImage, vector< vector< vector< Point2f > > >& candidatesSetOut, - vector< vector< vector< Point > > >& contoursSetOut, const Ptr &_params) { - Mat grey = _grayImage.getMat(); - CV_DbgAssert(grey.total() != 0); - CV_DbgAssert(grey.type() == CV_8UC1); - - /// 1. DETECT FIRST SET OF CANDIDATES - vector< vector< Point2f > > candidates; - vector< vector< Point > > contours; - _detectInitialCandidates(grey, candidates, contours, _params); - /// 2. SORT CORNERS - _reorderCandidatesCorners(candidates); - - /// 3. FILTER OUT NEAR CANDIDATE PAIRS - // save the outter/inner border (i.e. potential candidates) - _filterTooCloseCandidates(candidates, candidatesSetOut, contours, contoursSetOut, - _params->minMarkerDistanceRate, _params->detectInvertedMarker); -} - - -/** - * @brief Given an input image and candidate corners, extract the bits of the candidate, including - * the border bits - */ -static Mat _extractBits(InputArray _image, InputArray _corners, int markerSize, - int markerBorderBits, int cellSize, double cellMarginRate, - double minStdDevOtsu) { - - CV_Assert(_image.getMat().channels() == 1); - CV_Assert(_corners.total() == 4); - CV_Assert(markerBorderBits > 0 && cellSize > 0 && cellMarginRate >= 0 && cellMarginRate <= 1); - CV_Assert(minStdDevOtsu >= 0); - - // number of bits in the marker - int markerSizeWithBorders = markerSize + 2 * markerBorderBits; - int cellMarginPixels = int(cellMarginRate * cellSize); - - Mat resultImg; // marker image after removing perspective - int resultImgSize = markerSizeWithBorders * cellSize; - Mat resultImgCorners(4, 1, CV_32FC2); - resultImgCorners.ptr< Point2f >(0)[0] = Point2f(0, 0); - resultImgCorners.ptr< Point2f >(0)[1] = Point2f((float)resultImgSize - 1, 0); - resultImgCorners.ptr< Point2f >(0)[2] = - Point2f((float)resultImgSize - 1, (float)resultImgSize - 1); - resultImgCorners.ptr< Point2f >(0)[3] = Point2f(0, 
(float)resultImgSize - 1); - - // remove perspective - Mat transformation = getPerspectiveTransform(_corners, resultImgCorners); - warpPerspective(_image, resultImg, transformation, Size(resultImgSize, resultImgSize), - INTER_NEAREST); - - // output image containing the bits - Mat bits(markerSizeWithBorders, markerSizeWithBorders, CV_8UC1, Scalar::all(0)); - - // check if standard deviation is enough to apply Otsu - // if not enough, it probably means all bits are the same color (black or white) - Mat mean, stddev; - // Remove some border just to avoid border noise from perspective transformation - Mat innerRegion = resultImg.colRange(cellSize / 2, resultImg.cols - cellSize / 2) - .rowRange(cellSize / 2, resultImg.rows - cellSize / 2); - meanStdDev(innerRegion, mean, stddev); - if(stddev.ptr< double >(0)[0] < minStdDevOtsu) { - // all black or all white, depending on mean value - if(mean.ptr< double >(0)[0] > 127) - bits.setTo(1); - else - bits.setTo(0); - return bits; - } - - // now extract code, first threshold using Otsu - threshold(resultImg, resultImg, 125, 255, THRESH_BINARY | THRESH_OTSU); - - // for each cell - for(int y = 0; y < markerSizeWithBorders; y++) { - for(int x = 0; x < markerSizeWithBorders; x++) { - int Xstart = x * (cellSize) + cellMarginPixels; - int Ystart = y * (cellSize) + cellMarginPixels; - Mat square = resultImg(Rect(Xstart, Ystart, cellSize - 2 * cellMarginPixels, - cellSize - 2 * cellMarginPixels)); - // count white pixels on each cell to assign its value - size_t nZ = (size_t) countNonZero(square); - if(nZ > square.total() / 2) bits.at< unsigned char >(y, x) = 1; - } - } - - return bits; -} - - - -/** - * @brief Return number of erroneous bits in border, i.e. number of white bits in border. 
- */ -static int _getBorderErrors(const Mat &bits, int markerSize, int borderSize) { - - int sizeWithBorders = markerSize + 2 * borderSize; - - CV_Assert(markerSize > 0 && bits.cols == sizeWithBorders && bits.rows == sizeWithBorders); - - int totalErrors = 0; - for(int y = 0; y < sizeWithBorders; y++) { - for(int k = 0; k < borderSize; k++) { - if(bits.ptr< unsigned char >(y)[k] != 0) totalErrors++; - if(bits.ptr< unsigned char >(y)[sizeWithBorders - 1 - k] != 0) totalErrors++; - } - } - for(int x = borderSize; x < sizeWithBorders - borderSize; x++) { - for(int k = 0; k < borderSize; k++) { - if(bits.ptr< unsigned char >(k)[x] != 0) totalErrors++; - if(bits.ptr< unsigned char >(sizeWithBorders - 1 - k)[x] != 0) totalErrors++; - } - } - return totalErrors; -} - - -/** - * @brief Tries to identify one candidate given the dictionary - * @return candidate typ. zero if the candidate is not valid, - * 1 if the candidate is a black candidate (default candidate) - * 2 if the candidate is a white candidate - */ -static uint8_t _identifyOneCandidate(const Ptr& dictionary, InputArray _image, - const vector& _corners, int& idx, - const Ptr& params, int& rotation, - const float scale = 1.f) -{ - CV_DbgAssert(_corners.size() == 4); - CV_DbgAssert(_image.getMat().total() != 0); - CV_DbgAssert(params->markerBorderBits > 0); - uint8_t typ=1; - // get bits - // scale corners to the correct size to search on the corresponding image pyramid - vector scaled_corners(4); - for (int i = 0; i < 4; ++i) { - scaled_corners[i].x = _corners[i].x * scale; - scaled_corners[i].y = _corners[i].y * scale; - } - - Mat candidateBits = - _extractBits(_image, scaled_corners, dictionary->markerSize, params->markerBorderBits, - params->perspectiveRemovePixelPerCell, - params->perspectiveRemoveIgnoredMarginPerCell, params->minOtsuStdDev); - - // analyze border bits - int maximumErrorsInBorder = - int(dictionary->markerSize * dictionary->markerSize * params->maxErroneousBitsInBorderRate); - int 
borderErrors = - _getBorderErrors(candidateBits, dictionary->markerSize, params->markerBorderBits); - - // check if it is a white marker - if(params->detectInvertedMarker){ - // to get from 255 to 1 - Mat invertedImg = ~candidateBits-254; - int invBError = _getBorderErrors(invertedImg, dictionary->markerSize, params->markerBorderBits); - // white marker - if(invBError maximumErrorsInBorder) return 0; // border is wrong - - // take only inner bits - Mat onlyBits = - candidateBits.rowRange(params->markerBorderBits, - candidateBits.rows - params->markerBorderBits) - .colRange(params->markerBorderBits, candidateBits.cols - params->markerBorderBits); - - // try to indentify the marker - if(!dictionary->identify(onlyBits, idx, rotation, params->errorCorrectionRate)) - return 0; - - return typ; -} - -/** - * @brief Copy the contents of a corners vector to an OutputArray, settings its size. - */ -static void _copyVector2Output(vector< vector< Point2f > > &vec, OutputArrayOfArrays out, const float scale = 1.f) { - out.create((int)vec.size(), 1, CV_32FC2); - - if(out.isMatVector()) { - for (unsigned int i = 0; i < vec.size(); i++) { - out.create(4, 1, CV_32FC2, i); - Mat &m = out.getMatRef(i); - Mat(Mat(vec[i]).t()*scale).copyTo(m); - } - } - else if(out.isUMatVector()) { - for (unsigned int i = 0; i < vec.size(); i++) { - out.create(4, 1, CV_32FC2, i); - UMat &m = out.getUMatRef(i); - Mat(Mat(vec[i]).t()*scale).copyTo(m); - } - } - else if(out.kind() == _OutputArray::STD_VECTOR_VECTOR){ - for (unsigned int i = 0; i < vec.size(); i++) { - out.create(4, 1, CV_32FC2, i); - Mat m = out.getMat(i); - Mat(Mat(vec[i]).t()*scale).copyTo(m); - } - } - else { - CV_Error(cv::Error::StsNotImplemented, - "Only Mat vector, UMat vector, and vector OutputArrays are currently supported."); - } -} - -/** - * @brief rotate the initial corner to get to the right position - */ -static void correctCornerPosition( vector< Point2f >& _candidate, int rotate){ - std::rotate(_candidate.begin(), 
_candidate.begin() + 4 - rotate, _candidate.end()); -} - -static size_t _findOptPyrImageForCanonicalImg( - const std::vector& img_pyr, - const int scaled_width, - const int cur_perimeter, - const int min_perimeter) { - CV_Assert(scaled_width > 0); - size_t optLevel = 0; - float dist = std::numeric_limits::max(); - for (size_t i = 0; i < img_pyr.size(); ++i) { - const float scale = img_pyr[i].cols / static_cast(scaled_width); - const float perimeter_scaled = cur_perimeter * scale; - // instead of std::abs() favor the larger pyramid level by checking if the distance is postive - // will slow down the algorithm but find more corners in the end - const float new_dist = perimeter_scaled - min_perimeter; - if (new_dist < dist && new_dist > 0.f) { - dist = new_dist; - optLevel = i; - } - } - return optLevel; -} - -/** - * @brief Identify square candidates according to a marker dictionary - */ - -static void _identifyCandidates(InputArray grey, - const std::vector& image_pyr, - vector< vector< vector< Point2f > > >& _candidatesSet, - vector< vector< vector > >& _contoursSet, const Ptr &_dictionary, - vector< vector< Point2f > >& _accepted, vector< vector >& _contours, vector< int >& ids, - const Ptr ¶ms, - OutputArrayOfArrays _rejected = noArray()) { - CV_DbgAssert(grey.getMat().total() != 0); - CV_DbgAssert(grey.getMat().type() == CV_8UC1); - int ncandidates = (int)_candidatesSet[0].size(); - vector< vector< Point2f > > accepted; - vector< vector< Point2f > > rejected; - vector< vector< Point > > contours; - - vector< int > idsTmp(ncandidates, -1); - vector< int > rotated(ncandidates, 0); - vector< uint8_t > validCandidates(ncandidates, 0); - - //// Analyze each of the candidates - parallel_for_(Range(0, ncandidates), [&](const Range &range) { - const int begin = range.start; - const int end = range.end; - - vector< vector< Point2f > >& candidates = params->detectInvertedMarker ? 
_candidatesSet[1] : _candidatesSet[0]; - vector< vector< Point > >& contourS = params->detectInvertedMarker ? _contoursSet[1] : _contoursSet[0]; - - for(int i = begin; i < end; i++) { - int currId = -1; - // implements equation (4) - if (params->useAruco3Detection) { - const int perimeterOfContour = static_cast(contourS[i].size()); - const int min_perimeter = params->minSideLengthCanonicalImg * 4; - const size_t nearestImgId = _findOptPyrImageForCanonicalImg(image_pyr, grey.cols(), perimeterOfContour, min_perimeter); - const float scale = image_pyr[nearestImgId].cols / static_cast(grey.cols()); - - validCandidates[i] = _identifyOneCandidate(_dictionary, image_pyr[nearestImgId], candidates[i], currId, params, rotated[i], scale); - } - else { - validCandidates[i] = _identifyOneCandidate(_dictionary, grey, candidates[i], currId, params, rotated[i]); - } - - if(validCandidates[i] > 0) - idsTmp[i] = currId; - } - }); - - for(int i = 0; i < ncandidates; i++) { - if(validCandidates[i] > 0) { - // to choose the right set of candidates :: 0 for default, 1 for white markers - uint8_t set = validCandidates[i]-1; - - // shift corner positions to the correct rotation - correctCornerPosition(_candidatesSet[set][i], rotated[i]); - - if( !params->detectInvertedMarker && validCandidates[i] == 2 ) - continue; - - // add valid candidate - accepted.push_back(_candidatesSet[set][i]); - ids.push_back(idsTmp[i]); - - contours.push_back(_contoursSet[set][i]); - - } else { - rejected.push_back(_candidatesSet[0][i]); - } - } - - // parse output - _accepted = accepted; - - _contours= contours; - - if(_rejected.needed()) { - _copyVector2Output(rejected, _rejected); - } -} - - -/** - * @brief Return object points for the system centered in a middle (by default) or in a top left corner of single - * marker, given the marker length - */ -static void _getSingleMarkerObjectPoints(float markerLength, OutputArray _objPoints, - EstimateParameters estimateParameters) { - - CV_Assert(markerLength > 0); 
- - _objPoints.create(4, 1, CV_32FC3); - Mat objPoints = _objPoints.getMat(); - // set coordinate system in the top-left corner of the marker, with Z pointing out - if (estimateParameters.pattern == CW_top_left_corner) { - objPoints.ptr(0)[0] = Vec3f(0.f, 0.f, 0); - objPoints.ptr(0)[1] = Vec3f(markerLength, 0.f, 0); - objPoints.ptr(0)[2] = Vec3f(markerLength, markerLength, 0); - objPoints.ptr(0)[3] = Vec3f(0.f, markerLength, 0); - } - else if (estimateParameters.pattern == CCW_center) { - objPoints.ptr(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0); - objPoints.ptr(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0); - objPoints.ptr(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0); - objPoints.ptr(0)[3] = Vec3f(-markerLength/2.f, -markerLength/2.f, 0); - } - else - CV_Error(Error::StsBadArg, "Unknown estimateParameters pattern"); -} - -/** - * Line fitting A * B = C :: Called from function refineCandidateLines - * @param nContours, contour-container - */ -static Point3f _interpolate2Dline(const std::vector& nContours){ - CV_Assert(nContours.size() >= 2); - float minX, minY, maxX, maxY; - minX = maxX = nContours[0].x; - minY = maxY = nContours[0].y; - - for(unsigned int i = 0; i< nContours.size(); i++){ - minX = nContours[i].x < minX ? nContours[i].x : minX; - minY = nContours[i].y < minY ? nContours[i].y : minY; - maxX = nContours[i].x > maxX ? nContours[i].x : maxX; - maxY = nContours[i].y > maxY ? 
nContours[i].y : maxY; - } - - Mat A = Mat::ones((int)nContours.size(), 2, CV_32F); // Coefficient Matrix (N x 2) - Mat B((int)nContours.size(), 1, CV_32F); // Variables Matrix (N x 1) - Mat C; // Constant - - if(maxX - minX > maxY - minY){ - for(unsigned int i =0; i < nContours.size(); i++){ - A.at(i,0)= nContours[i].x; - B.at(i,0)= nContours[i].y; - } - - solve(A, B, C, DECOMP_NORMAL); - - return Point3f(C.at(0, 0), -1., C.at(1, 0)); - } - else{ - for(unsigned int i =0; i < nContours.size(); i++){ - A.at(i,0)= nContours[i].y; - B.at(i,0)= nContours[i].x; - } - - solve(A, B, C, DECOMP_NORMAL); - - return Point3f(-1., C.at(0, 0), C.at(1, 0)); - } - -} - -/** - * Find the Point where the lines crosses :: Called from function refineCandidateLines - * @param nLine1 - * @param nLine2 - * @return Crossed Point - */ -static Point2f _getCrossPoint(Point3f nLine1, Point3f nLine2){ - Matx22f A(nLine1.x, nLine1.y, nLine2.x, nLine2.y); - Vec2f B(-nLine1.z, -nLine2.z); - return Vec2f(A.solve(B).val); -} - -/** - * Refine Corners using the contour vector :: Called from function detectMarkers - * @param nContours, contour-container - * @param nCorners, candidate Corners - * @param camMatrix, cameraMatrix input 3x3 floating-point camera matrix - * @param distCoeff, distCoeffs vector of distortion coefficient - */ -static void _refineCandidateLines(std::vector& nContours, std::vector& nCorners){ - vector contour2f(nContours.begin(), nContours.end()); - /* 5 groups :: to group the edges - * 4 - classified by its corner - * extra group - (temporary) if contours do not begin with a corner - */ - vector cntPts[5]; - int cornerIndex[4]={-1}; - int group=4; - - for ( unsigned int i =0; i < nContours.size(); i++ ) { - for(unsigned int j=0; j<4; j++){ - if ( nCorners[j] == contour2f[i] ){ - cornerIndex[j] = i; - group=j; - } - } - cntPts[group].push_back(contour2f[i]); - } - for (int i = 0; i < 4; i++) - { - CV_Assert(cornerIndex[i] != -1); - } - - // saves extra group into corresponding 
- if( !cntPts[4].empty() ){ - for( unsigned int i=0; i < cntPts[4].size() ; i++ ) - cntPts[group].push_back(cntPts[4].at(i)); - cntPts[4].clear(); - } - - //Evaluate contour direction :: using the position of the detected corners - int inc=1; - - inc = ( (cornerIndex[0] > cornerIndex[1]) && (cornerIndex[3] > cornerIndex[0]) ) ? -1:inc; - inc = ( (cornerIndex[2] > cornerIndex[3]) && (cornerIndex[1] > cornerIndex[2]) ) ? -1:inc; - - // calculate the line :: who passes through the grouped points - Point3f lines[4]; - for(int i=0; i<4; i++){ - lines[i]=_interpolate2Dline(cntPts[i]); - } - - /* - * calculate the corner :: where the lines crosses to each other - * clockwise direction no clockwise direction - * 0 1 - * .---. 1 .---. 2 - * | | | | - * 3 .___. 0 .___. - * 2 3 - */ - for(int i=0; i < 4; i++){ - if(inc<0) - nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+1)%4 ]); // 01 12 23 30 - else - nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+3)%4 ]); // 30 01 12 23 - } -} - -#ifdef APRIL_DEBUG -static void _darken(const Mat &im){ - for (int y = 0; y < im.rows; y++) { - for (int x = 0; x < im.cols; x++) { - im.data[im.cols*y+x] /= 2; - } - } -} -#endif - -/** - * - * @param im_orig - * @param _params - * @param candidates - * @param contours - */ -static void _apriltag(Mat im_orig, const Ptr & _params, std::vector< std::vector< Point2f > > &candidates, - std::vector< std::vector< Point > > &contours){ - - /////////////////////////////////////////////////////////// - /// Step 1. Detect quads according to requested image decimation - /// and blurring parameters. - Mat quad_im; - im_orig.copyTo(quad_im); - - if (_params->aprilTagQuadDecimate > 1){ - resize(im_orig, quad_im, Size(), 1/_params->aprilTagQuadDecimate, 1/_params->aprilTagQuadDecimate, INTER_AREA ); - } - - // Apply a Blur - if (_params->aprilTagQuadSigma != 0) { - // compute a reasonable kernel width by figuring that the - // kernel should go out 2 std devs. 
- // - // max sigma ksz - // 0.499 1 (disabled) - // 0.999 3 - // 1.499 5 - // 1.999 7 - - float sigma = fabsf((float) _params->aprilTagQuadSigma); - - int ksz = cvFloor(4 * sigma); // 2 std devs in each direction - ksz |= 1; // make odd number - - if (ksz > 1) { - if (_params->aprilTagQuadSigma > 0) - GaussianBlur(quad_im, quad_im, Size(ksz, ksz), sigma, sigma, BORDER_REPLICATE); - else { - Mat orig; - quad_im.copyTo(orig); - GaussianBlur(quad_im, quad_im, Size(ksz, ksz), sigma, sigma, BORDER_REPLICATE); - - // SHARPEN the image by subtracting the low frequency components. - for (int y = 0; y < orig.rows; y++) { - for (int x = 0; x < orig.cols; x++) { - int vorig = orig.data[y*orig.step + x]; - int vblur = quad_im.data[y*quad_im.step + x]; - - int v = 2*vorig - vblur; - if (v < 0) - v = 0; - if (v > 255) - v = 255; - - quad_im.data[y*quad_im.step + x] = (uint8_t) v; - } - } - } - } - } - -#ifdef APRIL_DEBUG - imwrite("1.1 debug_preprocess.pnm", quad_im); -#endif - - /////////////////////////////////////////////////////////// - /// Step 2. do the Threshold :: get the set of candidate quads - zarray_t *quads = apriltag_quad_thresh(_params, quad_im, contours); - - CV_Assert(quads != NULL); - - // adjust centers of pixels so that they correspond to the - // original full-resolution image. 
- if (_params->aprilTagQuadDecimate > 1) { - for (int i = 0; i < _zarray_size(quads); i++) { - struct sQuad *q; - _zarray_get_volatile(quads, i, &q); - for (int j = 0; j < 4; j++) { - q->p[j][0] *= _params->aprilTagQuadDecimate; - q->p[j][1] *= _params->aprilTagQuadDecimate; - } - } - } - -#ifdef APRIL_DEBUG - Mat im_quads = im_orig.clone(); - im_quads = im_quads*0.5; - srandom(0); - - for (int i = 0; i < _zarray_size(quads); i++) { - struct sQuad *quad; - _zarray_get_volatile(quads, i, &quad); - - const int bias = 100; - int color = bias + (random() % (255-bias)); - - line(im_quads, Point(quad->p[0][0], quad->p[0][1]), Point(quad->p[1][0], quad->p[1][1]), color, 1); - line(im_quads, Point(quad->p[1][0], quad->p[1][1]), Point(quad->p[2][0], quad->p[2][1]), color, 1); - line(im_quads, Point(quad->p[2][0], quad->p[2][1]), Point(quad->p[3][0], quad->p[3][1]), color, 1); - line(im_quads, Point(quad->p[3][0], quad->p[3][1]), Point(quad->p[0][0], quad->p[0][1]), color, 1); - } - imwrite("1.2 debug_quads_raw.pnm", im_quads); -#endif - - //////////////////////////////////////////////////////////////// - /// Step 3. 
Save the output :: candidate corners - for (int i = 0; i < _zarray_size(quads); i++) { - struct sQuad *quad; - _zarray_get_volatile(quads, i, &quad); - - std::vector< Point2f > corners; - corners.push_back(Point2f(quad->p[3][0], quad->p[3][1])); //pA - corners.push_back(Point2f(quad->p[0][0], quad->p[0][1])); //pB - corners.push_back(Point2f(quad->p[1][0], quad->p[1][1])); //pC - corners.push_back(Point2f(quad->p[2][0], quad->p[2][1])); //pD - - candidates.push_back(corners); - } - - _zarray_destroy(quads); -} - -static inline void findCornerInPyrImage(const float scale_init, const int closest_pyr_image_idx, - const std::vector& grey_pyramid, Mat corners, - const Ptr& params) { - // scale them to the closest pyramid level - if (scale_init != 1.f) - corners *= scale_init; // scale_init * scale_pyr - for (int idx = closest_pyr_image_idx - 1; idx >= 0; --idx) { - // scale them to new pyramid level - corners *= 2.f; // *= scale_pyr; - // use larger win size for larger images - const int subpix_win_size = std::max(grey_pyramid[idx].cols, grey_pyramid[idx].rows) > 1080 ? 5 : 3; - cornerSubPix(grey_pyramid[idx], corners, - Size(subpix_win_size, subpix_win_size), - Size(-1, -1), - TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, - params->cornerRefinementMaxIterations, - params->cornerRefinementMinAccuracy)); - } -} - -/** - */ void detectMarkers(InputArray _image, const Ptr &_dictionary, OutputArrayOfArrays _corners, OutputArray _ids, const Ptr &_params, OutputArrayOfArrays _rejectedImgPoints) { - - CV_Assert(!_image.empty()); - CV_Assert(_params->markerBorderBits > 0); - // check that the parameters are set correctly if Aruco3 is used - CV_Assert(!(_params->useAruco3Detection == true && - _params->minSideLengthCanonicalImg == 0 && - _params->minMarkerLengthRatioOriginalImg == 0.0)); - - Mat grey; - _convertToGrey(_image.getMat(), grey); - - // Aruco3 functionality is the extension of Aruco. 
- // The description can be found in: - // [1] Speeded up detection of squared fiducial markers, 2018, FJ Romera-Ramirez et al. - // if Aruco3 functionality if not wanted - // change some parameters to be sure to turn it off - if (!_params->useAruco3Detection) { - _params->minMarkerLengthRatioOriginalImg = 0.0; - _params->minSideLengthCanonicalImg = 0; - } - else { - // always turn on corner refinement in case of Aruco3, due to upsampling - _params->cornerRefinementMethod = CORNER_REFINE_SUBPIX; - } - - /// Step 0: equation (2) from paper [1] - const float fxfy = (!_params->useAruco3Detection ? 1.f : _params->minSideLengthCanonicalImg / - (_params->minSideLengthCanonicalImg + std::max(grey.cols, grey.rows)*_params->minMarkerLengthRatioOriginalImg)); - - /// Step 1: create image pyramid. Section 3.4. in [1] - std::vector grey_pyramid; - int closest_pyr_image_idx = 0, num_levels = 0; - //// Step 1.1: resize image with equation (1) from paper [1] - if (_params->useAruco3Detection) { - const float scale_pyr = 2.f; - const float img_area = static_cast(grey.rows*grey.cols); - const float min_area_marker = static_cast(_params->minSideLengthCanonicalImg*_params->minSideLengthCanonicalImg); - // find max level - num_levels = static_cast(log2(img_area / min_area_marker)/scale_pyr); - // the closest pyramid image to the downsampled segmentation image - // will later be used as start index for corner upsampling - const float scale_img_area = img_area * fxfy * fxfy; - closest_pyr_image_idx = cvRound(log2(img_area / scale_img_area)/scale_pyr); - } - cv::buildPyramid(grey, grey_pyramid, num_levels); - - // resize to segmentation image - // in this reduces size the contours will be detected - if (fxfy != 1.f) - cv::resize(grey, grey, cv::Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows))); - - /// STEP 2: Detect marker candidates - vector< vector< Point2f > > candidates; - vector< vector< Point > > contours; - vector< int > ids; - - vector< vector< vector< Point2f > > > 
candidatesSet; - vector< vector< vector< Point > > > contoursSet; - - /// STEP 2.a Detect marker candidates :: using AprilTag - if(_params->cornerRefinementMethod == CORNER_REFINE_APRILTAG){ - _apriltag(grey, _params, candidates, contours); - - candidatesSet.push_back(candidates); - contoursSet.push_back(contours); - } - /// STEP 2.b Detect marker candidates :: traditional way - else - _detectCandidates(grey, candidatesSet, contoursSet, _params); - - /// STEP 2: Check candidate codification (identify markers) - _identifyCandidates(grey, grey_pyramid, candidatesSet, contoursSet, _dictionary, - candidates, contours, ids, _params, _rejectedImgPoints); - - // copy to output arrays - _copyVector2Output(candidates, _corners); - Mat(ids).copyTo(_ids); - - /// STEP 3: Corner refinement :: use corner subpix - if( _params->cornerRefinementMethod == CORNER_REFINE_SUBPIX ) { - CV_Assert(_params->cornerRefinementWinSize > 0 && _params->cornerRefinementMaxIterations > 0 && - _params->cornerRefinementMinAccuracy > 0); - // Do subpixel estimation. In Aruco3 start on the lowest pyramid level and upscale the corners - parallel_for_(Range(0, _corners.cols()), [&](const Range& range) { - const int begin = range.start; - const int end = range.end; - - for (int i = begin; i < end; i++) { - if (_params->useAruco3Detection) { - const float scale_init = (float) grey_pyramid[closest_pyr_image_idx].cols / grey.cols; - findCornerInPyrImage(scale_init, closest_pyr_image_idx, grey_pyramid, _corners.getMat(i), _params); - } - else - cornerSubPix(grey, _corners.getMat(i), - Size(_params->cornerRefinementWinSize, _params->cornerRefinementWinSize), - Size(-1, -1), - TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, - _params->cornerRefinementMaxIterations, - _params->cornerRefinementMinAccuracy)); - } - }); - } - - /// STEP 3, Optional : Corner refinement :: use contour container - if( _params->cornerRefinementMethod == CORNER_REFINE_CONTOUR){ - - if(! 
_ids.empty()){ - - // do corner refinement using the contours for each detected markers - parallel_for_(Range(0, _corners.cols()), [&](const Range& range) { - for (int i = range.start; i < range.end; i++) { - _refineCandidateLines(contours[i], candidates[i]); - } - }); - - // copy the corners to the output array - _copyVector2Output(candidates, _corners); - } - } - if (_params->cornerRefinementMethod != CORNER_REFINE_APRILTAG && - _params->cornerRefinementMethod != CORNER_REFINE_SUBPIX) { - // scale to orignal size, this however will lead to inaccurate detections! - _copyVector2Output(candidates, _corners, 1.f/fxfy); - } -} - -/** - */ -void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength, - InputArray _cameraMatrix, InputArray _distCoeffs, - OutputArray _rvecs, OutputArray _tvecs, OutputArray _objPoints, - Ptr estimateParameters) { - - CV_Assert(markerLength > 0); - CV_Assert(estimateParameters); - - Mat markerObjPoints; - _getSingleMarkerObjectPoints(markerLength, markerObjPoints, *estimateParameters); - int nMarkers = (int)_corners.total(); - _rvecs.create(nMarkers, 1, CV_64FC3); - _tvecs.create(nMarkers, 1, CV_64FC3); - - Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat(); - - //// for each marker, calculate its pose - parallel_for_(Range(0, nMarkers), [&](const Range& range) { - const int begin = range.start; - const int end = range.end; - - for (int i = begin; i < end; i++) { - solvePnP(markerObjPoints, _corners.getMat(i), _cameraMatrix, _distCoeffs, rvecs.at(i), - tvecs.at(i), estimateParameters->useExtrinsicGuess, estimateParameters->solvePnPMethod); - } - }); - - if(_objPoints.needed()){ - markerObjPoints.convertTo(_objPoints, -1); - } -} - - - -void getBoardObjectAndImagePoints(const Ptr &board, InputArrayOfArrays detectedCorners, - InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints) { - - CV_Assert(board->ids.size() == board->objPoints.size()); - CV_Assert(detectedIds.total() == detectedCorners.total()); - - 
size_t nDetectedMarkers = detectedIds.total(); - - vector< Point3f > objPnts; - objPnts.reserve(nDetectedMarkers); - - vector< Point2f > imgPnts; - imgPnts.reserve(nDetectedMarkers); - - // look for detected markers that belong to the board and get their information - for(unsigned int i = 0; i < nDetectedMarkers; i++) { - int currentId = detectedIds.getMat().ptr< int >(0)[i]; - for(unsigned int j = 0; j < board->ids.size(); j++) { - if(currentId == board->ids[j]) { - for(int p = 0; p < 4; p++) { - objPnts.push_back(board->objPoints[j][p]); - imgPnts.push_back(detectedCorners.getMat(i).ptr< Point2f >(0)[p]); - } - } - } - } - - // create output - Mat(objPnts).copyTo(objPoints); - Mat(imgPnts).copyTo(imgPoints); + ArucoDetector detector(_dictionary, _params); + detector.detectMarkers(_image, _corners, _ids, _rejectedImgPoints); } - - -/** - * Project board markers that are not included in the list of detected markers - */ -static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArrayOfArrays _detectedCorners, - InputOutputArray _detectedIds, InputArray _cameraMatrix, - InputArray _distCoeffs, - vector< vector< Point2f > >& _undetectedMarkersProjectedCorners, - OutputArray _undetectedMarkersIds) { - - // first estimate board pose with the current avaible markers - Mat rvec, tvec; - int boardDetectedMarkers; - boardDetectedMarkers = aruco::estimatePoseBoard(_detectedCorners, _detectedIds, _board, - _cameraMatrix, _distCoeffs, rvec, tvec); - - // at least one marker from board so rvec and tvec are valid - if(boardDetectedMarkers == 0) return; - - // search undetected markers and project them using the previous pose - vector< vector< Point2f > > undetectedCorners; - vector< int > undetectedIds; - for(unsigned int i = 0; i < _board->ids.size(); i++) { - int foundIdx = -1; - for(unsigned int j = 0; j < _detectedIds.total(); j++) { - if(_board->ids[i] == _detectedIds.getMat().ptr< int >()[j]) { - foundIdx = j; - break; - } - } - - // not detected - if(foundIdx 
== -1) { - undetectedCorners.push_back(vector< Point2f >()); - undetectedIds.push_back(_board->ids[i]); - projectPoints(_board->objPoints[i], rvec, tvec, _cameraMatrix, _distCoeffs, - undetectedCorners.back()); - } - } - - - // parse output - Mat(undetectedIds).copyTo(_undetectedMarkersIds); - _undetectedMarkersProjectedCorners = undetectedCorners; -} - - - -/** - * Interpolate board markers that are not included in the list of detected markers using - * global homography - */ -static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArrayOfArrays _detectedCorners, - InputOutputArray _detectedIds, - vector< vector< Point2f > >& _undetectedMarkersProjectedCorners, - OutputArray _undetectedMarkersIds) { - - - // check board points are in the same plane, if not, global homography cannot be applied - CV_Assert(_board->objPoints.size() > 0); - CV_Assert(_board->objPoints[0].size() > 0); - float boardZ = _board->objPoints[0][0].z; - for(unsigned int i = 0; i < _board->objPoints.size(); i++) { - for(unsigned int j = 0; j < _board->objPoints[i].size(); j++) { - CV_Assert(boardZ == _board->objPoints[i][j].z); - } - } - - vector< Point2f > detectedMarkersObj2DAll; // Object coordinates (without Z) of all the detected - // marker corners in a single vector - vector< Point2f > imageCornersAll; // Image corners of all detected markers in a single vector - vector< vector< Point2f > > undetectedMarkersObj2D; // Object coordinates (without Z) of all - // missing markers in different vectors - vector< int > undetectedMarkersIds; // ids of missing markers - // find markers included in board, and missing markers from board. 
Fill the previous vectors - for(unsigned int j = 0; j < _board->ids.size(); j++) { - bool found = false; - for(unsigned int i = 0; i < _detectedIds.total(); i++) { - if(_detectedIds.getMat().ptr< int >()[i] == _board->ids[j]) { - for(int c = 0; c < 4; c++) { - imageCornersAll.push_back(_detectedCorners.getMat(i).ptr< Point2f >()[c]); - detectedMarkersObj2DAll.push_back( - Point2f(_board->objPoints[j][c].x, _board->objPoints[j][c].y)); - } - found = true; - break; - } - } - if(!found) { - undetectedMarkersObj2D.push_back(vector< Point2f >()); - for(int c = 0; c < 4; c++) { - undetectedMarkersObj2D.back().push_back( - Point2f(_board->objPoints[j][c].x, _board->objPoints[j][c].y)); - } - undetectedMarkersIds.push_back(_board->ids[j]); - } - } - if(imageCornersAll.size() == 0) return; - - // get homography from detected markers - Mat transformation = findHomography(detectedMarkersObj2DAll, imageCornersAll); - - _undetectedMarkersProjectedCorners.resize(undetectedMarkersIds.size()); - - // for each undetected marker, apply transformation - for(unsigned int i = 0; i < undetectedMarkersObj2D.size(); i++) { - perspectiveTransform(undetectedMarkersObj2D[i], _undetectedMarkersProjectedCorners[i], transformation); - } - - Mat(undetectedMarkersIds).copyTo(_undetectedMarkersIds); -} - - - -/** - */ void refineDetectedMarkers(InputArray _image, const Ptr &_board, InputOutputArrayOfArrays _detectedCorners, InputOutputArray _detectedIds, InputOutputArrayOfArrays _rejectedCorners, InputArray _cameraMatrix, InputArray _distCoeffs, float minRepDistance, float errorCorrectionRate, bool checkAllOrders, OutputArray _recoveredIdxs, const Ptr &_params) { - - CV_Assert(minRepDistance > 0); - - if(_detectedIds.total() == 0 || _rejectedCorners.total() == 0) return; - - DetectorParameters ¶ms = *_params; - - // get projections of missing markers in the board - vector< vector< Point2f > > undetectedMarkersCorners; - vector< int > undetectedMarkersIds; - if(_cameraMatrix.total() != 0) { - // 
reproject based on camera projection model - _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, _cameraMatrix, _distCoeffs, - undetectedMarkersCorners, undetectedMarkersIds); - - } else { - // reproject based on global homography - _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, undetectedMarkersCorners, - undetectedMarkersIds); - } - - // list of missing markers indicating if they have been assigned to a candidate - vector< bool > alreadyIdentified(_rejectedCorners.total(), false); - - // maximum bits that can be corrected - Dictionary &dictionary = *(_board->dictionary); - int maxCorrectionRecalculated = - int(double(dictionary.maxCorrectionBits) * errorCorrectionRate); - - Mat grey; - _convertToGrey(_image, grey); - - // vector of final detected marker corners and ids - vector > finalAcceptedCorners; - vector< int > finalAcceptedIds; - // fill with the current markers - finalAcceptedCorners.resize(_detectedCorners.total()); - finalAcceptedIds.resize(_detectedIds.total()); - for(unsigned int i = 0; i < _detectedIds.total(); i++) { - finalAcceptedCorners[i] = _detectedCorners.getMat(i).clone(); - finalAcceptedIds[i] = _detectedIds.getMat().ptr< int >()[i]; - } - vector< int > recoveredIdxs; // original indexes of accepted markers in _rejectedCorners - - // for each missing marker, try to find a correspondence - for(unsigned int i = 0; i < undetectedMarkersIds.size(); i++) { - - // best match at the moment - int closestCandidateIdx = -1; - double closestCandidateDistance = minRepDistance * minRepDistance + 1; - Mat closestRotatedMarker; - - for(unsigned int j = 0; j < _rejectedCorners.total(); j++) { - if(alreadyIdentified[j]) continue; - - // check distance - double minDistance = closestCandidateDistance + 1; - bool valid = false; - int validRot = 0; - for(int c = 0; c < 4; c++) { // first corner in rejected candidate - double currentMaxDistance = 0; - for(int k = 0; k < 4; k++) { - Point2f rejCorner = 
_rejectedCorners.getMat(j).ptr< Point2f >()[(c + k) % 4]; - Point2f distVector = undetectedMarkersCorners[i][k] - rejCorner; - double cornerDist = distVector.x * distVector.x + distVector.y * distVector.y; - currentMaxDistance = max(currentMaxDistance, cornerDist); - } - // if distance is better than current best distance - if(currentMaxDistance < closestCandidateDistance) { - valid = true; - validRot = c; - minDistance = currentMaxDistance; - } - if(!checkAllOrders) break; - } - - if(!valid) continue; - - // apply rotation - Mat rotatedMarker; - if(checkAllOrders) { - rotatedMarker = Mat(4, 1, CV_32FC2); - for(int c = 0; c < 4; c++) - rotatedMarker.ptr< Point2f >()[c] = - _rejectedCorners.getMat(j).ptr< Point2f >()[(c + 4 + validRot) % 4]; - } - else rotatedMarker = _rejectedCorners.getMat(j); - - // last filter, check if inner code is close enough to the assigned marker code - int codeDistance = 0; - // if errorCorrectionRate, dont check code - if(errorCorrectionRate >= 0) { - - // extract bits - Mat bits = _extractBits( - grey, rotatedMarker, dictionary.markerSize, params.markerBorderBits, - params.perspectiveRemovePixelPerCell, - params.perspectiveRemoveIgnoredMarginPerCell, params.minOtsuStdDev); - - Mat onlyBits = - bits.rowRange(params.markerBorderBits, bits.rows - params.markerBorderBits) - .colRange(params.markerBorderBits, bits.rows - params.markerBorderBits); - - codeDistance = - dictionary.getDistanceToId(onlyBits, undetectedMarkersIds[i], false); - } - - // if everythin is ok, assign values to current best match - if(errorCorrectionRate < 0 || codeDistance < maxCorrectionRecalculated) { - closestCandidateIdx = j; - closestCandidateDistance = minDistance; - closestRotatedMarker = rotatedMarker; - } - } - - // if at least one good match, we have rescue the missing marker - if(closestCandidateIdx >= 0) { - - // subpixel refinement - if(_params->cornerRefinementMethod == CORNER_REFINE_SUBPIX) { - CV_Assert(params.cornerRefinementWinSize > 0 && - 
params.cornerRefinementMaxIterations > 0 && - params.cornerRefinementMinAccuracy > 0); - cornerSubPix(grey, closestRotatedMarker, - Size(params.cornerRefinementWinSize, params.cornerRefinementWinSize), - Size(-1, -1), TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, - params.cornerRefinementMaxIterations, - params.cornerRefinementMinAccuracy)); - } - - // remove from rejected - alreadyIdentified[closestCandidateIdx] = true; - - // add to detected - finalAcceptedCorners.push_back(closestRotatedMarker); - finalAcceptedIds.push_back(undetectedMarkersIds[i]); - - // add the original index of the candidate - recoveredIdxs.push_back(closestCandidateIdx); - } - } - - // parse output - if(finalAcceptedIds.size() != _detectedIds.total()) { - // parse output - Mat(finalAcceptedIds).copyTo(_detectedIds); - _copyVector2Output(finalAcceptedCorners, _detectedCorners); - - // recalculate _rejectedCorners based on alreadyIdentified - vector > finalRejected; - for(unsigned int i = 0; i < alreadyIdentified.size(); i++) { - if(!alreadyIdentified[i]) { - finalRejected.push_back(_rejectedCorners.getMat(i).clone()); - } - } - _copyVector2Output(finalRejected, _rejectedCorners); - - if(_recoveredIdxs.needed()) { - Mat(recoveredIdxs).copyTo(_recoveredIdxs); - } - } -} - - - - -/** - */ -int estimatePoseBoard(InputArrayOfArrays _corners, InputArray _ids, const Ptr &board, - InputArray _cameraMatrix, InputArray _distCoeffs, InputOutputArray _rvec, - InputOutputArray _tvec, bool useExtrinsicGuess) { - - CV_Assert(_corners.total() == _ids.total()); - - // get object and image points for the solvePnP function - Mat objPoints, imgPoints; - getBoardObjectAndImagePoints(board, _corners, _ids, objPoints, imgPoints); - - CV_Assert(imgPoints.total() == objPoints.total()); - - if(objPoints.total() == 0) // 0 of the detected markers in board - return 0; - - solvePnP(objPoints, imgPoints, _cameraMatrix, _distCoeffs, _rvec, _tvec, useExtrinsicGuess); - - // divide by four since all the four 
corners are concatenated in the array for each marker - return (int)objPoints.total() / 4; -} - - - - -/** - */ -void GridBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) { - _drawPlanarBoardImpl(this, outSize, _img, marginSize, borderBits); + Ptr refineParams = RefineParameters::create(minRepDistance, errorCorrectionRate, checkAllOrders); + ArucoDetector detector(_board->dictionary, _params, refineParams); + detector.refineDetectedMarkers(_image, _board, _detectedCorners, _detectedIds, _rejectedCorners, _cameraMatrix, + _distCoeffs, _recoveredIdxs); } - -/** -*/ -Ptr Board::create(InputArrayOfArrays objPoints, const Ptr &dictionary, InputArray ids) { - - CV_Assert(objPoints.total() == ids.total()); - CV_Assert(objPoints.type() == CV_32FC3 || objPoints.type() == CV_32FC1); - - std::vector< std::vector< Point3f > > obj_points_vector; - Point3f rightBottomBorder = Point3f(0.f, 0.f, 0.f); - for (unsigned int i = 0; i < objPoints.total(); i++) { - std::vector corners; - Mat corners_mat = objPoints.getMat(i); - - if(corners_mat.type() == CV_32FC1) - corners_mat = corners_mat.reshape(3); - CV_Assert(corners_mat.total() == 4); - - for (int j = 0; j < 4; j++) { - const Point3f& corner = corners_mat.at(j); - corners.push_back(corner); - rightBottomBorder.x = std::max(rightBottomBorder.x, corner.x); - rightBottomBorder.y = std::max(rightBottomBorder.y, corner.y); - rightBottomBorder.z = std::max(rightBottomBorder.z, corner.z); - } - obj_points_vector.push_back(corners); - } - - Ptr res = makePtr(); - ids.copyTo(res->ids); - res->objPoints = obj_points_vector; - res->dictionary = cv::makePtr(dictionary); - res->rightBottomBorder = rightBottomBorder; - return res; -} - -/** - */ -void Board::setIds(InputArray ids_) { - CV_Assert(objPoints.size() == ids_.total()); - ids_.copyTo(this->ids); -} - -/** - */ -Ptr GridBoard::create(int markersX, int markersY, float markerLength, float markerSeparation, - const Ptr &dictionary, int firstMarker) { - - 
CV_Assert(markersX > 0 && markersY > 0 && markerLength > 0 && markerSeparation > 0); - - Ptr res = makePtr(); - - res->_markersX = markersX; - res->_markersY = markersY; - res->_markerLength = markerLength; - res->_markerSeparation = markerSeparation; - res->dictionary = dictionary; - - size_t totalMarkers = (size_t) markersX * markersY; - res->ids.resize(totalMarkers); - res->objPoints.reserve(totalMarkers); - - // fill ids with first identifiers - for(unsigned int i = 0; i < totalMarkers; i++) { - res->ids[i] = i + firstMarker; - } - - // calculate Board objPoints - for(int y = 0; y < markersY; y++) { - for(int x = 0; x < markersX; x++) { - vector corners(4); - corners[0] = Point3f(x * (markerLength + markerSeparation), - y * (markerLength + markerSeparation), 0); - corners[1] = corners[0] + Point3f(markerLength, 0, 0); - corners[2] = corners[0] + Point3f(markerLength, markerLength, 0); - corners[3] = corners[0] + Point3f(0, markerLength, 0); - res->objPoints.push_back(corners); - } - } - res->rightBottomBorder = Point3f(markersX * markerLength + markerSeparation * (markersX - 1), - markersY * markerLength + markerSeparation * (markersY - 1), 0.f); - return res; -} - - - -/** - */ -void drawDetectedMarkers(InputOutputArray _image, InputArrayOfArrays _corners, - InputArray _ids, Scalar borderColor) { - - - CV_Assert(_image.getMat().total() != 0 && - (_image.getMat().channels() == 1 || _image.getMat().channels() == 3)); - CV_Assert((_corners.total() == _ids.total()) || _ids.total() == 0); - - // calculate colors - Scalar textColor, cornerColor; - textColor = cornerColor = borderColor; - swap(textColor.val[0], textColor.val[1]); // text color just sawp G and R - swap(cornerColor.val[1], cornerColor.val[2]); // corner color just sawp G and B - - int nMarkers = (int)_corners.total(); - for(int i = 0; i < nMarkers; i++) { - Mat currentMarker = _corners.getMat(i); - CV_Assert(currentMarker.total() == 4 && currentMarker.type() == CV_32FC2); - - // draw marker sides - 
for(int j = 0; j < 4; j++) { - Point2f p0, p1; - p0 = currentMarker.ptr< Point2f >(0)[j]; - p1 = currentMarker.ptr< Point2f >(0)[(j + 1) % 4]; - line(_image, p0, p1, borderColor, 1); - } - // draw first corner mark - rectangle(_image, currentMarker.ptr< Point2f >(0)[0] - Point2f(3, 3), - currentMarker.ptr< Point2f >(0)[0] + Point2f(3, 3), cornerColor, 1, LINE_AA); - - // draw ID - if(_ids.total() != 0) { - Point2f cent(0, 0); - for(int p = 0; p < 4; p++) - cent += currentMarker.ptr< Point2f >(0)[p]; - cent = cent / 4.; - stringstream s; - s << "id=" << _ids.getMat().ptr< int >(0)[i]; - putText(_image, s.str(), cent, FONT_HERSHEY_SIMPLEX, 0.5, textColor, 2); - } - } -} - - -/** - */ -void drawMarker(const Ptr &dictionary, int id, int sidePixels, OutputArray _img, int borderBits) { - dictionary->drawMarker(id, sidePixels, _img, borderBits); -} - - - -void _drawPlanarBoardImpl(Board *_board, Size outSize, OutputArray _img, int marginSize, - int borderBits) { - - CV_Assert(!outSize.empty()); - CV_Assert(marginSize >= 0); - - _img.create(outSize, CV_8UC1); - Mat out = _img.getMat(); - out.setTo(Scalar::all(255)); - out.adjustROI(-marginSize, -marginSize, -marginSize, -marginSize); - - // calculate max and min values in XY plane - CV_Assert(_board->objPoints.size() > 0); - float minX, maxX, minY, maxY; - minX = maxX = _board->objPoints[0][0].x; - minY = maxY = _board->objPoints[0][0].y; - - for(unsigned int i = 0; i < _board->objPoints.size(); i++) { - for(int j = 0; j < 4; j++) { - minX = min(minX, _board->objPoints[i][j].x); - maxX = max(maxX, _board->objPoints[i][j].x); - minY = min(minY, _board->objPoints[i][j].y); - maxY = max(maxY, _board->objPoints[i][j].y); - } - } - - float sizeX = maxX - minX; - float sizeY = maxY - minY; - - // proportion transformations - float xReduction = sizeX / float(out.cols); - float yReduction = sizeY / float(out.rows); - - // determine the zone where the markers are placed - if(xReduction > yReduction) { - int nRows = int(sizeY / 
xReduction); - int rowsMargins = (out.rows - nRows) / 2; - out.adjustROI(-rowsMargins, -rowsMargins, 0, 0); - } else { - int nCols = int(sizeX / yReduction); - int colsMargins = (out.cols - nCols) / 2; - out.adjustROI(0, 0, -colsMargins, -colsMargins); - } - - // now paint each marker - Dictionary &dictionary = *(_board->dictionary); - Mat marker; - Point2f outCorners[3]; - Point2f inCorners[3]; - for(unsigned int m = 0; m < _board->objPoints.size(); m++) { - // transform corners to markerZone coordinates - for(int j = 0; j < 3; j++) { - Point2f pf = Point2f(_board->objPoints[m][j].x, _board->objPoints[m][j].y); - // move top left to 0, 0 - pf -= Point2f(minX, minY); - pf.x = pf.x / sizeX * float(out.cols); - pf.y = pf.y / sizeY * float(out.rows); - outCorners[j] = pf; - } - - // get marker - Size dst_sz(outCorners[2] - outCorners[0]); // assuming CCW order - dst_sz.width = dst_sz.height = std::min(dst_sz.width, dst_sz.height); //marker should be square - dictionary.drawMarker(_board->ids[m], dst_sz.width, marker, borderBits); - - if((outCorners[0].y == outCorners[1].y) && (outCorners[1].x == outCorners[2].x)) { - // marker is aligned to image axes - marker.copyTo(out(Rect(outCorners[0], dst_sz))); - continue; - } - - // interpolate tiny marker to marker position in markerZone - inCorners[0] = Point2f(-0.5f, -0.5f); - inCorners[1] = Point2f(marker.cols - 0.5f, -0.5f); - inCorners[2] = Point2f(marker.cols - 0.5f, marker.rows - 0.5f); - - // remove perspective - Mat transformation = getAffineTransform(inCorners, outCorners); - warpAffine(marker, out, transformation, out.size(), INTER_LINEAR, - BORDER_TRANSPARENT); - } -} - - - -/** - */ -void drawPlanarBoard(const Ptr &_board, Size outSize, OutputArray _img, int marginSize, - int borderBits) { - _drawPlanarBoardImpl(_board, outSize, _img, marginSize, borderBits); -} - - - -/** - */ -double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter, - const Ptr &board, Size imageSize, 
InputOutputArray _cameraMatrix, - InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, - OutputArrayOfArrays _tvecs, - OutputArray _stdDeviationsIntrinsics, - OutputArray _stdDeviationsExtrinsics, - OutputArray _perViewErrors, - int flags, TermCriteria criteria) { - - // for each frame, get properly processed imagePoints and objectPoints for the calibrateCamera - // function - vector< Mat > processedObjectPoints, processedImagePoints; - size_t nFrames = _counter.total(); - int markerCounter = 0; - for(size_t frame = 0; frame < nFrames; frame++) { - int nMarkersInThisFrame = _counter.getMat().ptr< int >()[frame]; - vector< Mat > thisFrameCorners; - vector< int > thisFrameIds; - - CV_Assert(nMarkersInThisFrame > 0); - - thisFrameCorners.reserve((size_t) nMarkersInThisFrame); - thisFrameIds.reserve((size_t) nMarkersInThisFrame); - for(int j = markerCounter; j < markerCounter + nMarkersInThisFrame; j++) { - thisFrameCorners.push_back(_corners.getMat(j)); - thisFrameIds.push_back(_ids.getMat().ptr< int >()[j]); - } - markerCounter += nMarkersInThisFrame; - Mat currentImgPoints, currentObjPoints; - getBoardObjectAndImagePoints(board, thisFrameCorners, thisFrameIds, currentObjPoints, - currentImgPoints); - if(currentImgPoints.total() > 0 && currentObjPoints.total() > 0) { - processedImagePoints.push_back(currentImgPoints); - processedObjectPoints.push_back(currentObjPoints); - } - } - - return calibrateCamera(processedObjectPoints, processedImagePoints, imageSize, _cameraMatrix, - _distCoeffs, _rvecs, _tvecs, _stdDeviationsIntrinsics, _stdDeviationsExtrinsics, - _perViewErrors, flags, criteria); -} - - - -/** - */ -double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter, - const Ptr &board, Size imageSize, InputOutputArray _cameraMatrix, - InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, - OutputArrayOfArrays _tvecs, int flags, TermCriteria criteria) { - return calibrateCameraAruco(_corners, _ids, _counter, board, 
imageSize, _cameraMatrix, _distCoeffs, _rvecs, _tvecs, - noArray(), noArray(), noArray(), flags, criteria); -} - - - } } diff --git a/modules/aruco/src/aruco_calib_pose.cpp b/modules/aruco/src/aruco_calib_pose.cpp new file mode 100644 index 00000000000..7dc7200a503 --- /dev/null +++ b/modules/aruco/src/aruco_calib_pose.cpp @@ -0,0 +1,257 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include + +namespace cv { +namespace aruco { +using namespace std; + +void getBoardObjectAndImagePoints(const Ptr &board, InputArrayOfArrays detectedCorners, InputArray detectedIds, + OutputArray objPoints, OutputArray imgPoints) { + CV_Assert(board->ids.size() == board->objPoints.size()); + CV_Assert(detectedIds.total() == detectedCorners.total()); + + size_t nDetectedMarkers = detectedIds.total(); + + vector objPnts; + objPnts.reserve(nDetectedMarkers); + + vector imgPnts; + imgPnts.reserve(nDetectedMarkers); + + // look for detected markers that belong to the board and get their information + for(unsigned int i = 0; i < nDetectedMarkers; i++) { + int currentId = detectedIds.getMat().ptr< int >(0)[i]; + for(unsigned int j = 0; j < board->ids.size(); j++) { + if(currentId == board->ids[j]) { + for(int p = 0; p < 4; p++) { + objPnts.push_back(board->objPoints[j][p]); + imgPnts.push_back(detectedCorners.getMat(i).ptr< Point2f >(0)[p]); + } + } + } + } + + // create output + Mat(objPnts).copyTo(objPoints); + Mat(imgPnts).copyTo(imgPoints); +} + +/** + * @brief Return object points for the system centered in a middle (by default) or in a top left corner of single + * marker, given the marker length + */ +static Mat _getSingleMarkerObjectPoints(float markerLength, const EstimateParameters& estimateParameters) { + CV_Assert(markerLength > 0); + Mat objPoints(4, 1, CV_32FC3); + // set coordinate system in the top-left corner of the 
marker, with Z pointing out + if (estimateParameters.pattern == CW_top_left_corner) { + objPoints.ptr(0)[0] = Vec3f(0.f, 0.f, 0); + objPoints.ptr(0)[1] = Vec3f(markerLength, 0.f, 0); + objPoints.ptr(0)[2] = Vec3f(markerLength, markerLength, 0); + objPoints.ptr(0)[3] = Vec3f(0.f, markerLength, 0); + } + else if (estimateParameters.pattern == CCW_center) { + objPoints.ptr(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0); + objPoints.ptr(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0); + objPoints.ptr(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0); + objPoints.ptr(0)[3] = Vec3f(-markerLength/2.f, -markerLength/2.f, 0); + } + else + CV_Error(Error::StsBadArg, "Unknown estimateParameters pattern"); + return objPoints; +} + +void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength, + InputArray _cameraMatrix, InputArray _distCoeffs, + OutputArray _rvecs, OutputArray _tvecs, OutputArray _objPoints, + Ptr estimateParameters) { + CV_Assert(markerLength > 0); + + Mat markerObjPoints = _getSingleMarkerObjectPoints(markerLength, *estimateParameters); + int nMarkers = (int)_corners.total(); + _rvecs.create(nMarkers, 1, CV_64FC3); + _tvecs.create(nMarkers, 1, CV_64FC3); + + Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat(); + + //// for each marker, calculate its pose + parallel_for_(Range(0, nMarkers), [&](const Range& range) { + const int begin = range.start; + const int end = range.end; + + for (int i = begin; i < end; i++) { + solvePnP(markerObjPoints, _corners.getMat(i), _cameraMatrix, _distCoeffs, rvecs.at(i), + tvecs.at(i), estimateParameters->useExtrinsicGuess, estimateParameters->solvePnPMethod); + } + }); + + if(_objPoints.needed()){ + markerObjPoints.convertTo(_objPoints, -1); + } +} + +int estimatePoseBoard(InputArrayOfArrays _corners, InputArray _ids, const Ptr &board, + InputArray _cameraMatrix, InputArray _distCoeffs, InputOutputArray _rvec, + InputOutputArray _tvec, bool useExtrinsicGuess) { + CV_Assert(_corners.total() 
== _ids.total()); + + // get object and image points for the solvePnP function + Mat objPoints, imgPoints; + getBoardObjectAndImagePoints(board, _corners, _ids, objPoints, imgPoints); + + CV_Assert(imgPoints.total() == objPoints.total()); + + if(objPoints.total() == 0) // 0 of the detected markers in board + return 0; + + solvePnP(objPoints, imgPoints, _cameraMatrix, _distCoeffs, _rvec, _tvec, useExtrinsicGuess); + + // divide by four since all the four corners are concatenated in the array for each marker + return (int)objPoints.total() / 4; +} + +/** + * Check if a set of 3d points are enough for calibration. Z coordinate is ignored. + * Only axis parallel lines are considered + */ +static bool _arePointsEnoughForPoseEstimation(const vector &points) { + if(points.size() < 4) return false; + + vector sameXValue; // different x values in points + vector sameXCounter; // number of points with the x value in sameXValue + for(unsigned int i = 0; i < points.size(); i++) { + bool found = false; + for(unsigned int j = 0; j < sameXValue.size(); j++) { + if(sameXValue[j] == points[i].x) { + found = true; + sameXCounter[j]++; + } + } + if(!found) { + sameXValue.push_back(points[i].x); + sameXCounter.push_back(1); + } + } + + // count how many x values has more than 2 points + int moreThan2 = 0; + for(unsigned int i = 0; i < sameXCounter.size(); i++) { + if(sameXCounter[i] >= 2) moreThan2++; + } + + // if we have more than 1 two xvalues with more than 2 points, calibration is ok + if(moreThan2 > 1) + return true; + return false; +} + +bool estimatePoseCharucoBoard(InputArray _charucoCorners, InputArray _charucoIds, + const Ptr &_board, InputArray _cameraMatrix, InputArray _distCoeffs, + InputOutputArray _rvec, InputOutputArray _tvec, bool useExtrinsicGuess) { + CV_Assert((_charucoCorners.getMat().total() == _charucoIds.getMat().total())); + + // need, at least, 4 corners + if(_charucoIds.getMat().total() < 4) return false; + + vector objPoints; + 
objPoints.reserve(_charucoIds.getMat().total()); + for(unsigned int i = 0; i < _charucoIds.getMat().total(); i++) { + int currId = _charucoIds.getMat().at< int >(i); + CV_Assert(currId >= 0 && currId < (int)_board->chessboardCorners.size()); + objPoints.push_back(_board->chessboardCorners[currId]); + } + + // points need to be in different lines, check if detected points are enough + if(!_arePointsEnoughForPoseEstimation(objPoints)) return false; + + solvePnP(objPoints, _charucoCorners, _cameraMatrix, _distCoeffs, _rvec, _tvec, useExtrinsicGuess); + return true; +} + +double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter, + const Ptr &board, Size imageSize, InputOutputArray _cameraMatrix, + InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, + OutputArrayOfArrays _tvecs, + OutputArray _stdDeviationsIntrinsics, + OutputArray _stdDeviationsExtrinsics, + OutputArray _perViewErrors, + int flags, TermCriteria criteria) { + // for each frame, get properly processed imagePoints and objectPoints for the calibrateCamera + // function + vector processedObjectPoints, processedImagePoints; + size_t nFrames = _counter.total(); + int markerCounter = 0; + for(size_t frame = 0; frame < nFrames; frame++) { + int nMarkersInThisFrame = _counter.getMat().ptr< int >()[frame]; + vector thisFrameCorners; + vector thisFrameIds; + + CV_Assert(nMarkersInThisFrame > 0); + + thisFrameCorners.reserve((size_t) nMarkersInThisFrame); + thisFrameIds.reserve((size_t) nMarkersInThisFrame); + for(int j = markerCounter; j < markerCounter + nMarkersInThisFrame; j++) { + thisFrameCorners.push_back(_corners.getMat(j)); + thisFrameIds.push_back(_ids.getMat().ptr< int >()[j]); + } + markerCounter += nMarkersInThisFrame; + Mat currentImgPoints, currentObjPoints; + getBoardObjectAndImagePoints(board, thisFrameCorners, thisFrameIds, currentObjPoints, + currentImgPoints); + if(currentImgPoints.total() > 0 && currentObjPoints.total() > 0) { + 
processedImagePoints.push_back(currentImgPoints); + processedObjectPoints.push_back(currentObjPoints); + } + } + return calibrateCamera(processedObjectPoints, processedImagePoints, imageSize, _cameraMatrix, _distCoeffs, _rvecs, + _tvecs, _stdDeviationsIntrinsics, _stdDeviationsExtrinsics, _perViewErrors, flags, criteria); +} + +double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter, const Ptr &board, + Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs, + OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, TermCriteria criteria) { + return calibrateCameraAruco(_corners, _ids, _counter, board, imageSize, _cameraMatrix, _distCoeffs, + _rvecs, _tvecs, noArray(), noArray(), noArray(), flags, criteria); +} + +double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds, + const Ptr &_board, Size imageSize, + InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs, + OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, + OutputArray _stdDeviationsIntrinsics, + OutputArray _stdDeviationsExtrinsics, + OutputArray _perViewErrors, + int flags, TermCriteria criteria) { + CV_Assert(_charucoIds.total() > 0 && (_charucoIds.total() == _charucoCorners.total())); + + // Join object points of charuco corners in a single vector for calibrateCamera() function + vector > allObjPoints; + allObjPoints.resize(_charucoIds.total()); + for(unsigned int i = 0; i < _charucoIds.total(); i++) { + unsigned int nCorners = (unsigned int)_charucoIds.getMat(i).total(); + CV_Assert(nCorners > 0 && nCorners == _charucoCorners.getMat(i).total()); + allObjPoints[i].reserve(nCorners); + + for(unsigned int j = 0; j < nCorners; j++) { + int pointId = _charucoIds.getMat(i).at< int >(j); + CV_Assert(pointId >= 0 && pointId < (int)_board->chessboardCorners.size()); + allObjPoints[i].push_back(_board->chessboardCorners[pointId]); + } + } + return calibrateCamera(allObjPoints, 
_charucoCorners, imageSize, _cameraMatrix, _distCoeffs, _rvecs, _tvecs, + _stdDeviationsIntrinsics, _stdDeviationsExtrinsics, _perViewErrors, flags, criteria); +} + +double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds, + const Ptr &_board, Size imageSize, InputOutputArray _cameraMatrix, + InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, + int flags, TermCriteria criteria) { +return calibrateCameraCharuco(_charucoCorners, _charucoIds, _board, imageSize, _cameraMatrix, _distCoeffs, _rvecs, + _tvecs, noArray(), noArray(), noArray(), flags, criteria); +} + +} +} diff --git a/modules/aruco/src/aruco_detector.cpp b/modules/aruco/src/aruco_detector.cpp new file mode 100644 index 00000000000..a9dc9af4670 --- /dev/null +++ b/modules/aruco/src/aruco_detector.cpp @@ -0,0 +1,1259 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include "precomp.hpp" +#include +#include "opencv2/aruco_detector.hpp" +#include "opencv2/aruco/aruco_calib_pose.hpp" +#include "aruco_utils.hpp" +#include "apriltag/apriltag_quad_thresh.hpp" +#include + +namespace cv { +namespace aruco { + +using namespace std; + +bool DetectorParameters::readWrite(const Ptr& readNode, const Ptr& writeStorage) { + CV_Assert(!readNode.empty() || !writeStorage.empty()); + bool check = false; + + check |= readWriteParameter("adaptiveThreshWinSizeMin", this->adaptiveThreshWinSizeMin, readNode, writeStorage); + check |= readWriteParameter("adaptiveThreshWinSizeMax", this->adaptiveThreshWinSizeMax, readNode, writeStorage); + check |= readWriteParameter("adaptiveThreshWinSizeStep", this->adaptiveThreshWinSizeStep, readNode, writeStorage); + check |= readWriteParameter("adaptiveThreshConstant", this->adaptiveThreshConstant, readNode, writeStorage); + check |= 
readWriteParameter("minMarkerPerimeterRate", this->minMarkerPerimeterRate, readNode, writeStorage); + check |= readWriteParameter("maxMarkerPerimeterRate", this->maxMarkerPerimeterRate, readNode, writeStorage); + check |= readWriteParameter("polygonalApproxAccuracyRate", this->polygonalApproxAccuracyRate, + readNode, writeStorage); + check |= readWriteParameter("minCornerDistanceRate", this->minCornerDistanceRate, readNode, writeStorage); + check |= readWriteParameter("minDistanceToBorder", this->minDistanceToBorder, readNode, writeStorage); + check |= readWriteParameter("minMarkerDistanceRate", this->minMarkerDistanceRate, readNode, writeStorage); + check |= readWriteParameter("cornerRefinementMethod", this->cornerRefinementMethod, readNode, writeStorage); + check |= readWriteParameter("cornerRefinementWinSize", this->cornerRefinementWinSize, readNode, writeStorage); + check |= readWriteParameter("cornerRefinementMaxIterations", this->cornerRefinementMaxIterations, + readNode, writeStorage); + check |= readWriteParameter("cornerRefinementMinAccuracy", this->cornerRefinementMinAccuracy, + readNode, writeStorage); + check |= readWriteParameter("markerBorderBits", this->markerBorderBits, readNode, writeStorage); + check |= readWriteParameter("perspectiveRemovePixelPerCell", this->perspectiveRemovePixelPerCell, + readNode, writeStorage); + check |= readWriteParameter("perspectiveRemoveIgnoredMarginPerCell", this->perspectiveRemoveIgnoredMarginPerCell, + readNode, writeStorage); + check |= readWriteParameter("maxErroneousBitsInBorderRate", this->maxErroneousBitsInBorderRate, + readNode, writeStorage); + check |= readWriteParameter("minOtsuStdDev", this->minOtsuStdDev, readNode, writeStorage); + check |= readWriteParameter("errorCorrectionRate", this->errorCorrectionRate, readNode, writeStorage); + // new aruco 3 functionality + check |= readWriteParameter("useAruco3Detection", this->useAruco3Detection, readNode, writeStorage); + check |= 
readWriteParameter("minSideLengthCanonicalImg", this->minSideLengthCanonicalImg, readNode, writeStorage); + check |= readWriteParameter("minMarkerLengthRatioOriginalImg", this->minMarkerLengthRatioOriginalImg, + readNode, writeStorage); + return check; +} + +bool DetectorParameters::readDetectorParameters(const FileNode& fn) { + if(fn.empty()) + return false; + Ptr pfn = makePtr(fn); + return readWrite(pfn); +} + +bool DetectorParameters::writeDetectorParameters(const Ptr& fs) +{ + if (fs.empty() && !fs->isOpened()) + return false; + return readWrite(nullptr, fs); +} + +bool RefineParameters::readWrite(const Ptr& readNode, const Ptr& writeStorage) { + CV_Assert(!readNode.empty() || !writeStorage.empty()); + bool check = false; + + check |= readWriteParameter("minRepDistance", this->minRepDistance, readNode, writeStorage); + check |= readWriteParameter("errorCorrectionRate", this->errorCorrectionRate, readNode, writeStorage); + check |= readWriteParameter("checkAllOrders", this->checkAllOrders, readNode, writeStorage); + return check; +} + +bool RefineParameters::readRefineParameters(const FileNode &fn) { + if(fn.empty()) + return false; + Ptr pfn = makePtr(fn); + return readWrite(pfn); +} + +bool RefineParameters::writeRefineParameters(const Ptr &fs) { + if(fs.empty()) + return false; + return readWrite(nullptr, fs); +} + +/** + * @brief Threshold input image using adaptive thresholding + */ +static void _threshold(InputArray _in, OutputArray _out, int winSize, double constant) { + + CV_Assert(winSize >= 3); + if(winSize % 2 == 0) winSize++; // win size must be odd + adaptiveThreshold(_in, _out, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, winSize, constant); +} + + +/** + * @brief Given a tresholded image, find the contours, calculate their polygonal approximation + * and take those that accomplish some conditions + */ +static void _findMarkerContours(const Mat &in, vector< vector< Point2f > > &candidates, + vector< vector< Point > > &contoursOut, double 
minPerimeterRate, + double maxPerimeterRate, double accuracyRate, + double minCornerDistanceRate, int minDistanceToBorder, int minSize) { + + CV_Assert(minPerimeterRate > 0 && maxPerimeterRate > 0 && accuracyRate > 0 && + minCornerDistanceRate >= 0 && minDistanceToBorder >= 0); + + // calculate maximum and minimum sizes in pixels + unsigned int minPerimeterPixels = + (unsigned int)(minPerimeterRate * max(in.cols, in.rows)); + unsigned int maxPerimeterPixels = + (unsigned int)(maxPerimeterRate * max(in.cols, in.rows)); + + // for aruco3 functionality + if (minSize != 0) { + minPerimeterPixels = 4*minSize; + } + + Mat contoursImg; + in.copyTo(contoursImg); + vector< vector< Point > > contours; + findContours(contoursImg, contours, RETR_LIST, CHAIN_APPROX_NONE); + // now filter list of contours + for(unsigned int i = 0; i < contours.size(); i++) { + // check perimeter + if(contours[i].size() < minPerimeterPixels || contours[i].size() > maxPerimeterPixels) + continue; + + // check is square and is convex + vector< Point > approxCurve; + approxPolyDP(contours[i], approxCurve, double(contours[i].size()) * accuracyRate, true); + if(approxCurve.size() != 4 || !isContourConvex(approxCurve)) continue; + + // check min distance between corners + double minDistSq = + max(contoursImg.cols, contoursImg.rows) * max(contoursImg.cols, contoursImg.rows); + for(int j = 0; j < 4; j++) { + double d = (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) * + (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) + + (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y) * + (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y); + minDistSq = min(minDistSq, d); + } + double minCornerDistancePixels = double(contours[i].size()) * minCornerDistanceRate; + if(minDistSq < minCornerDistancePixels * minCornerDistancePixels) continue; + + // check if it is too near to the image border + bool tooNearBorder = false; + for(int j = 0; j < 4; j++) { + if(approxCurve[j].x < minDistanceToBorder 
|| approxCurve[j].y < minDistanceToBorder || + approxCurve[j].x > contoursImg.cols - 1 - minDistanceToBorder || + approxCurve[j].y > contoursImg.rows - 1 - minDistanceToBorder) + tooNearBorder = true; + } + if(tooNearBorder) continue; + + // if it passes all the test, add to candidates vector + vector< Point2f > currentCandidate; + currentCandidate.resize(4); + for(int j = 0; j < 4; j++) { + currentCandidate[j] = Point2f((float)approxCurve[j].x, (float)approxCurve[j].y); + } + candidates.push_back(currentCandidate); + contoursOut.push_back(contours[i]); + } +} + + +/** + * @brief Assure order of candidate corners is clockwise direction + */ +static void _reorderCandidatesCorners(vector< vector< Point2f > > &candidates) { + + for(unsigned int i = 0; i < candidates.size(); i++) { + double dx1 = candidates[i][1].x - candidates[i][0].x; + double dy1 = candidates[i][1].y - candidates[i][0].y; + double dx2 = candidates[i][2].x - candidates[i][0].x; + double dy2 = candidates[i][2].y - candidates[i][0].y; + double crossProduct = (dx1 * dy2) - (dy1 * dx2); + + if(crossProduct < 0.0) { // not clockwise direction + swap(candidates[i][1], candidates[i][3]); + } + } +} + +/** + * @brief to make sure that the corner's order of both candidates (default/white) is the same + */ +static vector alignContourOrder(Point2f corner, vector< Point2f > candidate) { + uint8_t r=0; + double min = cv::norm( Vec2f( corner - candidate[0] ), NORM_L2SQR); + for(uint8_t pos=1; pos < 4; pos++) { + double nDiff = cv::norm( Vec2f( corner - candidate[pos] ), NORM_L2SQR); + if(nDiff < min){ + r = pos; + min =nDiff; + } + } + std::rotate(candidate.begin(), candidate.begin() + r, candidate.end()); + return candidate; +} + +/** + * @brief Check candidates that are too close to each other, save the potential candidates + * (i.e. 
biggest/smallest contour) and remove the rest + */ +static void _filterTooCloseCandidates(const vector< vector< Point2f > > &candidatesIn, + vector< vector< vector< Point2f > > > &candidatesSetOut, + const vector< vector< Point > > &contoursIn, + vector< vector< vector< Point > > > &contoursSetOut, + double minMarkerDistanceRate, bool detectInvertedMarker) { + + CV_Assert(minMarkerDistanceRate >= 0); + vector candGroup; + candGroup.resize(candidatesIn.size(), -1); + vector< vector > groupedCandidates; + for(unsigned int i = 0; i < candidatesIn.size(); i++) { + bool isSingleContour = true; + for(unsigned int j = i + 1; j < candidatesIn.size(); j++) { + + int minimumPerimeter = min((int)contoursIn[i].size(), (int)contoursIn[j].size() ); + + // fc is the first corner considered on one of the markers, 4 combinations are possible + for(int fc = 0; fc < 4; fc++) { + double distSq = 0; + for(int c = 0; c < 4; c++) { + // modC is the corner considering first corner is fc + int modC = (c + fc) % 4; + distSq += (candidatesIn[i][modC].x - candidatesIn[j][c].x) * + (candidatesIn[i][modC].x - candidatesIn[j][c].x) + + (candidatesIn[i][modC].y - candidatesIn[j][c].y) * + (candidatesIn[i][modC].y - candidatesIn[j][c].y); + } + distSq /= 4.; + + // if mean square distance is too low, remove the smaller one of the two markers + double minMarkerDistancePixels = double(minimumPerimeter) * minMarkerDistanceRate; + if(distSq < minMarkerDistancePixels * minMarkerDistancePixels) { + isSingleContour = false; + // i and j are not related to a group + if(candGroup[i]<0 && candGroup[j]<0){ + // mark candidates with their corresponding group number + candGroup[i] = candGroup[j] = (int)groupedCandidates.size(); + + // create group + vector grouped; + grouped.push_back(i); + grouped.push_back(j); + groupedCandidates.push_back( grouped ); + } + // i is related to a group + else if(candGroup[i] > -1 && candGroup[j] == -1){ + int group = candGroup[i]; + candGroup[j] = group; + + // add to group + 
groupedCandidates[group].push_back( j ); + } + // j is related to a group + else if(candGroup[j] > -1 && candGroup[i] == -1){ + int group = candGroup[j]; + candGroup[i] = group; + + // add to group + groupedCandidates[group].push_back( i ); + } + } + } + } + if (isSingleContour && candGroup[i] < 0) + { + candGroup[i] = (int)groupedCandidates.size(); + vector grouped; + grouped.push_back(i); + grouped.push_back(i); // step "save possible candidates" require minimum 2 elements + groupedCandidates.push_back(grouped); + } + } + + // save possible candidates + candidatesSetOut.clear(); + contoursSetOut.clear(); + + vector< vector< Point2f > > biggerCandidates; + vector< vector< Point > > biggerContours; + vector< vector< Point2f > > smallerCandidates; + vector< vector< Point > > smallerContours; + + // save possible candidates + for(unsigned int i = 0; i < groupedCandidates.size(); i++) { + unsigned int smallerIdx = groupedCandidates[i][0]; + unsigned int biggerIdx = smallerIdx; + double smallerArea = contourArea(candidatesIn[smallerIdx]); + double biggerArea = smallerArea; + + // evaluate group elements + for(unsigned int j = 1; j < groupedCandidates[i].size(); j++) { + unsigned int currIdx = groupedCandidates[i][j]; + double currArea = contourArea(candidatesIn[currIdx]); + + // check if current contour is bigger + if(currArea >= biggerArea) { + biggerIdx = currIdx; + biggerArea = currArea; + } + + // check if current contour is smaller + if(currArea < smallerArea && detectInvertedMarker) { + smallerIdx = currIdx; + smallerArea = currArea; + } + } + + // add contours and candidates + biggerCandidates.push_back(candidatesIn[biggerIdx]); + biggerContours.push_back(contoursIn[biggerIdx]); + if(detectInvertedMarker) { + smallerCandidates.push_back(alignContourOrder(candidatesIn[biggerIdx][0], candidatesIn[smallerIdx])); + smallerContours.push_back(contoursIn[smallerIdx]); + } + } + // to preserve the structure :: candidateSet< defaultCandidates, whiteCandidates > + // 
default candidates + candidatesSetOut.push_back(biggerCandidates); + contoursSetOut.push_back(biggerContours); + // white candidates + candidatesSetOut.push_back(smallerCandidates); + contoursSetOut.push_back(smallerContours); +} + +/** + * @brief Initial steps on finding square candidates + */ +static void _detectInitialCandidates(const Mat &grey, vector< vector< Point2f > > &candidates, + vector< vector< Point > > &contours, + const Ptr ¶ms) { + + CV_Assert(params->adaptiveThreshWinSizeMin >= 3 && params->adaptiveThreshWinSizeMax >= 3); + CV_Assert(params->adaptiveThreshWinSizeMax >= params->adaptiveThreshWinSizeMin); + CV_Assert(params->adaptiveThreshWinSizeStep > 0); + + // number of window sizes (scales) to apply adaptive thresholding + int nScales = (params->adaptiveThreshWinSizeMax - params->adaptiveThreshWinSizeMin) / + params->adaptiveThreshWinSizeStep + 1; + + vector< vector< vector< Point2f > > > candidatesArrays((size_t) nScales); + vector< vector< vector< Point > > > contoursArrays((size_t) nScales); + + ////for each value in the interval of thresholding window sizes + parallel_for_(Range(0, nScales), [&](const Range& range) { + const int begin = range.start; + const int end = range.end; + + for (int i = begin; i < end; i++) { + int currScale = params->adaptiveThreshWinSizeMin + i * params->adaptiveThreshWinSizeStep; + // threshold + Mat thresh; + _threshold(grey, thresh, currScale, params->adaptiveThreshConstant); + + // detect rectangles + _findMarkerContours(thresh, candidatesArrays[i], contoursArrays[i], + params->minMarkerPerimeterRate, params->maxMarkerPerimeterRate, + params->polygonalApproxAccuracyRate, params->minCornerDistanceRate, + params->minDistanceToBorder, params->minSideLengthCanonicalImg); + } + }); + // join candidates + for(int i = 0; i < nScales; i++) { + for(unsigned int j = 0; j < candidatesArrays[i].size(); j++) { + candidates.push_back(candidatesArrays[i][j]); + contours.push_back(contoursArrays[i][j]); + } + } +} + + +/** + * 
@brief Detect square candidates in the input image + */ +static void _detectCandidates(InputArray _grayImage, vector< vector< vector< Point2f > > >& candidatesSetOut, + vector< vector< vector< Point > > >& contoursSetOut, const Ptr &_params) { + Mat grey = _grayImage.getMat(); + CV_DbgAssert(grey.total() != 0); + CV_DbgAssert(grey.type() == CV_8UC1); + + /// 1. DETECT FIRST SET OF CANDIDATES + vector< vector< Point2f > > candidates; + vector< vector< Point > > contours; + _detectInitialCandidates(grey, candidates, contours, _params); + /// 2. SORT CORNERS + _reorderCandidatesCorners(candidates); + + /// 3. FILTER OUT NEAR CANDIDATE PAIRS + // save the outter/inner border (i.e. potential candidates) + _filterTooCloseCandidates(candidates, candidatesSetOut, contours, contoursSetOut, + _params->minMarkerDistanceRate, _params->detectInvertedMarker); +} + + +/** + * @brief Given an input image and candidate corners, extract the bits of the candidate, including + * the border bits + */ +static Mat _extractBits(InputArray _image, const vector& corners, int markerSize, + int markerBorderBits, int cellSize, double cellMarginRate, double minStdDevOtsu) { + CV_Assert(_image.getMat().channels() == 1); + CV_Assert(corners.size() == 4ull); + CV_Assert(markerBorderBits > 0 && cellSize > 0 && cellMarginRate >= 0 && cellMarginRate <= 1); + CV_Assert(minStdDevOtsu >= 0); + + // number of bits in the marker + int markerSizeWithBorders = markerSize + 2 * markerBorderBits; + int cellMarginPixels = int(cellMarginRate * cellSize); + + Mat resultImg; // marker image after removing perspective + int resultImgSize = markerSizeWithBorders * cellSize; + Mat resultImgCorners(4, 1, CV_32FC2); + resultImgCorners.ptr< Point2f >(0)[0] = Point2f(0, 0); + resultImgCorners.ptr< Point2f >(0)[1] = Point2f((float)resultImgSize - 1, 0); + resultImgCorners.ptr< Point2f >(0)[2] = + Point2f((float)resultImgSize - 1, (float)resultImgSize - 1); + resultImgCorners.ptr< Point2f >(0)[3] = Point2f(0, 
(float)resultImgSize - 1); + + // remove perspective + Mat transformation = getPerspectiveTransform(corners, resultImgCorners); + warpPerspective(_image, resultImg, transformation, Size(resultImgSize, resultImgSize), + INTER_NEAREST); + + // output image containing the bits + Mat bits(markerSizeWithBorders, markerSizeWithBorders, CV_8UC1, Scalar::all(0)); + + // check if standard deviation is enough to apply Otsu + // if not enough, it probably means all bits are the same color (black or white) + Mat mean, stddev; + // Remove some border just to avoid border noise from perspective transformation + Mat innerRegion = resultImg.colRange(cellSize / 2, resultImg.cols - cellSize / 2) + .rowRange(cellSize / 2, resultImg.rows - cellSize / 2); + meanStdDev(innerRegion, mean, stddev); + if(stddev.ptr< double >(0)[0] < minStdDevOtsu) { + // all black or all white, depending on mean value + if(mean.ptr< double >(0)[0] > 127) + bits.setTo(1); + else + bits.setTo(0); + return bits; + } + + // now extract code, first threshold using Otsu + threshold(resultImg, resultImg, 125, 255, THRESH_BINARY | THRESH_OTSU); + + // for each cell + for(int y = 0; y < markerSizeWithBorders; y++) { + for(int x = 0; x < markerSizeWithBorders; x++) { + int Xstart = x * (cellSize) + cellMarginPixels; + int Ystart = y * (cellSize) + cellMarginPixels; + Mat square = resultImg(Rect(Xstart, Ystart, cellSize - 2 * cellMarginPixels, + cellSize - 2 * cellMarginPixels)); + // count white pixels on each cell to assign its value + size_t nZ = (size_t) countNonZero(square); + if(nZ > square.total() / 2) bits.at< unsigned char >(y, x) = 1; + } + } + + return bits; +} + + + +/** + * @brief Return number of erroneous bits in border, i.e. number of white bits in border. 
+ */ +static int _getBorderErrors(const Mat &bits, int markerSize, int borderSize) { + + int sizeWithBorders = markerSize + 2 * borderSize; + + CV_Assert(markerSize > 0 && bits.cols == sizeWithBorders && bits.rows == sizeWithBorders); + + int totalErrors = 0; + for(int y = 0; y < sizeWithBorders; y++) { + for(int k = 0; k < borderSize; k++) { + if(bits.ptr< unsigned char >(y)[k] != 0) totalErrors++; + if(bits.ptr< unsigned char >(y)[sizeWithBorders - 1 - k] != 0) totalErrors++; + } + } + for(int x = borderSize; x < sizeWithBorders - borderSize; x++) { + for(int k = 0; k < borderSize; k++) { + if(bits.ptr< unsigned char >(k)[x] != 0) totalErrors++; + if(bits.ptr< unsigned char >(sizeWithBorders - 1 - k)[x] != 0) totalErrors++; + } + } + return totalErrors; +} + + +/** + * @brief Tries to identify one candidate given the dictionary + * @return candidate typ. zero if the candidate is not valid, + * 1 if the candidate is a black candidate (default candidate) + * 2 if the candidate is a white candidate + */ +static uint8_t _identifyOneCandidate(const Ptr& dictionary, InputArray _image, + const vector& _corners, int& idx, + const Ptr& params, int& rotation, + const float scale = 1.f) { + CV_DbgAssert(_corners.size() == 4); + CV_DbgAssert(_image.getMat().total() != 0); + CV_DbgAssert(params->markerBorderBits > 0); + uint8_t typ=1; + // get bits + // scale corners to the correct size to search on the corresponding image pyramid + vector scaled_corners(4); + for (int i = 0; i < 4; ++i) { + scaled_corners[i].x = _corners[i].x * scale; + scaled_corners[i].y = _corners[i].y * scale; + } + + Mat candidateBits = + _extractBits(_image, scaled_corners, dictionary->markerSize, params->markerBorderBits, + params->perspectiveRemovePixelPerCell, + params->perspectiveRemoveIgnoredMarginPerCell, params->minOtsuStdDev); + + // analyze border bits + int maximumErrorsInBorder = + int(dictionary->markerSize * dictionary->markerSize * params->maxErroneousBitsInBorderRate); + int borderErrors 
= + _getBorderErrors(candidateBits, dictionary->markerSize, params->markerBorderBits); + + // check if it is a white marker + if(params->detectInvertedMarker){ + // to get from 255 to 1 + Mat invertedImg = ~candidateBits-254; + int invBError = _getBorderErrors(invertedImg, dictionary->markerSize, params->markerBorderBits); + // white marker + if(invBError maximumErrorsInBorder) return 0; // border is wrong + + // take only inner bits + Mat onlyBits = + candidateBits.rowRange(params->markerBorderBits, + candidateBits.rows - params->markerBorderBits) + .colRange(params->markerBorderBits, candidateBits.cols - params->markerBorderBits); + + // try to indentify the marker + if(!dictionary->identify(onlyBits, idx, rotation, params->errorCorrectionRate)) + return 0; + + return typ; +} + +/** + * @brief rotate the initial corner to get to the right position + */ +static void correctCornerPosition( vector< Point2f >& _candidate, int rotate){ + std::rotate(_candidate.begin(), _candidate.begin() + 4 - rotate, _candidate.end()); +} + +static size_t _findOptPyrImageForCanonicalImg( + const std::vector& img_pyr, + const int scaled_width, + const int cur_perimeter, + const int min_perimeter) { + CV_Assert(scaled_width > 0); + size_t optLevel = 0; + float dist = std::numeric_limits::max(); + for (size_t i = 0; i < img_pyr.size(); ++i) { + const float scale = img_pyr[i].cols / static_cast(scaled_width); + const float perimeter_scaled = cur_perimeter * scale; + // instead of std::abs() favor the larger pyramid level by checking if the distance is postive + // will slow down the algorithm but find more corners in the end + const float new_dist = perimeter_scaled - min_perimeter; + if (new_dist < dist && new_dist > 0.f) { + dist = new_dist; + optLevel = i; + } + } + return optLevel; +} + +/** + * @brief Identify square candidates according to a marker dictionary + */ + +static void _identifyCandidates(InputArray grey, + const std::vector& image_pyr, + vector< vector< vector< Point2f > 
> >& _candidatesSet, + vector< vector< vector > >& _contoursSet, const Ptr &_dictionary, + vector< vector< Point2f > >& _accepted, vector< vector >& _contours, vector< int >& ids, + const Ptr ¶ms, + OutputArrayOfArrays _rejected = noArray()) { + CV_DbgAssert(grey.getMat().total() != 0); + CV_DbgAssert(grey.getMat().type() == CV_8UC1); + int ncandidates = (int)_candidatesSet[0].size(); + vector< vector< Point2f > > accepted; + vector< vector< Point2f > > rejected; + vector< vector< Point > > contours; + + vector< int > idsTmp(ncandidates, -1); + vector< int > rotated(ncandidates, 0); + vector< uint8_t > validCandidates(ncandidates, 0); + + //// Analyze each of the candidates + parallel_for_(Range(0, ncandidates), [&](const Range &range) { + const int begin = range.start; + const int end = range.end; + + vector< vector< Point2f > >& candidates = params->detectInvertedMarker ? _candidatesSet[1] : _candidatesSet[0]; + vector< vector< Point > >& contourS = params->detectInvertedMarker ? _contoursSet[1] : _contoursSet[0]; + + for(int i = begin; i < end; i++) { + int currId = -1; + // implements equation (4) + if (params->useAruco3Detection) { + const int perimeterOfContour = static_cast(contourS[i].size()); + const int min_perimeter = params->minSideLengthCanonicalImg * 4; + const size_t nearestImgId = _findOptPyrImageForCanonicalImg(image_pyr, grey.cols(), perimeterOfContour, min_perimeter); + const float scale = image_pyr[nearestImgId].cols / static_cast(grey.cols()); + + validCandidates[i] = _identifyOneCandidate(_dictionary, image_pyr[nearestImgId], candidates[i], currId, params, rotated[i], scale); + } + else { + validCandidates[i] = _identifyOneCandidate(_dictionary, grey, candidates[i], currId, params, rotated[i]); + } + + if(validCandidates[i] > 0) + idsTmp[i] = currId; + } + }); + + for(int i = 0; i < ncandidates; i++) { + if(validCandidates[i] > 0) { + // to choose the right set of candidates :: 0 for default, 1 for white markers + uint8_t set = 
validCandidates[i]-1; + + // shift corner positions to the correct rotation + correctCornerPosition(_candidatesSet[set][i], rotated[i]); + + if( !params->detectInvertedMarker && validCandidates[i] == 2 ) + continue; + + // add valid candidate + accepted.push_back(_candidatesSet[set][i]); + ids.push_back(idsTmp[i]); + + contours.push_back(_contoursSet[set][i]); + + } else { + rejected.push_back(_candidatesSet[0][i]); + } + } + + // parse output + _accepted = accepted; + + _contours= contours; + + if(_rejected.needed()) { + _copyVector2Output(rejected, _rejected); + } +} + +/** + * Line fitting A * B = C :: Called from function refineCandidateLines + * @param nContours, contour-container + */ +static Point3f _interpolate2Dline(const std::vector& nContours){ + CV_Assert(nContours.size() >= 2); + float minX, minY, maxX, maxY; + minX = maxX = nContours[0].x; + minY = maxY = nContours[0].y; + + for(unsigned int i = 0; i< nContours.size(); i++){ + minX = nContours[i].x < minX ? nContours[i].x : minX; + minY = nContours[i].y < minY ? nContours[i].y : minY; + maxX = nContours[i].x > maxX ? nContours[i].x : maxX; + maxY = nContours[i].y > maxY ? 
nContours[i].y : maxY; + } + + Mat A = Mat::ones((int)nContours.size(), 2, CV_32F); // Coefficient Matrix (N x 2) + Mat B((int)nContours.size(), 1, CV_32F); // Variables Matrix (N x 1) + Mat C; // Constant + + if(maxX - minX > maxY - minY){ + for(unsigned int i =0; i < nContours.size(); i++){ + A.at(i,0)= nContours[i].x; + B.at(i,0)= nContours[i].y; + } + + solve(A, B, C, DECOMP_NORMAL); + + return Point3f(C.at(0, 0), -1., C.at(1, 0)); + } + else{ + for(unsigned int i =0; i < nContours.size(); i++){ + A.at(i,0)= nContours[i].y; + B.at(i,0)= nContours[i].x; + } + + solve(A, B, C, DECOMP_NORMAL); + + return Point3f(-1., C.at(0, 0), C.at(1, 0)); + } + +} + +/** + * Find the Point where the lines crosses :: Called from function refineCandidateLines + * @param nLine1 + * @param nLine2 + * @return Crossed Point + */ +static Point2f _getCrossPoint(Point3f nLine1, Point3f nLine2){ + Matx22f A(nLine1.x, nLine1.y, nLine2.x, nLine2.y); + Vec2f B(-nLine1.z, -nLine2.z); + return Vec2f(A.solve(B).val); +} + +/** + * Refine Corners using the contour vector :: Called from function detectMarkers + * @param nContours, contour-container + * @param nCorners, candidate Corners + * @param camMatrix, cameraMatrix input 3x3 floating-point camera matrix + * @param distCoeff, distCoeffs vector of distortion coefficient + */ +static void _refineCandidateLines(std::vector& nContours, std::vector& nCorners){ + vector contour2f(nContours.begin(), nContours.end()); + /* 5 groups :: to group the edges + * 4 - classified by its corner + * extra group - (temporary) if contours do not begin with a corner + */ + vector cntPts[5]; + int cornerIndex[4]={-1}; + int group=4; + + for ( unsigned int i =0; i < nContours.size(); i++ ) { + for(unsigned int j=0; j<4; j++){ + if ( nCorners[j] == contour2f[i] ){ + cornerIndex[j] = i; + group=j; + } + } + cntPts[group].push_back(contour2f[i]); + } + for (int i = 0; i < 4; i++) + { + CV_Assert(cornerIndex[i] != -1); + } + // saves extra group into corresponding + 
if( !cntPts[4].empty() ){ + for( unsigned int i=0; i < cntPts[4].size() ; i++ ) + cntPts[group].push_back(cntPts[4].at(i)); + cntPts[4].clear(); + } + + //Evaluate contour direction :: using the position of the detected corners + int inc=1; + + inc = ( (cornerIndex[0] > cornerIndex[1]) && (cornerIndex[3] > cornerIndex[0]) ) ? -1:inc; + inc = ( (cornerIndex[2] > cornerIndex[3]) && (cornerIndex[1] > cornerIndex[2]) ) ? -1:inc; + + // calculate the line :: who passes through the grouped points + Point3f lines[4]; + for(int i=0; i<4; i++){ + lines[i]=_interpolate2Dline(cntPts[i]); + } + + /* + * calculate the corner :: where the lines crosses to each other + * clockwise direction no clockwise direction + * 0 1 + * .---. 1 .---. 2 + * | | | | + * 3 .___. 0 .___. + * 2 3 + */ + for(int i=0; i < 4; i++){ + if(inc<0) + nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+1)%4 ]); // 01 12 23 30 + else + nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+3)%4 ]); // 30 01 12 23 + } +} + +static inline void findCornerInPyrImage(const float scale_init, const int closest_pyr_image_idx, + const std::vector& grey_pyramid, Mat corners, + const Ptr& params) { + // scale them to the closest pyramid level + if (scale_init != 1.f) + corners *= scale_init; // scale_init * scale_pyr + for (int idx = closest_pyr_image_idx - 1; idx >= 0; --idx) { + // scale them to new pyramid level + corners *= 2.f; // *= scale_pyr; + // use larger win size for larger images + const int subpix_win_size = std::max(grey_pyramid[idx].cols, grey_pyramid[idx].rows) > 1080 ? 
5 : 3; + cornerSubPix(grey_pyramid[idx], corners, + Size(subpix_win_size, subpix_win_size), + Size(-1, -1), + TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, + params->cornerRefinementMaxIterations, + params->cornerRefinementMinAccuracy)); + } +} + +void ArucoDetector::detectMarkers(InputArray _image, OutputArrayOfArrays _corners, OutputArray _ids, + OutputArrayOfArrays _rejectedImgPoints) { + CV_Assert(!_image.empty()); + CV_Assert(params->markerBorderBits > 0); + // check that the parameters are set correctly if Aruco3 is used + CV_Assert(!(params->useAruco3Detection == true && + params->minSideLengthCanonicalImg == 0 && + params->minMarkerLengthRatioOriginalImg == 0.0)); + + Mat grey; + _convertToGrey(_image.getMat(), grey); + + // Aruco3 functionality is the extension of Aruco. + // The description can be found in: + // [1] Speeded up detection of squared fiducial markers, 2018, FJ Romera-Ramirez et al. + // if Aruco3 functionality if not wanted + // change some parameters to be sure to turn it off + if (!params->useAruco3Detection) { + params->minMarkerLengthRatioOriginalImg = 0.0; + params->minSideLengthCanonicalImg = 0; + } + else { + // always turn on corner refinement in case of Aruco3, due to upsampling + params->cornerRefinementMethod = CORNER_REFINE_SUBPIX; + // only CORNER_REFINE_SUBPIX implement correctly for useAruco3Detection + // Todo: update other CORNER_REFINE methods + } + + /// Step 0: equation (2) from paper [1] + const float fxfy = (!params->useAruco3Detection ? 1.f : params->minSideLengthCanonicalImg / + (params->minSideLengthCanonicalImg + std::max(grey.cols, grey.rows)*params->minMarkerLengthRatioOriginalImg)); + + /// Step 1: create image pyramid. Section 3.4. 
in [1] + std::vector grey_pyramid; + int closest_pyr_image_idx = 0, num_levels = 0; + //// Step 1.1: resize image with equation (1) from paper [1] + if (params->useAruco3Detection) { + const float scale_pyr = 2.f; + const float img_area = static_cast(grey.rows*grey.cols); + const float min_area_marker = static_cast(params->minSideLengthCanonicalImg*params->minSideLengthCanonicalImg); + // find max level + num_levels = static_cast(log2(img_area / min_area_marker)/scale_pyr); + // the closest pyramid image to the downsampled segmentation image + // will later be used as start index for corner upsampling + const float scale_img_area = img_area * fxfy * fxfy; + closest_pyr_image_idx = cvRound(log2(img_area / scale_img_area)/scale_pyr); + } + cv::buildPyramid(grey, grey_pyramid, num_levels); + + // resize to segmentation image + // in this reduces size the contours will be detected + if (fxfy != 1.f) + cv::resize(grey, grey, cv::Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows))); + + /// STEP 2: Detect marker candidates + vector< vector< Point2f > > candidates; + vector< vector< Point > > contours; + vector< int > ids; + + vector< vector< vector< Point2f > > > candidatesSet; + vector< vector< vector< Point > > > contoursSet; + + /// STEP 2.a Detect marker candidates :: using AprilTag + if(params->cornerRefinementMethod == CORNER_REFINE_APRILTAG){ + _apriltag(grey, params, candidates, contours); + + candidatesSet.push_back(candidates); + contoursSet.push_back(contours); + } + /// STEP 2.b Detect marker candidates :: traditional way + else + _detectCandidates(grey, candidatesSet, contoursSet, params); + + /// STEP 2: Check candidate codification (identify markers) + _identifyCandidates(grey, grey_pyramid, candidatesSet, contoursSet, dictionary, + candidates, contours, ids, params, _rejectedImgPoints); + + /// STEP 3: Corner refinement :: use corner subpix + if( params->cornerRefinementMethod == CORNER_REFINE_SUBPIX ) { + CV_Assert(params->cornerRefinementWinSize 
> 0 && params->cornerRefinementMaxIterations > 0 && + params->cornerRefinementMinAccuracy > 0); + // Do subpixel estimation. In Aruco3 start on the lowest pyramid level and upscale the corners + parallel_for_(Range(0, (int)candidates.size()), [&](const Range& range) { + const int begin = range.start; + const int end = range.end; + + for (int i = begin; i < end; i++) { + if (params->useAruco3Detection) { + const float scale_init = (float) grey_pyramid[closest_pyr_image_idx].cols / grey.cols; + findCornerInPyrImage(scale_init, closest_pyr_image_idx, grey_pyramid, Mat(candidates[i]), params); + } + else + cornerSubPix(grey, Mat(candidates[i]), + Size(params->cornerRefinementWinSize, params->cornerRefinementWinSize), + Size(-1, -1), + TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, + params->cornerRefinementMaxIterations, + params->cornerRefinementMinAccuracy)); + } + }); + } + + /// STEP 3, Optional : Corner refinement :: use contour container + if( params->cornerRefinementMethod == CORNER_REFINE_CONTOUR){ + + if(! _ids.empty()){ + + // do corner refinement using the contours for each detected markers + parallel_for_(Range(0, (int)candidates.size()), [&](const Range& range) { + for (int i = range.start; i < range.end; i++) { + _refineCandidateLines(contours[i], candidates[i]); + } + }); + } + } + + if (params->cornerRefinementMethod != CORNER_REFINE_SUBPIX && fxfy != 1.f) { + // only CORNER_REFINE_SUBPIX implement correctly for useAruco3Detection + // Todo: update other CORNER_REFINE methods + + // scale to orignal size, this however will lead to inaccurate detections! 
+ for (auto &vecPoints : candidates) + for (auto &point : vecPoints) + point *= 1.f/fxfy; + } + + // copy to output arrays + _copyVector2Output(candidates, _corners); + Mat(ids).copyTo(_ids); +} + +/** + * Project board markers that are not included in the list of detected markers + */ +static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArrayOfArrays _detectedCorners, + InputOutputArray _detectedIds, InputArray _cameraMatrix, InputArray _distCoeffs, + vector >& _undetectedMarkersProjectedCorners, + OutputArray _undetectedMarkersIds) { + // first estimate board pose with the current avaible markers + Mat rvec, tvec; + int boardDetectedMarkers = aruco::estimatePoseBoard(_detectedCorners, _detectedIds, _board, + _cameraMatrix, _distCoeffs, rvec, tvec); + + // at least one marker from board so rvec and tvec are valid + if(boardDetectedMarkers == 0) return; + + // search undetected markers and project them using the previous pose + vector > undetectedCorners; + vector undetectedIds; + for(unsigned int i = 0; i < _board->ids.size(); i++) { + int foundIdx = -1; + for(unsigned int j = 0; j < _detectedIds.total(); j++) { + if(_board->ids[i] == _detectedIds.getMat().ptr< int >()[j]) { + foundIdx = j; + break; + } + } + + // not detected + if(foundIdx == -1) { + undetectedCorners.push_back(vector()); + undetectedIds.push_back(_board->ids[i]); + projectPoints(_board->objPoints[i], rvec, tvec, _cameraMatrix, _distCoeffs, + undetectedCorners.back()); + } + } + // parse output + Mat(undetectedIds).copyTo(_undetectedMarkersIds); + _undetectedMarkersProjectedCorners = undetectedCorners; +} + +/** + * Interpolate board markers that are not included in the list of detected markers using + * global homography + */ +static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArrayOfArrays _detectedCorners, + InputOutputArray _detectedIds, + vector >& _undetectedMarkersProjectedCorners, + OutputArray _undetectedMarkersIds) { + // check board points are in the 
same plane, if not, global homography cannot be applied + CV_Assert(_board->objPoints.size() > 0); + CV_Assert(_board->objPoints[0].size() > 0); + float boardZ = _board->objPoints[0][0].z; + for(unsigned int i = 0; i < _board->objPoints.size(); i++) { + for(unsigned int j = 0; j < _board->objPoints[i].size(); j++) + CV_Assert(boardZ == _board->objPoints[i][j].z); + } + + vector detectedMarkersObj2DAll; // Object coordinates (without Z) of all the detected + // marker corners in a single vector + vector imageCornersAll; // Image corners of all detected markers in a single vector + vector > undetectedMarkersObj2D; // Object coordinates (without Z) of all + // missing markers in different vectors + vector undetectedMarkersIds; // ids of missing markers + // find markers included in board, and missing markers from board. Fill the previous vectors + for(unsigned int j = 0; j < _board->ids.size(); j++) { + bool found = false; + for(unsigned int i = 0; i < _detectedIds.total(); i++) { + if(_detectedIds.getMat().ptr< int >()[i] == _board->ids[j]) { + for(int c = 0; c < 4; c++) { + imageCornersAll.push_back(_detectedCorners.getMat(i).ptr< Point2f >()[c]); + detectedMarkersObj2DAll.push_back( + Point2f(_board->objPoints[j][c].x, _board->objPoints[j][c].y)); + } + found = true; + break; + } + } + if(!found) { + undetectedMarkersObj2D.push_back(vector()); + for(int c = 0; c < 4; c++) { + undetectedMarkersObj2D.back().push_back( + Point2f(_board->objPoints[j][c].x, _board->objPoints[j][c].y)); + } + undetectedMarkersIds.push_back(_board->ids[j]); + } + } + if(imageCornersAll.size() == 0) return; + + // get homography from detected markers + Mat transformation = findHomography(detectedMarkersObj2DAll, imageCornersAll); + + _undetectedMarkersProjectedCorners.resize(undetectedMarkersIds.size()); + + // for each undetected marker, apply transformation + for(unsigned int i = 0; i < undetectedMarkersObj2D.size(); i++) { + perspectiveTransform(undetectedMarkersObj2D[i], 
_undetectedMarkersProjectedCorners[i], transformation); + } + Mat(undetectedMarkersIds).copyTo(_undetectedMarkersIds); +} + + +void ArucoDetector::refineDetectedMarkers(InputArray _image, const Ptr &_board, + InputOutputArrayOfArrays _detectedCorners, InputOutputArray _detectedIds, + InputOutputArrayOfArrays _rejectedCorners, InputArray _cameraMatrix, + InputArray _distCoeffs, OutputArray _recoveredIdxs) { + CV_Assert(refineParams->minRepDistance > 0); + + if(_detectedIds.total() == 0 || _rejectedCorners.total() == 0) return; + + // get projections of missing markers in the board + vector< vector< Point2f > > undetectedMarkersCorners; + vector< int > undetectedMarkersIds; + if(_cameraMatrix.total() != 0) { + // reproject based on camera projection model + _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, _cameraMatrix, _distCoeffs, + undetectedMarkersCorners, undetectedMarkersIds); + + } else { + // reproject based on global homography + _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, undetectedMarkersCorners, + undetectedMarkersIds); + } + + // list of missing markers indicating if they have been assigned to a candidate + vector< bool > alreadyIdentified(_rejectedCorners.total(), false); + + // maximum bits that can be corrected + int maxCorrectionRecalculated = + int(double(dictionary->maxCorrectionBits) * refineParams->errorCorrectionRate); + + Mat grey; + _convertToGrey(_image, grey); + + // vector of final detected marker corners and ids + vector > finalAcceptedCorners; + vector< int > finalAcceptedIds; + // fill with the current markers + finalAcceptedCorners.resize(_detectedCorners.total()); + finalAcceptedIds.resize(_detectedIds.total()); + for(unsigned int i = 0; i < _detectedIds.total(); i++) { + finalAcceptedCorners[i] = _detectedCorners.getMat(i).clone(); + finalAcceptedIds[i] = _detectedIds.getMat().ptr< int >()[i]; + } + vector< int > recoveredIdxs; // original indexes of accepted markers in _rejectedCorners + + // 
for each missing marker, try to find a correspondence + for(unsigned int i = 0; i < undetectedMarkersIds.size(); i++) { + + // best match at the moment + int closestCandidateIdx = -1; + double closestCandidateDistance = refineParams->minRepDistance * refineParams->minRepDistance + 1; + Mat closestRotatedMarker; + + for(unsigned int j = 0; j < _rejectedCorners.total(); j++) { + if(alreadyIdentified[j]) continue; + + // check distance + double minDistance = closestCandidateDistance + 1; + bool valid = false; + int validRot = 0; + for(int c = 0; c < 4; c++) { // first corner in rejected candidate + double currentMaxDistance = 0; + for(int k = 0; k < 4; k++) { + Point2f rejCorner = _rejectedCorners.getMat(j).ptr< Point2f >()[(c + k) % 4]; + Point2f distVector = undetectedMarkersCorners[i][k] - rejCorner; + double cornerDist = distVector.x * distVector.x + distVector.y * distVector.y; + currentMaxDistance = max(currentMaxDistance, cornerDist); + } + // if distance is better than current best distance + if(currentMaxDistance < closestCandidateDistance) { + valid = true; + validRot = c; + minDistance = currentMaxDistance; + } + if(!refineParams->checkAllOrders) break; + } + + if(!valid) continue; + + // apply rotation + Mat rotatedMarker; + if(refineParams->checkAllOrders) { + rotatedMarker = Mat(4, 1, CV_32FC2); + for(int c = 0; c < 4; c++) + rotatedMarker.ptr< Point2f >()[c] = + _rejectedCorners.getMat(j).ptr< Point2f >()[(c + 4 + validRot) % 4]; + } + else rotatedMarker = _rejectedCorners.getMat(j); + + // last filter, check if inner code is close enough to the assigned marker code + int codeDistance = 0; + // if errorCorrectionRate, dont check code + if(refineParams->errorCorrectionRate >= 0) { + + // extract bits + Mat bits = _extractBits( + grey, rotatedMarker, dictionary->markerSize, params->markerBorderBits, + params->perspectiveRemovePixelPerCell, + params->perspectiveRemoveIgnoredMarginPerCell, params->minOtsuStdDev); + + Mat onlyBits = + 
bits.rowRange(params->markerBorderBits, bits.rows - params->markerBorderBits) + .colRange(params->markerBorderBits, bits.rows - params->markerBorderBits); + + codeDistance = + dictionary->getDistanceToId(onlyBits, undetectedMarkersIds[i], false); + } + + // if everythin is ok, assign values to current best match + if(refineParams->errorCorrectionRate < 0 || codeDistance < maxCorrectionRecalculated) { + closestCandidateIdx = j; + closestCandidateDistance = minDistance; + closestRotatedMarker = rotatedMarker; + } + } + + // if at least one good match, we have rescue the missing marker + if(closestCandidateIdx >= 0) { + + // subpixel refinement + if(params->cornerRefinementMethod == CORNER_REFINE_SUBPIX) { + CV_Assert(params->cornerRefinementWinSize > 0 && + params->cornerRefinementMaxIterations > 0 && + params->cornerRefinementMinAccuracy > 0); + cornerSubPix(grey, closestRotatedMarker, + Size(params->cornerRefinementWinSize, params->cornerRefinementWinSize), + Size(-1, -1), TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, + params->cornerRefinementMaxIterations, + params->cornerRefinementMinAccuracy)); + } + + // remove from rejected + alreadyIdentified[closestCandidateIdx] = true; + + // add to detected + finalAcceptedCorners.push_back(closestRotatedMarker); + finalAcceptedIds.push_back(undetectedMarkersIds[i]); + + // add the original index of the candidate + recoveredIdxs.push_back(closestCandidateIdx); + } + } + + // parse output + if(finalAcceptedIds.size() != _detectedIds.total()) { + // parse output + Mat(finalAcceptedIds).copyTo(_detectedIds); + _copyVector2Output(finalAcceptedCorners, _detectedCorners); + + // recalculate _rejectedCorners based on alreadyIdentified + vector > finalRejected; + for(unsigned int i = 0; i < alreadyIdentified.size(); i++) { + if(!alreadyIdentified[i]) { + finalRejected.push_back(_rejectedCorners.getMat(i).clone()); + } + } + _copyVector2Output(finalRejected, _rejectedCorners); + + if(_recoveredIdxs.needed()) { + 
Mat(recoveredIdxs).copyTo(_recoveredIdxs); + } + } +} + + +void drawDetectedMarkers(InputOutputArray _image, InputArrayOfArrays _corners, + InputArray _ids, Scalar borderColor) { + CV_Assert(_image.getMat().total() != 0 && + (_image.getMat().channels() == 1 || _image.getMat().channels() == 3)); + CV_Assert((_corners.total() == _ids.total()) || _ids.total() == 0); + + // calculate colors + Scalar textColor, cornerColor; + textColor = cornerColor = borderColor; + swap(textColor.val[0], textColor.val[1]); // text color just sawp G and R + swap(cornerColor.val[1], cornerColor.val[2]); // corner color just sawp G and B + + int nMarkers = (int)_corners.total(); + for(int i = 0; i < nMarkers; i++) { + Mat currentMarker = _corners.getMat(i); + CV_Assert(currentMarker.total() == 4 && currentMarker.type() == CV_32FC2); + + // draw marker sides + for(int j = 0; j < 4; j++) { + Point2f p0, p1; + p0 = currentMarker.ptr< Point2f >(0)[j]; + p1 = currentMarker.ptr< Point2f >(0)[(j + 1) % 4]; + line(_image, p0, p1, borderColor, 1); + } + // draw first corner mark + rectangle(_image, currentMarker.ptr< Point2f >(0)[0] - Point2f(3, 3), + currentMarker.ptr< Point2f >(0)[0] + Point2f(3, 3), cornerColor, 1, LINE_AA); + + // draw ID + if(_ids.total() != 0) { + Point2f cent(0, 0); + for(int p = 0; p < 4; p++) + cent += currentMarker.ptr< Point2f >(0)[p]; + cent = cent / 4.; + stringstream s; + s << "id=" << _ids.getMat().ptr< int >(0)[i]; + putText(_image, s.str(), cent, FONT_HERSHEY_SIMPLEX, 0.5, textColor, 2); + } + } +} + +void drawMarker(const Ptr &dictionary, int id, int sidePixels, OutputArray _img, int borderBits) { + dictionary->drawMarker(id, sidePixels, _img, borderBits); +} + +} +} diff --git a/modules/aruco/src/aruco_utils.cpp b/modules/aruco/src/aruco_utils.cpp new file mode 100644 index 00000000000..8d680848caa --- /dev/null +++ b/modules/aruco/src/aruco_utils.cpp @@ -0,0 +1,50 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include "aruco_utils.hpp" +#include + +namespace cv { +namespace aruco { +using namespace std; + +void _copyVector2Output(std::vector > &vec, OutputArrayOfArrays out, const float scale) { + out.create((int)vec.size(), 1, CV_32FC2); + if(out.isMatVector()) { + for (unsigned int i = 0; i < vec.size(); i++) { + out.create(4, 1, CV_32FC2, i); + Mat &m = out.getMatRef(i); + Mat(Mat(vec[i]).t()*scale).copyTo(m); + } + } + else if(out.isUMatVector()) { + for (unsigned int i = 0; i < vec.size(); i++) { + out.create(4, 1, CV_32FC2, i); + UMat &m = out.getUMatRef(i); + Mat(Mat(vec[i]).t()*scale).copyTo(m); + } + } + else if(out.kind() == _OutputArray::STD_VECTOR_VECTOR){ + for (unsigned int i = 0; i < vec.size(); i++) { + out.create(4, 1, CV_32FC2, i); + Mat m = out.getMat(i); + Mat(Mat(vec[i]).t()*scale).copyTo(m); + } + } + else { + CV_Error(cv::Error::StsNotImplemented, + "Only Mat vector, UMat vector, and vector OutputArrays are currently supported."); + } +} + +void _convertToGrey(InputArray _in, OutputArray _out) { + CV_Assert(_in.type() == CV_8UC1 || _in.type() == CV_8UC3); + if(_in.type() == CV_8UC3) + cvtColor(_in, _out, COLOR_BGR2GRAY); + else + _in.copyTo(_out); +} + +} +} diff --git a/modules/aruco/src/aruco_utils.hpp b/modules/aruco/src/aruco_utils.hpp new file mode 100644 index 00000000000..029cd3fa81d --- /dev/null +++ b/modules/aruco/src/aruco_utils.hpp @@ -0,0 +1,44 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html +#ifndef __OPENCV_ARUCO_UTILS_HPP__ +#define __OPENCV_ARUCO_UTILS_HPP__ + +#include +#include + +namespace cv { +namespace aruco { + +/** + * @brief Copy the contents of a corners vector to an OutputArray, settings its size. 
+ */ +void _copyVector2Output(std::vector > &vec, OutputArrayOfArrays out, const float scale = 1.f); + +/** + * @brief Convert input image to gray if it is a 3-channels image + */ +void _convertToGrey(InputArray _in, OutputArray _out); + +template +inline bool readParameter(const std::string& name, T& parameter, const FileNode& node) +{ + if (!node.empty() && !node[name].empty()) { + node[name] >> parameter; + return true; + } + return false; +} + +template +inline bool readWriteParameter(const std::string& name, T& parameter, const Ptr readNode = nullptr, + const Ptr writeStorage = nullptr) { + if (!readNode.empty()) + return readParameter(name, parameter, *readNode); + *writeStorage << name << parameter; + return true; +} + +} +} +#endif diff --git a/modules/aruco/src/board.cpp b/modules/aruco/src/board.cpp new file mode 100644 index 00000000000..d680e3b0843 --- /dev/null +++ b/modules/aruco/src/board.cpp @@ -0,0 +1,381 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include +#include +#include + +namespace cv { +namespace aruco { +using namespace std; + +/** + * @brief Implementation of drawPlanarBoard that accepts a raw Board pointer. 
+ */ +static void _drawPlanarBoardImpl(Board *_board, Size outSize, OutputArray _img, int marginSize, int borderBits) { + CV_Assert(!outSize.empty()); + CV_Assert(marginSize >= 0); + + _img.create(outSize, CV_8UC1); + Mat out = _img.getMat(); + out.setTo(Scalar::all(255)); + out.adjustROI(-marginSize, -marginSize, -marginSize, -marginSize); + + // calculate max and min values in XY plane + CV_Assert(_board->objPoints.size() > 0); + float minX, maxX, minY, maxY; + minX = maxX = _board->objPoints[0][0].x; + minY = maxY = _board->objPoints[0][0].y; + + for(unsigned int i = 0; i < _board->objPoints.size(); i++) { + for(int j = 0; j < 4; j++) { + minX = min(minX, _board->objPoints[i][j].x); + maxX = max(maxX, _board->objPoints[i][j].x); + minY = min(minY, _board->objPoints[i][j].y); + maxY = max(maxY, _board->objPoints[i][j].y); + } + } + + float sizeX = maxX - minX; + float sizeY = maxY - minY; + + // proportion transformations + float xReduction = sizeX / float(out.cols); + float yReduction = sizeY / float(out.rows); + + // determine the zone where the markers are placed + if(xReduction > yReduction) { + int nRows = int(sizeY / xReduction); + int rowsMargins = (out.rows - nRows) / 2; + out.adjustROI(-rowsMargins, -rowsMargins, 0, 0); + } else { + int nCols = int(sizeX / yReduction); + int colsMargins = (out.cols - nCols) / 2; + out.adjustROI(0, 0, -colsMargins, -colsMargins); + } + + // now paint each marker + Dictionary &dictionary = *(_board->dictionary); + Mat marker; + Point2f outCorners[3]; + Point2f inCorners[3]; + for(unsigned int m = 0; m < _board->objPoints.size(); m++) { + // transform corners to markerZone coordinates + for(int j = 0; j < 3; j++) { + Point2f pf = Point2f(_board->objPoints[m][j].x, _board->objPoints[m][j].y); + // move top left to 0, 0 + pf -= Point2f(minX, minY); + pf.x = pf.x / sizeX * float(out.cols); + pf.y = pf.y / sizeY * float(out.rows); + outCorners[j] = pf; + } + + // get marker + Size dst_sz(outCorners[2] - outCorners[0]); // 
assuming CCW order + dst_sz.width = dst_sz.height = std::min(dst_sz.width, dst_sz.height); //marker should be square + dictionary.drawMarker(_board->ids[m], dst_sz.width, marker, borderBits); + + if((outCorners[0].y == outCorners[1].y) && (outCorners[1].x == outCorners[2].x)) { + // marker is aligned to image axes + marker.copyTo(out(Rect(outCorners[0], dst_sz))); + continue; + } + + // interpolate tiny marker to marker position in markerZone + inCorners[0] = Point2f(-0.5f, -0.5f); + inCorners[1] = Point2f(marker.cols - 0.5f, -0.5f); + inCorners[2] = Point2f(marker.cols - 0.5f, marker.rows - 0.5f); + + // remove perspective + Mat transformation = getAffineTransform(inCorners, outCorners); + warpAffine(marker, out, transformation, out.size(), INTER_LINEAR, + BORDER_TRANSPARENT); + } +} + +void drawPlanarBoard(const Ptr &_board, Size outSize, OutputArray _img, int marginSize, + int borderBits) { + _drawPlanarBoardImpl(_board, outSize, _img, marginSize, borderBits); +} + +Ptr Board::create(InputArrayOfArrays objPoints, const Ptr &dictionary, InputArray ids) { + CV_Assert(objPoints.total() == ids.total()); + CV_Assert(objPoints.type() == CV_32FC3 || objPoints.type() == CV_32FC1); + + std::vector > obj_points_vector; + Point3f rightBottomBorder = Point3f(0.f, 0.f, 0.f); + for (unsigned int i = 0; i < objPoints.total(); i++) { + std::vector corners; + Mat corners_mat = objPoints.getMat(i); + + if (corners_mat.type() == CV_32FC1) + corners_mat = corners_mat.reshape(3); + CV_Assert(corners_mat.total() == 4); + + for (int j = 0; j < 4; j++) { + const Point3f &corner = corners_mat.at(j); + corners.push_back(corner); + rightBottomBorder.x = std::max(rightBottomBorder.x, corner.x); + rightBottomBorder.y = std::max(rightBottomBorder.y, corner.y); + rightBottomBorder.z = std::max(rightBottomBorder.z, corner.z); + } + obj_points_vector.push_back(corners); + } + Ptr res = makePtr(); + ids.copyTo(res->ids); + res->objPoints = obj_points_vector; + res->dictionary = 
cv::makePtr(dictionary); + res->rightBottomBorder = rightBottomBorder; + return res; +} + +void Board::setIds(InputArray ids_) { + CV_Assert(objPoints.size() == ids_.total()); + ids_.copyTo(this->ids); +} + +Ptr GridBoard::create(int markersX, int markersY, float markerLength, float markerSeparation, + const Ptr &dictionary, int firstMarker) { + CV_Assert(markersX > 0 && markersY > 0 && markerLength > 0 && markerSeparation > 0); + Ptr res = makePtr(); + res->_markersX = markersX; + res->_markersY = markersY; + res->_markerLength = markerLength; + res->_markerSeparation = markerSeparation; + res->dictionary = dictionary; + + size_t totalMarkers = (size_t) markersX * markersY; + res->ids.resize(totalMarkers); + res->objPoints.reserve(totalMarkers); + + // fill ids with first identifiers + for (unsigned int i = 0; i < totalMarkers; i++) { + res->ids[i] = i + firstMarker; + } + + // calculate Board objPoints + for (int y = 0; y < markersY; y++) { + for (int x = 0; x < markersX; x++) { + vector corners(4); + corners[0] = Point3f(x * (markerLength + markerSeparation), + y * (markerLength + markerSeparation), 0); + corners[1] = corners[0] + Point3f(markerLength, 0, 0); + corners[2] = corners[0] + Point3f(markerLength, markerLength, 0); + corners[3] = corners[0] + Point3f(0, markerLength, 0); + res->objPoints.push_back(corners); + } + } + res->rightBottomBorder = Point3f(markersX * markerLength + markerSeparation * (markersX - 1), + markersY * markerLength + markerSeparation * (markersY - 1), 0.f); + return res; +} + +void GridBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) { + _drawPlanarBoardImpl((Board*)this, outSize, _img, marginSize, borderBits); +} + +void CharucoBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) { + CV_Assert(!outSize.empty()); + CV_Assert(marginSize >= 0); + + _img.create(outSize, CV_8UC1); + _img.setTo(255); + Mat out = _img.getMat(); + Mat noMarginsImg = + out.colRange(marginSize, out.cols - 
marginSize).rowRange(marginSize, out.rows - marginSize); + + double totalLengthX, totalLengthY; + totalLengthX = _squareLength * _squaresX; + totalLengthY = _squareLength * _squaresY; + + // proportional transformation + double xReduction = totalLengthX / double(noMarginsImg.cols); + double yReduction = totalLengthY / double(noMarginsImg.rows); + + // determine the zone where the chessboard is placed + Mat chessboardZoneImg; + if(xReduction > yReduction) { + int nRows = int(totalLengthY / xReduction); + int rowsMargins = (noMarginsImg.rows - nRows) / 2; + chessboardZoneImg = noMarginsImg.rowRange(rowsMargins, noMarginsImg.rows - rowsMargins); + } else { + int nCols = int(totalLengthX / yReduction); + int colsMargins = (noMarginsImg.cols - nCols) / 2; + chessboardZoneImg = noMarginsImg.colRange(colsMargins, noMarginsImg.cols - colsMargins); + } + + // determine the margins to draw only the markers + // take the minimum just to be sure + double squareSizePixels = min(double(chessboardZoneImg.cols) / double(_squaresX), + double(chessboardZoneImg.rows) / double(_squaresY)); + + double diffSquareMarkerLength = (_squareLength - _markerLength) / 2; + int diffSquareMarkerLengthPixels = + int(diffSquareMarkerLength * squareSizePixels / _squareLength); + + // draw markers + Mat markersImg; + _drawPlanarBoardImpl(this, chessboardZoneImg.size(), markersImg, diffSquareMarkerLengthPixels, borderBits); + markersImg.copyTo(chessboardZoneImg); + + // now draw black squares + for(int y = 0; y < _squaresY; y++) { + for(int x = 0; x < _squaresX; x++) { + + if(y % 2 != x % 2) continue; // white corner, dont do anything + + double startX, startY; + startX = squareSizePixels * double(x); + startY = squareSizePixels * double(y); + + Mat squareZone = chessboardZoneImg.rowRange(int(startY), int(startY + squareSizePixels)) + .colRange(int(startX), int(startX + squareSizePixels)); + + squareZone.setTo(0); + } + } +} + +Ptr CharucoBoard::create(int squaresX, int squaresY, float squareLength, + 
float markerLength, const Ptr &dictionary) { + CV_Assert(squaresX > 1 && squaresY > 1 && markerLength > 0 && squareLength > markerLength); + Ptr res = makePtr(); + + res->_squaresX = squaresX; + res->_squaresY = squaresY; + res->_squareLength = squareLength; + res->_markerLength = markerLength; + res->dictionary = dictionary; + + float diffSquareMarkerLength = (squareLength - markerLength) / 2; + // calculate Board objPoints + for(int y = 0; y < squaresY; y++) { + for(int x = 0; x < squaresX; x++) { + + if(y % 2 == x % 2) continue; // black corner, no marker here + + vector corners(4); + corners[0] = Point3f(x * squareLength + diffSquareMarkerLength, + y * squareLength + diffSquareMarkerLength, 0); + corners[1] = corners[0] + Point3f(markerLength, 0, 0); + corners[2] = corners[0] + Point3f(markerLength, markerLength, 0); + corners[3] = corners[0] + Point3f(0, markerLength, 0); + res->objPoints.push_back(corners); + // first ids in dictionary + int nextId = (int)res->ids.size(); + res->ids.push_back(nextId); + } + } + + // now fill chessboardCorners + for(int y = 0; y < squaresY - 1; y++) { + for(int x = 0; x < squaresX - 1; x++) { + Point3f corner; + corner.x = (x + 1) * squareLength; + corner.y = (y + 1) * squareLength; + corner.z = 0; + res->chessboardCorners.push_back(corner); + } + } + res->rightBottomBorder = Point3f(squaresX * squareLength, + squaresY * squareLength, 0.f); + res->_getNearestMarkerCorners(); + + return res; +} + +/** + * Fill nearestMarkerIdx and nearestMarkerCorners arrays + */ +void CharucoBoard::_getNearestMarkerCorners() { + nearestMarkerIdx.resize(chessboardCorners.size()); + nearestMarkerCorners.resize(chessboardCorners.size()); + + unsigned int nMarkers = (unsigned int)ids.size(); + unsigned int nCharucoCorners = (unsigned int)chessboardCorners.size(); + for(unsigned int i = 0; i < nCharucoCorners; i++) { + double minDist = -1; // distance of closest markers + Point3f charucoCorner = chessboardCorners[i]; + for(unsigned int j = 0; j < 
nMarkers; j++) { + // calculate distance from marker center to charuco corner + Point3f center = Point3f(0, 0, 0); + for(unsigned int k = 0; k < 4; k++) + center += objPoints[j][k]; + center /= 4.; + double sqDistance; + Point3f distVector = charucoCorner - center; + sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; + if(j == 0 || fabs(sqDistance - minDist) < cv::pow(0.01 * _squareLength, 2)) { + // if same minimum distance (or first iteration), add to nearestMarkerIdx vector + nearestMarkerIdx[i].push_back(j); + minDist = sqDistance; + } else if(sqDistance < minDist) { + // if finding a closest marker to the charuco corner + nearestMarkerIdx[i].clear(); // remove any previous added marker + nearestMarkerIdx[i].push_back(j); // add the new closest marker index + minDist = sqDistance; + } + } + // for each of the closest markers, search the marker corner index closer + // to the charuco corner + for(unsigned int j = 0; j < nearestMarkerIdx[i].size(); j++) { + nearestMarkerCorners[i].resize(nearestMarkerIdx[i].size()); + double minDistCorner = -1; + for(unsigned int k = 0; k < 4; k++) { + double sqDistance; + Point3f distVector = charucoCorner - objPoints[nearestMarkerIdx[i][j]][k]; + sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; + if(k == 0 || sqDistance < minDistCorner) { + // if this corner is closer to the charuco corner, assing its index + // to nearestMarkerCorners + minDistCorner = sqDistance; + nearestMarkerCorners[i][j] = k; + } + } + } + } +} + +bool testCharucoCornersCollinear(const Ptr &_board, InputArray _charucoIds) { + unsigned int nCharucoCorners = (unsigned int)_charucoIds.getMat().total(); + if (nCharucoCorners <= 2) + return true; + + // only test if there are 3 or more corners + CV_Assert( _board->chessboardCorners.size() >= _charucoIds.getMat().total()); + + Vec point0( _board->chessboardCorners[_charucoIds.getMat().at< int >(0)].x, + _board->chessboardCorners[_charucoIds.getMat().at< int >(0)].y, 
1); + + Vec point1( _board->chessboardCorners[_charucoIds.getMat().at< int >(1)].x, + _board->chessboardCorners[_charucoIds.getMat().at< int >(1)].y, 1); + + // create a line from the first two points. + Vec testLine = point0.cross(point1); + Vec testPoint(0, 0, 1); + + double divisor = sqrt(testLine[0]*testLine[0] + testLine[1]*testLine[1]); + CV_Assert(divisor != 0.0); + + // normalize the line with normal + testLine /= divisor; + + double dotProduct; + for (unsigned int i = 2; i < nCharucoCorners; i++){ + testPoint(0) = _board->chessboardCorners[_charucoIds.getMat().at< int >(i)].x; + testPoint(1) = _board->chessboardCorners[_charucoIds.getMat().at< int >(i)].y; + + // if testPoint is on testLine, dotProduct will be zero (or very, very close) + dotProduct = testPoint.dot(testLine); + + if (std::abs(dotProduct) > 1e-6){ + return false; + } + } + // no points found that were off of testLine, return true that all points collinear. + return true; +} + +} +} diff --git a/modules/aruco/src/charuco.cpp b/modules/aruco/src/charuco.cpp index b179bc63055..0aac51e3b14 100644 --- a/modules/aruco/src/charuco.cpp +++ b/modules/aruco/src/charuco.cpp @@ -1,40 +1,6 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. 
- - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html #include "precomp.hpp" #include "opencv2/aruco/charuco.hpp" @@ -45,187 +11,6 @@ namespace cv { namespace aruco { using namespace std; - - - -/** - */ -void CharucoBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) { - - CV_Assert(!outSize.empty()); - CV_Assert(marginSize >= 0); - - _img.create(outSize, CV_8UC1); - _img.setTo(255); - Mat out = _img.getMat(); - Mat noMarginsImg = - out.colRange(marginSize, out.cols - marginSize).rowRange(marginSize, out.rows - marginSize); - - double totalLengthX, totalLengthY; - totalLengthX = _squareLength * _squaresX; - totalLengthY = _squareLength * _squaresY; - - // proportional transformation - double xReduction = totalLengthX / double(noMarginsImg.cols); - double yReduction = totalLengthY / double(noMarginsImg.rows); - - // determine the zone where the chessboard is placed - Mat chessboardZoneImg; - if(xReduction > yReduction) { - int nRows = int(totalLengthY / xReduction); - int rowsMargins = (noMarginsImg.rows - nRows) / 2; - chessboardZoneImg = noMarginsImg.rowRange(rowsMargins, noMarginsImg.rows - rowsMargins); - } else { - int nCols = int(totalLengthX / yReduction); - int colsMargins = (noMarginsImg.cols - nCols) / 2; - chessboardZoneImg = noMarginsImg.colRange(colsMargins, noMarginsImg.cols - colsMargins); - } - - // determine the margins to draw only the markers - // take the minimum just to be sure - double squareSizePixels = min(double(chessboardZoneImg.cols) / double(_squaresX), - double(chessboardZoneImg.rows) / double(_squaresY)); - - double diffSquareMarkerLength = (_squareLength - _markerLength) / 2; - int diffSquareMarkerLengthPixels = - int(diffSquareMarkerLength * squareSizePixels / _squareLength); - - // draw markers - Mat markersImg; - aruco::_drawPlanarBoardImpl(this, chessboardZoneImg.size(), markersImg, - 
diffSquareMarkerLengthPixels, borderBits); - - markersImg.copyTo(chessboardZoneImg); - - // now draw black squares - for(int y = 0; y < _squaresY; y++) { - for(int x = 0; x < _squaresX; x++) { - - if(y % 2 != x % 2) continue; // white corner, dont do anything - - double startX, startY; - startX = squareSizePixels * double(x); - startY = squareSizePixels * double(y); - - Mat squareZone = chessboardZoneImg.rowRange(int(startY), int(startY + squareSizePixels)) - .colRange(int(startX), int(startX + squareSizePixels)); - - squareZone.setTo(0); - } - } -} - - - -/** - */ -Ptr CharucoBoard::create(int squaresX, int squaresY, float squareLength, - float markerLength, const Ptr &dictionary) { - - CV_Assert(squaresX > 1 && squaresY > 1 && markerLength > 0 && squareLength > markerLength); - Ptr res = makePtr(); - - res->_squaresX = squaresX; - res->_squaresY = squaresY; - res->_squareLength = squareLength; - res->_markerLength = markerLength; - res->dictionary = dictionary; - - float diffSquareMarkerLength = (squareLength - markerLength) / 2; - - // calculate Board objPoints - for(int y = 0; y < squaresY; y++) { - for(int x = 0; x < squaresX; x++) { - - if(y % 2 == x % 2) continue; // black corner, no marker here - - vector corners(4); - corners[0] = Point3f(x * squareLength + diffSquareMarkerLength, - y * squareLength + diffSquareMarkerLength, 0); - corners[1] = corners[0] + Point3f(markerLength, 0, 0); - corners[2] = corners[0] + Point3f(markerLength, markerLength, 0); - corners[3] = corners[0] + Point3f(0, markerLength, 0); - res->objPoints.push_back(corners); - // first ids in dictionary - int nextId = (int)res->ids.size(); - res->ids.push_back(nextId); - } - } - - // now fill chessboardCorners - for(int y = 0; y < squaresY - 1; y++) { - for(int x = 0; x < squaresX - 1; x++) { - Point3f corner; - corner.x = (x + 1) * squareLength; - corner.y = (y + 1) * squareLength; - corner.z = 0; - res->chessboardCorners.push_back(corner); - } - } - res->rightBottomBorder = 
Point3f(squaresX * squareLength, - squaresY * squareLength, 0.f); - res->_getNearestMarkerCorners(); - - return res; -} - - - -/** - * Fill nearestMarkerIdx and nearestMarkerCorners arrays - */ -void CharucoBoard::_getNearestMarkerCorners() { - - nearestMarkerIdx.resize(chessboardCorners.size()); - nearestMarkerCorners.resize(chessboardCorners.size()); - - unsigned int nMarkers = (unsigned int)ids.size(); - unsigned int nCharucoCorners = (unsigned int)chessboardCorners.size(); - for(unsigned int i = 0; i < nCharucoCorners; i++) { - double minDist = -1; // distance of closest markers - Point3f charucoCorner = chessboardCorners[i]; - for(unsigned int j = 0; j < nMarkers; j++) { - // calculate distance from marker center to charuco corner - Point3f center = Point3f(0, 0, 0); - for(unsigned int k = 0; k < 4; k++) - center += objPoints[j][k]; - center /= 4.; - double sqDistance; - Point3f distVector = charucoCorner - center; - sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; - if(j == 0 || fabs(sqDistance - minDist) < cv::pow(0.01 * _squareLength, 2)) { - // if same minimum distance (or first iteration), add to nearestMarkerIdx vector - nearestMarkerIdx[i].push_back(j); - minDist = sqDistance; - } else if(sqDistance < minDist) { - // if finding a closest marker to the charuco corner - nearestMarkerIdx[i].clear(); // remove any previous added marker - nearestMarkerIdx[i].push_back(j); // add the new closest marker index - minDist = sqDistance; - } - } - - // for each of the closest markers, search the marker corner index closer - // to the charuco corner - for(unsigned int j = 0; j < nearestMarkerIdx[i].size(); j++) { - nearestMarkerCorners[i].resize(nearestMarkerIdx[i].size()); - double minDistCorner = -1; - for(unsigned int k = 0; k < 4; k++) { - double sqDistance; - Point3f distVector = charucoCorner - objPoints[nearestMarkerIdx[i][j]][k]; - sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; - if(k == 0 || sqDistance < 
minDistCorner) { - // if this corner is closer to the charuco corner, assing its index - // to nearestMarkerCorners - minDistCorner = sqDistance; - nearestMarkerCorners[i][j] = k; - } - } - } - } -} - - /** * Remove charuco corners if any of their minMarkers closest markers has not been detected */ @@ -388,7 +173,6 @@ static void _getMaximumSubPixWindowSizes(InputArrayOfArrays markerCorners, Input } - /** * Interpolate charuco corners using approximated pose estimation */ @@ -432,7 +216,6 @@ static int _interpolateCornersCharucoApproxCalib(InputArrayOfArrays _markerCorne } - /** * Interpolate charuco corners using local homography */ @@ -524,9 +307,6 @@ static int _interpolateCornersCharucoLocalHom(InputArrayOfArrays _markerCorners, } - -/** - */ int interpolateCornersCharuco(InputArrayOfArrays _markerCorners, InputArray _markerIds, InputArray _image, const Ptr &_board, OutputArray _charucoCorners, OutputArray _charucoIds, @@ -550,9 +330,6 @@ int interpolateCornersCharuco(InputArrayOfArrays _markerCorners, InputArray _mar } - -/** - */ void drawDetectedCornersCharuco(InputOutputArray _image, InputArray _charucoCorners, InputArray _charucoIds, Scalar cornerColor) { @@ -580,128 +357,10 @@ void drawDetectedCornersCharuco(InputOutputArray _image, InputArray _charucoCorn } -/** - * Check if a set of 3d points are enough for calibration. Z coordinate is ignored. 
- * Only axis parallel lines are considered - */ -static bool _arePointsEnoughForPoseEstimation(const vector< Point3f > &points) { - - if(points.size() < 4) return false; - - vector< double > sameXValue; // different x values in points - vector< int > sameXCounter; // number of points with the x value in sameXValue - for(unsigned int i = 0; i < points.size(); i++) { - bool found = false; - for(unsigned int j = 0; j < sameXValue.size(); j++) { - if(sameXValue[j] == points[i].x) { - found = true; - sameXCounter[j]++; - } - } - if(!found) { - sameXValue.push_back(points[i].x); - sameXCounter.push_back(1); - } - } - - // count how many x values has more than 2 points - int moreThan2 = 0; - for(unsigned int i = 0; i < sameXCounter.size(); i++) { - if(sameXCounter[i] >= 2) moreThan2++; - } - - // if we have more than 1 two xvalues with more than 2 points, calibration is ok - if(moreThan2 > 1) - return true; - else - return false; -} - - -/** - */ -bool estimatePoseCharucoBoard(InputArray _charucoCorners, InputArray _charucoIds, - const Ptr &_board, InputArray _cameraMatrix, InputArray _distCoeffs, - InputOutputArray _rvec, InputOutputArray _tvec, bool useExtrinsicGuess) { - - CV_Assert((_charucoCorners.getMat().total() == _charucoIds.getMat().total())); - - // need, at least, 4 corners - if(_charucoIds.getMat().total() < 4) return false; - - vector< Point3f > objPoints; - objPoints.reserve(_charucoIds.getMat().total()); - for(unsigned int i = 0; i < _charucoIds.getMat().total(); i++) { - int currId = _charucoIds.getMat().at< int >(i); - CV_Assert(currId >= 0 && currId < (int)_board->chessboardCorners.size()); - objPoints.push_back(_board->chessboardCorners[currId]); - } - - // points need to be in different lines, check if detected points are enough - if(!_arePointsEnoughForPoseEstimation(objPoints)) return false; - - solvePnP(objPoints, _charucoCorners, _cameraMatrix, _distCoeffs, _rvec, _tvec, useExtrinsicGuess); - - return true; -} - - - - -/** - */ -double 
calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds, - const Ptr &_board, Size imageSize, - InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs, - OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, - OutputArray _stdDeviationsIntrinsics, - OutputArray _stdDeviationsExtrinsics, - OutputArray _perViewErrors, - int flags, TermCriteria criteria) { - - CV_Assert(_charucoIds.total() > 0 && (_charucoIds.total() == _charucoCorners.total())); - - // Join object points of charuco corners in a single vector for calibrateCamera() function - vector< vector< Point3f > > allObjPoints; - allObjPoints.resize(_charucoIds.total()); - for(unsigned int i = 0; i < _charucoIds.total(); i++) { - unsigned int nCorners = (unsigned int)_charucoIds.getMat(i).total(); - CV_Assert(nCorners > 0 && nCorners == _charucoCorners.getMat(i).total()); - allObjPoints[i].reserve(nCorners); - - for(unsigned int j = 0; j < nCorners; j++) { - int pointId = _charucoIds.getMat(i).at< int >(j); - CV_Assert(pointId >= 0 && pointId < (int)_board->chessboardCorners.size()); - allObjPoints[i].push_back(_board->chessboardCorners[pointId]); - } - } - - return calibrateCamera(allObjPoints, _charucoCorners, imageSize, _cameraMatrix, _distCoeffs, - _rvecs, _tvecs, _stdDeviationsIntrinsics, _stdDeviationsExtrinsics, - _perViewErrors, flags, criteria); -} - - - -/** - */ -double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds, - const Ptr &_board, Size imageSize, - InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs, - OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, - TermCriteria criteria) { - return calibrateCameraCharuco(_charucoCorners, _charucoIds, _board, imageSize, _cameraMatrix, _distCoeffs, _rvecs, - _tvecs, noArray(), noArray(), noArray(), flags, criteria); -} - - -/** - */ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners, InputArray _markerIds, float 
squareMarkerLengthRate, OutputArrayOfArrays _diamondCorners, OutputArray _diamondIds, InputArray _cameraMatrix, InputArray _distCoeffs, Ptr dictionary) { - CV_Assert(_markerIds.total() > 0 && _markerIds.total() == _markerCorners.total()); const float minRepDistanceRate = 1.302455f; @@ -768,10 +427,10 @@ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners, // try to find the rest of markers in the diamond vector< int > acceptedIdxs; Ptr _b = _charucoDiamondLayout.staticCast(); - aruco::refineDetectedMarkers(grey, _b, - currentMarker, currentMarkerId, - candidates, noArray(), noArray(), minRepDistance, -1, false, - acceptedIdxs); + Ptr refineParameters = makePtr(minRepDistance, -1, false); + ArucoDetector detector(dictionary, DetectorParameters::create(), refineParameters); + detector.refineDetectedMarkers(grey, _b, currentMarker, currentMarkerId, candidates, noArray(), noArray(), + acceptedIdxs); // if found, we have a diamond if(currentMarker.size() == 4) { @@ -826,13 +485,8 @@ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners, } - - -/** - */ void drawCharucoDiamond(const Ptr &dictionary, Vec4i ids, int squareLength, int markerLength, OutputArray _img, int marginSize, int borderBits) { - CV_Assert(squareLength > 0 && markerLength > 0 && squareLength > markerLength); CV_Assert(marginSize >= 0 && borderBits > 0); @@ -849,12 +503,8 @@ void drawCharucoDiamond(const Ptr &dictionary, Vec4i ids, int square } -/** - */ void drawDetectedDiamonds(InputOutputArray _image, InputArrayOfArrays _corners, InputArray _ids, Scalar borderColor) { - - CV_Assert(_image.getMat().total() != 0 && (_image.getMat().channels() == 1 || _image.getMat().channels() == 3)); CV_Assert((_corners.total() == _ids.total()) || _ids.total() == 0); @@ -895,59 +545,5 @@ void drawDetectedDiamonds(InputOutputArray _image, InputArrayOfArrays _corners, } } -/** - @param board layout of ChArUco board. 
- * @param image charucoIds list of identifiers for each corner in charucoCorners. - * @return bool value, 1 (true) for detected corners form a line, 0 for non-linear. - solvePnP will fail if the corners are collinear (true). - * Check that the set of charuco markers in _charucoIds does not identify a straight line on - the charuco board. Axis parallel, as well as diagonal and other straight lines detected. - */ - bool testCharucoCornersCollinear(const Ptr &_board, InputArray _charucoIds){ - - unsigned int nCharucoCorners = (unsigned int)_charucoIds.getMat().total(); - - if (nCharucoCorners <= 2) - return true; - - // only test if there are 3 or more corners - CV_Assert( _board->chessboardCorners.size() >= _charucoIds.getMat().total()); - - Vec point0( _board->chessboardCorners[_charucoIds.getMat().at< int >(0)].x, - _board->chessboardCorners[_charucoIds.getMat().at< int >(0)].y, - 1); - - Vec point1( _board->chessboardCorners[_charucoIds.getMat().at< int >(1)].x, - _board->chessboardCorners[_charucoIds.getMat().at< int >(1)].y, - 1); - - // create a line from the first two points. - Vec testLine = point0.cross(point1); - - Vec testPoint(0, 0, 1); - - double divisor = sqrt(testLine[0]*testLine[0] + testLine[1]*testLine[1]); - - CV_Assert( divisor != 0); - - // normalize the line with normal - testLine /= divisor; - - double dotProduct; - for (unsigned int i = 2; i < nCharucoCorners; i++){ - testPoint(0) = _board->chessboardCorners[_charucoIds.getMat().at< int >(i)].x; - testPoint(1) = _board->chessboardCorners[_charucoIds.getMat().at< int >(i)].y; - - // if testPoint is on testLine, dotProduct will be zero (or very, very close) - dotProduct = testPoint.dot(testLine); - - if (std::abs(dotProduct) > 1e-6){ - return false; - } - } - - // no points found that were off of testLine, return true that all points collinear. 
- return true; -} } } diff --git a/modules/aruco/src/dictionary.cpp b/modules/aruco/src/dictionary.cpp index 20d2526a47d..5672538f293 100644 --- a/modules/aruco/src/dictionary.cpp +++ b/modules/aruco/src/dictionary.cpp @@ -1,57 +1,22 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. 
In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html -#include "precomp.hpp" -#include "opencv2/aruco/dictionary.hpp" -#include #include -#include "predefined_dictionaries.hpp" -#include "predefined_dictionaries_apriltag.hpp" #include "opencv2/core/hal/hal.hpp" +#include "precomp.hpp" +#include "aruco_utils.hpp" +#include "predefined_dictionaries.hpp" +#include "apriltag/predefined_dictionaries_apriltag.hpp" +#include + namespace cv { namespace aruco { using namespace std; -/** - */ Dictionary::Dictionary(const Ptr &_dictionary) { markerSize = _dictionary->markerSize; maxCorrectionBits = _dictionary->maxCorrectionBits; @@ -59,8 +24,6 @@ Dictionary::Dictionary(const Ptr &_dictionary) { } -/** - */ Dictionary::Dictionary(const Mat &_bytesList, int _markerSize, int _maxcorr) { markerSize = _markerSize; maxCorrectionBits = _maxcorr; @@ -68,54 +31,40 @@ Dictionary::Dictionary(const Mat &_bytesList, int _markerSize, int _maxcorr) { } -/** - */ Ptr Dictionary::create(int nMarkers, int markerSize, int randomSeed) { const Ptr baseDictionary = makePtr(); return create(nMarkers, markerSize, baseDictionary, randomSeed); } -/** - */ Ptr Dictionary::create(int nMarkers, int markerSize, const Ptr &baseDictionary, int randomSeed) { - return generateCustomDictionary(nMarkers, markerSize, baseDictionary, randomSeed); } -template 
-static inline bool readParameter(const FileNode& node, T& parameter) -{ - if (!node.empty()) { - node >> parameter; - return true; - } - return false; -} -bool Dictionary::readDictionary(const cv::FileNode& fn) -{ +bool Dictionary::readDictionary(const cv::FileNode& fn) { int nMarkers = 0, _markerSize = 0; - if (fn.empty() || !readParameter(fn["nmarkers"], nMarkers) || !readParameter(fn["markersize"], _markerSize)) + if (fn.empty() || !readParameter("nmarkers", nMarkers, fn) || !readParameter("markersize", _markerSize, fn)) return false; Mat bytes(0, 0, CV_8UC1), marker(_markerSize, _markerSize, CV_8UC1); std::string markerString; for (int i = 0; i < nMarkers; i++) { std::ostringstream ostr; ostr << i; - if (!readParameter(fn["marker_" + ostr.str()], markerString)) + if (!readParameter("marker_" + ostr.str(), markerString, fn)) return false; for (int j = 0; j < (int) markerString.size(); j++) marker.at(j) = (markerString[j] == '0') ? 0 : 1; bytes.push_back(Dictionary::getByteListFromBits(marker)); } int _maxCorrectionBits = 0; - readParameter(fn["maxCorrectionBits"], _maxCorrectionBits); + readParameter("maxCorrectionBits", _maxCorrectionBits, fn); *this = Dictionary(bytes, _markerSize, _maxCorrectionBits); return true; } + void Dictionary::writeDictionary(Ptr& fs) { *fs << "nmarkers" << bytesList.rows; *fs << "markersize" << markerSize; @@ -133,18 +82,13 @@ void Dictionary::writeDictionary(Ptr& fs) { } } -/** - */ + Ptr Dictionary::get(int dict) { return getPredefinedDictionary(dict); } -/** - */ -bool Dictionary::identify(const Mat &onlyBits, int &idx, int &rotation, - double maxCorrectionRate) const { - +bool Dictionary::identify(const Mat &onlyBits, int &idx, int &rotation, double maxCorrectionRate) const { CV_Assert(onlyBits.rows == markerSize && onlyBits.cols == markerSize); int maxCorrectionRecalculed = int(double(maxCorrectionBits) * maxCorrectionRate); @@ -182,8 +126,6 @@ bool Dictionary::identify(const Mat &onlyBits, int &idx, int &rotation, } -/** - */ 
int Dictionary::getDistanceToId(InputArray bits, int id, bool allRotations) const { CV_Assert(id >= 0 && id < bytesList.rows); @@ -207,12 +149,7 @@ int Dictionary::getDistanceToId(InputArray bits, int id, bool allRotations) cons } - -/** - * @brief Draw a canonical marker image - */ void Dictionary::drawMarker(int id, int sidePixels, OutputArray _img, int borderBits) const { - CV_Assert(sidePixels >= (markerSize + 2*borderBits)); CV_Assert(id < bytesList.rows); CV_Assert(borderBits > 0); @@ -234,11 +171,6 @@ void Dictionary::drawMarker(int id, int sidePixels, OutputArray _img, int border } - - -/** - * @brief Transform matrix of bits to list of bytes in the 4 rotations - */ Mat Dictionary::getByteListFromBits(const Mat &bits) { // integer ceil int nbytes = (bits.cols * bits.rows + 8 - 1) / 8; @@ -277,10 +209,6 @@ Mat Dictionary::getByteListFromBits(const Mat &bits) { } - -/** - * @brief Transform list of bytes to matrix of bits - */ Mat Dictionary::getBitsFromByteList(const Mat &byteList, int markerSize) { CV_Assert(byteList.total() > 0 && byteList.total() >= (unsigned int)markerSize * markerSize / 8 && @@ -315,9 +243,7 @@ Mat Dictionary::getBitsFromByteList(const Mat &byteList, int markerSize) { } - -Ptr getPredefinedDictionary(PREDEFINED_DICTIONARY_NAME name) -{ +Ptr getPredefinedDictionary(PREDEFINED_DICTIONARY_NAME name) { // DictionaryData constructors calls // moved out of globals so construted on first use, which allows lazy-loading of opencv dll static const Dictionary DICT_ARUCO_DATA = Dictionary(Mat(1024, (5 * 5 + 7) / 8, CV_8UC4, (uchar*)DICT_ARUCO_BYTES), 5, 0); @@ -438,8 +364,7 @@ static int _getSelfDistance(const Mat &marker) { return minHamming; } -/** - */ + Ptr generateCustomDictionary(int nMarkers, int markerSize, const Ptr &baseDictionary, int randomSeed) { RNG rng((uint64)(randomSeed)); @@ -530,8 +455,6 @@ Ptr generateCustomDictionary(int nMarkers, int markerSize, } -/** - */ Ptr generateCustomDictionary(int nMarkers, int markerSize, int 
randomSeed) { Ptr baseDictionary = makePtr(); return generateCustomDictionary(nMarkers, markerSize, baseDictionary, randomSeed); diff --git a/modules/aruco/src/precomp.hpp b/modules/aruco/src/precomp.hpp index b45771a6644..955d9f3ed8a 100644 --- a/modules/aruco/src/precomp.hpp +++ b/modules/aruco/src/precomp.hpp @@ -1,43 +1,6 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// - // - // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. - // - // By downloading, copying, installing or using the software you agree to this license. - // If you do not agree to this license, do not download, install, - // copy or use the software. - // - // - // License Agreement - // For Open Source Computer Vision Library - // - // Copyright (C) 2014, OpenCV Foundation, all rights reserved. - // Third party copyrights are property of their respective owners. - // - // Redistribution and use in source and binary forms, with or without modification, - // are permitted provided that the following conditions are met: - // - // * Redistribution's of source code must retain the above copyright notice, - // this list of conditions and the following disclaimer. - // - // * Redistribution's in binary form must reproduce the above copyright notice, - // this list of conditions and the following disclaimer in the documentation - // and/or other materials provided with the distribution. - // - // * The name of the copyright holders may not be used to endorse or promote products - // derived from this software without specific prior written permission. - // - // This software is provided by the copyright holders and contributors "as is" and - // any express or implied warranties, including, but not limited to, the implied - // warranties of merchantability and fitness for a particular purpose are disclaimed. 
- // In no event shall the Intel Corporation or contributors be liable for any direct, - // indirect, incidental, special, exemplary, or consequential damages - // (including, but not limited to, procurement of substitute goods or services; - // loss of use, data, or profits; or business interruption) however caused - // and on any theory of liability, whether in contract, strict liability, - // or tort (including negligence or otherwise) arising in any way out of - // the use of this software, even if advised of the possibility of such damage. - // - //M*/ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html #ifndef __OPENCV_CCALIB_PRECOMP__ #define __OPENCV_CCALIB_PRECOMP__ diff --git a/modules/aruco/src/predefined_dictionaries.hpp b/modules/aruco/src/predefined_dictionaries.hpp index ce2db45947a..f3431830598 100644 --- a/modules/aruco/src/predefined_dictionaries.hpp +++ b/modules/aruco/src/predefined_dictionaries.hpp @@ -1,40 +1,6 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html namespace { diff --git a/modules/aruco/test/test_arucodetection.cpp b/modules/aruco/test/test_arucodetection.cpp index c2715cf2d51..78ec99cc054 100644 --- a/modules/aruco/test/test_arucodetection.cpp +++ b/modules/aruco/test/test_arucodetection.cpp @@ -57,8 +57,7 @@ CV_ArucoDetectionSimple::CV_ArucoDetectionSimple() {} void CV_ArucoDetectionSimple::run(int) { - - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250)); // 20 images for(int i = 0; i < 20; i++) { @@ -74,7 +73,7 @@ void CV_ArucoDetectionSimple::run(int) { for(int x = 0; x < 2; x++) { Mat marker; int id = i * 4 + y * 2 + x; - aruco::drawMarker(dictionary, id, markerSidePixels, marker); + aruco::drawMarker(detector.dictionary, id, markerSidePixels, marker); Point2f firstCorner = Point2f(markerSidePixels / 2.f + x * (1.5f * markerSidePixels), markerSidePixels / 2.f + y * (1.5f * markerSidePixels)); @@ -95,9 +94,8 @@ void CV_ArucoDetectionSimple::run(int) { // detect markers vector< vector< Point2f > > corners; vector< int > ids; - Ptr params = aruco::DetectorParameters::create(); - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); // check detection results for(unsigned int m = 0; m < groundTruthIds.size(); m++) { @@ -277,7 +275,9 @@ void CV_ArucoDetectionPerspective::run(int) { cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; cameraMatrix.at< double >(1, 2) = imgSize.height / 2; - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + Ptr params = aruco::DetectorParameters::create(); + params->minDistanceToBorder = 1; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), 
params); // detect from different positions for(double distance = 0.1; distance < 0.7; distance += 0.2) { @@ -288,13 +288,11 @@ void CV_ArucoDetectionPerspective::run(int) { iter++; vector< Point2f > groundTruthCorners; - Ptr params = aruco::DetectorParameters::create(); - params->minDistanceToBorder = 1; params->markerBorderBits = markerBorder; /// create synthetic image Mat img= - projectMarker(dictionary, currentId, cameraMatrix, deg2rad(yaw), deg2rad(pitch), + projectMarker(detector.dictionary, currentId, cameraMatrix, deg2rad(yaw), deg2rad(pitch), distance, imgSize, markerBorder, groundTruthCorners, szEnclosed); // marker :: Inverted if(ArucoAlgParams::DETECT_INVERTED_MARKER == arucoAlgParams){ @@ -314,7 +312,7 @@ void CV_ArucoDetectionPerspective::run(int) { // detect markers vector< vector< Point2f > > corners; vector< int > ids; - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); // check results if(ids.size() != 1 || (ids.size() == 1 && ids[0] != currentId)) { @@ -360,8 +358,8 @@ CV_ArucoDetectionMarkerSize::CV_ArucoDetectionMarkerSize() {} void CV_ArucoDetectionMarkerSize::run(int) { - - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + Ptr params = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params); int markerSide = 20; int imageSize = 200; @@ -372,17 +370,16 @@ void CV_ArucoDetectionMarkerSize::run(int) { // create synthetic image Mat img = Mat(imageSize, imageSize, CV_8UC1, Scalar::all(255)); - aruco::drawMarker(dictionary, id, markerSide, marker); + aruco::drawMarker(detector.dictionary, id, markerSide, marker); Mat aux = img.colRange(30, 30 + markerSide).rowRange(50, 50 + markerSide); marker.copyTo(aux); vector< vector< Point2f > > corners; vector< int > ids; - Ptr params = aruco::DetectorParameters::create(); // set a invalid minMarkerPerimeterRate params->minMarkerPerimeterRate = min(4., (4. 
* markerSide) / float(imageSize) + 0.1); - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); if(corners.size() != 0) { ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::minMarkerPerimeterRate"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -391,7 +388,7 @@ void CV_ArucoDetectionMarkerSize::run(int) { // set an valid minMarkerPerimeterRate params->minMarkerPerimeterRate = max(0., (4. * markerSide) / float(imageSize) - 0.1); - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) { ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::minMarkerPerimeterRate"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -400,7 +397,7 @@ void CV_ArucoDetectionMarkerSize::run(int) { // set a invalid maxMarkerPerimeterRate params->maxMarkerPerimeterRate = min(4., (4. * markerSide) / float(imageSize) - 0.1); - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); if(corners.size() != 0) { ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::maxMarkerPerimeterRate"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -409,7 +406,7 @@ void CV_ArucoDetectionMarkerSize::run(int) { // set an valid maxMarkerPerimeterRate params->maxMarkerPerimeterRate = max(0., (4. 
* markerSide) / float(imageSize) + 0.1); - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) { ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::maxMarkerPerimeterRate"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -436,30 +433,32 @@ CV_ArucoBitCorrection::CV_ArucoBitCorrection() {} void CV_ArucoBitCorrection::run(int) { - Ptr _dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - aruco::Dictionary &dictionary = *_dictionary; - aruco::Dictionary dictionary2 = *_dictionary; + Ptr _dictionary1 = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + Ptr _dictionary2 = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + aruco::Dictionary &dictionary1 = *_dictionary1; + aruco::Dictionary &dictionary2 = *_dictionary2; + Ptr params = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector1(_dictionary1, params); int markerSide = 50; int imageSize = 150; - Ptr params = aruco::DetectorParameters::create(); // 10 markers for(int l = 0; l < 10; l++) { Mat marker; int id = 10 + l * 20; - Mat currentCodeBytes = dictionary.bytesList.rowRange(id, id + 1); + Mat currentCodeBytes = dictionary1.bytesList.rowRange(id, id + 1); // 5 valid cases for(int i = 0; i < 5; i++) { // how many bit errors (the error is low enough so it can be corrected) params->errorCorrectionRate = 0.2 + i * 0.1; int errors = - (int)std::floor(dictionary.maxCorrectionBits * params->errorCorrectionRate - 1.); + (int)std::floor(dictionary1.maxCorrectionBits * params->errorCorrectionRate - 1.); // create erroneous marker in currentCodeBits Mat currentCodeBits = - aruco::Dictionary::getBitsFromByteList(currentCodeBytes, dictionary.markerSize); + aruco::Dictionary::getBitsFromByteList(currentCodeBytes, dictionary1.markerSize); for(int e = 0; e < errors; e++) { currentCodeBits.ptr< unsigned char >()[2 * e] = !currentCodeBits.ptr< unsigned char 
>()[2 * e]; @@ -476,7 +475,7 @@ void CV_ArucoBitCorrection::run(int) { // try to detect using original dictionary vector< vector< Point2f > > corners; vector< int > ids; - aruco::detectMarkers(img, _dictionary, corners, ids, params); + detector1.detectMarkers(img, corners, ids); if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) { ts->printf(cvtest::TS::LOG, "Error in bit correction"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -489,11 +488,11 @@ void CV_ArucoBitCorrection::run(int) { // how many bit errors (the error is too high to be corrected) params->errorCorrectionRate = 0.2 + i * 0.1; int errors = - (int)std::floor(dictionary.maxCorrectionBits * params->errorCorrectionRate + 1.); + (int)std::floor(dictionary1.maxCorrectionBits * params->errorCorrectionRate + 1.); // create erroneous marker in currentCodeBits Mat currentCodeBits = - aruco::Dictionary::getBitsFromByteList(currentCodeBytes, dictionary.markerSize); + aruco::Dictionary::getBitsFromByteList(currentCodeBytes, dictionary1.markerSize); for(int e = 0; e < errors; e++) { currentCodeBits.ptr< unsigned char >()[2 * e] = !currentCodeBits.ptr< unsigned char >()[2 * e]; @@ -502,9 +501,9 @@ void CV_ArucoBitCorrection::run(int) { // dictionary3 is only composed by the modified marker (in its original form) Ptr _dictionary3 = makePtr( dictionary2.bytesList.rowRange(id, id + 1).clone(), - dictionary.markerSize, - dictionary.maxCorrectionBits); - + dictionary1.markerSize, + dictionary1.maxCorrectionBits); + aruco::ArucoDetector detector3(_dictionary3, params); // add erroneous marker to dictionary2 in order to create the erroneous marker image Mat currentCodeBytesError = aruco::Dictionary::getByteListFromBits(currentCodeBits); currentCodeBytesError.copyTo(dictionary2.bytesList.rowRange(id, id + 1)); @@ -516,7 +515,7 @@ void CV_ArucoBitCorrection::run(int) { // try to detect using dictionary3, it should fail vector< vector< Point2f > > corners; vector< int > ids; - 
aruco::detectMarkers(img, _dictionary3, corners, ids, params); + detector3.detectMarkers(img, corners, ids); if(corners.size() != 0) { ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::errorCorrectionRate"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -569,8 +568,7 @@ TEST(CV_ArucoTutorial, can_find_singlemarkersoriginal) { string img_path = cvtest::findDataFile("singlemarkersoriginal.jpg", false); Mat image = imread(img_path); - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr detectorParams = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250)); vector< int > ids; vector< vector< Point2f > > corners, rejected; @@ -584,7 +582,7 @@ TEST(CV_ArucoTutorial, can_find_singlemarkersoriginal) for (size_t i = 0; i < N; i++) mapGoldCorners[goldCornersIds[i]] = goldCorners[i]; - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); ASSERT_EQ(N, ids.size()); for (size_t i = 0; i < N; i++) @@ -609,9 +607,10 @@ TEST(CV_ArucoTutorial, can_find_gboriginal) FileStorage fs(dictPath, FileStorage::READ); dictionary->aruco::Dictionary::readDictionary(fs.root()); // set marker from tutorial_dict.yml - Ptr detectorParams = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(dictionary, detectorParams); + vector< int > ids; vector< vector< Point2f > > corners, rejected; const size_t N = 35ull; @@ -638,7 +637,7 @@ TEST(CV_ArucoTutorial, can_find_gboriginal) for (int i = 0; i < static_cast(N); i++) mapGoldCorners[i] = goldCorners[i]; - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); ASSERT_EQ(N, ids.size()); @@ -657,8 +656,7 @@ TEST(CV_ArucoTutorial, can_find_gboriginal) TEST(CV_ArucoDetectMarkers, regression_3192) { - Ptr dictionary = 
aruco::getPredefinedDictionary(aruco::DICT_4X4_50); - Ptr detectorParams = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_4X4_50)); vector< int > markerIds; vector > markerCorners; string imgPath = cvtest::findDataFile("aruco/regression_3192.png"); @@ -670,7 +668,7 @@ TEST(CV_ArucoDetectMarkers, regression_3192) for (size_t i = 0; i < N; i++) mapGoldCorners[goldCornersIds[i]] = goldCorners[i]; - aruco::detectMarkers(image, dictionary, markerCorners, markerIds, detectorParams); + detector.detectMarkers(image, markerCorners, markerIds); ASSERT_EQ(N, markerIds.size()); for (size_t i = 0; i < N; i++) @@ -688,9 +686,8 @@ TEST(CV_ArucoDetectMarkers, regression_3192) TEST(CV_ArucoDetectMarkers, regression_2492) { - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_5X5_50); - Ptr detectorParams = aruco::DetectorParameters::create(); - detectorParams->minMarkerDistanceRate = 0.026; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_5X5_50)); + detector.params->minMarkerDistanceRate = 0.026; vector< int > markerIds; vector > markerCorners; string imgPath = cvtest::findDataFile("aruco/regression_2492.png"); @@ -705,7 +702,7 @@ TEST(CV_ArucoDetectMarkers, regression_2492) for (size_t i = 0; i < N; i++) mapGoldCorners[goldCornersIds[i]].push_back(goldCorners[i]); - aruco::detectMarkers(image, dictionary, markerCorners, markerIds, detectorParams); + detector.detectMarkers(image, markerCorners, markerIds); ASSERT_EQ(N, markerIds.size()); for (size_t i = 0; i < N; i++) @@ -746,11 +743,10 @@ struct ArucoThreading: public testing::TestWithParam params = cv::aruco::DetectorParameters::create(); // We are not testing against different dictionaries // As we are interested mostly in small images, smaller // markers is better -> 4x4 - cv::Ptr dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50); + aruco::ArucoDetector 
detector(aruco::getPredefinedDictionary(aruco::DICT_4X4_50)); // Height of the test image can be chosen quite freely // We aim to test against small images as in those the @@ -762,19 +758,19 @@ TEST_P(ArucoThreading, number_of_threads_does_not_change_results) // Create a test image cv::Mat img_marker; - cv::aruco::drawMarker(dictionary, 23, height_marker, img_marker, 1); + cv::aruco::drawMarker(detector.dictionary, 23, height_marker, img_marker, 1); // Copy to bigger image to get a white border cv::Mat img(height_img, height_img, CV_8UC1, cv::Scalar(255)); img_marker.copyTo(img(cv::Rect(shift, shift, height_marker, height_marker))); - params->cornerRefinementMethod = GetParam(); + detector.params->cornerRefinementMethod = GetParam(); std::vector > original_corners; std::vector original_ids; { NumThreadsSetter thread_num_setter(1); - cv::aruco::detectMarkers(img, dictionary, original_corners, original_ids, params); + detector.detectMarkers(img, original_corners, original_ids); } ASSERT_EQ(original_ids.size(), 1ull); @@ -787,7 +783,7 @@ TEST_P(ArucoThreading, number_of_threads_does_not_change_results) std::vector > corners; std::vector ids; - cv::aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); // If we don't find any markers, the test is broken ASSERT_EQ(ids.size(), 1ull); diff --git a/modules/aruco/test/test_boarddetection.cpp b/modules/aruco/test/test_boarddetection.cpp index ed4ee161719..42ab71ce697 100644 --- a/modules/aruco/test/test_boarddetection.cpp +++ b/modules/aruco/test/test_boarddetection.cpp @@ -55,6 +55,8 @@ class CV_ArucoBoardPose : public cvtest::BaseTest { public: CV_ArucoBoardPose(ArucoAlgParams arucoAlgParams) { + Ptr params; + Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); params = aruco::DetectorParameters::create(); params->minDistanceToBorder = 3; if (arucoAlgParams == ArucoAlgParams::USE_ARUCO3) { @@ -63,10 +65,11 @@ class CV_ArucoBoardPose : public 
cvtest::BaseTest { params->minSideLengthCanonicalImg = 16; params->errorCorrectionRate = 0.8; } + detector = aruco::ArucoDetector(dictionary, params); } protected: - Ptr params; + aruco::ArucoDetector detector; void run(int); }; @@ -75,8 +78,7 @@ void CV_ArucoBoardPose::run(int) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); Size imgSize(500, 500); - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr gridboard = aruco::GridBoard::create(3, 3, 0.02f, 0.005f, dictionary); + Ptr gridboard = aruco::GridBoard::create(3, 3, 0.02f, 0.005f, detector.dictionary); Ptr board = gridboard.staticCast(); cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; @@ -96,8 +98,8 @@ void CV_ArucoBoardPose::run(int) { imgSize, markerBorder); vector< vector< Point2f > > corners; vector< int > ids; - params->markerBorderBits = markerBorder; - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.params->markerBorderBits = markerBorder; + detector.detectMarkers(img, corners, ids); ASSERT_EQ(ids.size(), gridboard->ids.size()); @@ -160,15 +162,18 @@ class CV_ArucoRefine : public cvtest::BaseTest { public: CV_ArucoRefine(ArucoAlgParams arucoAlgParams) { - params = aruco::DetectorParameters::create(); + Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + Ptr params = aruco::DetectorParameters::create(); params->minDistanceToBorder = 3; params->cornerRefinementMethod = aruco::CORNER_REFINE_SUBPIX; if (arucoAlgParams == ArucoAlgParams::USE_ARUCO3) params->useAruco3Detection = true; + Ptr refineParams = makePtr(10, 3., true); + detector = aruco::ArucoDetector(dictionary, params, refineParams); } protected: - Ptr params; + aruco::ArucoDetector detector; void run(int); }; @@ -178,8 +183,7 @@ void CV_ArucoRefine::run(int) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); Size imgSize(500, 500); - Ptr dictionary = 
aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr gridboard = aruco::GridBoard::create(3, 3, 0.02f, 0.005f, dictionary); + Ptr gridboard = aruco::GridBoard::create(3, 3, 0.02f, 0.005f, detector.dictionary); Ptr board = gridboard.staticCast(); cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; @@ -201,8 +205,8 @@ void CV_ArucoRefine::run(int) { // detect markers vector< vector< Point2f > > corners, rejected; vector< int > ids; - params->markerBorderBits = markerBorder; - aruco::detectMarkers(img, dictionary, corners, ids, params, rejected); + detector.params->markerBorderBits = markerBorder; + detector.detectMarkers(img, corners, ids, rejected); // remove a marker from detection int markersBeforeDelete = (int)ids.size(); @@ -213,8 +217,8 @@ void CV_ArucoRefine::run(int) { ids.erase(ids.begin(), ids.begin() + 1); // try to refind the erased marker - aruco::refineDetectedMarkers(img, board, corners, ids, rejected, cameraMatrix, - distCoeffs, 10, 3., true, noArray(), params); + detector.refineDetectedMarkers(img, board, corners, ids, rejected, cameraMatrix, + distCoeffs, noArray()); // check result if((int)ids.size() < markersBeforeDelete) { diff --git a/modules/aruco/test/test_charucodetection.cpp b/modules/aruco/test/test_charucodetection.cpp index 91b4002d701..13ff37dad88 100644 --- a/modules/aruco/test/test_charucodetection.cpp +++ b/modules/aruco/test/test_charucodetection.cpp @@ -132,8 +132,10 @@ void CV_CharucoDetection::run(int) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); Size imgSize(500, 500); - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr board = aruco::CharucoBoard::create(4, 4, 0.03f, 0.015f, dictionary); + Ptr params = aruco::DetectorParameters::create(); + params->minDistanceToBorder = 3; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params); + Ptr board = aruco::CharucoBoard::create(4, 
4, 0.03f, 0.015f, detector.dictionary); cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 600; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; @@ -157,10 +159,9 @@ void CV_CharucoDetection::run(int) { // detect markers vector< vector< Point2f > > corners; vector< int > ids; - Ptr params = aruco::DetectorParameters::create(); - params->minDistanceToBorder = 3; - params->markerBorderBits = markerBorder; - aruco::detectMarkers(img, dictionary, corners, ids, params); + + detector.params->markerBorderBits = markerBorder; + detector.detectMarkers(img, corners, ids); if(ids.size() == 0) { ts->printf(cvtest::TS::LOG, "Marker detection failed"); @@ -237,8 +238,10 @@ void CV_CharucoPoseEstimation::run(int) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); Size imgSize(500, 500); - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr board = aruco::CharucoBoard::create(4, 4, 0.03f, 0.015f, dictionary); + Ptr params = aruco::DetectorParameters::create(); + params->minDistanceToBorder = 3; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params); + Ptr board = aruco::CharucoBoard::create(4, 4, 0.03f, 0.015f, detector.dictionary); cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; @@ -261,10 +264,8 @@ void CV_CharucoPoseEstimation::run(int) { // detect markers vector< vector< Point2f > > corners; vector< int > ids; - Ptr params = aruco::DetectorParameters::create(); - params->minDistanceToBorder = 3; - params->markerBorderBits = markerBorder; - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.params->markerBorderBits = markerBorder; + detector.detectMarkers(img, corners, ids); ASSERT_EQ(ids.size(), board->ids.size()); @@ -348,11 +349,13 @@ void CV_CharucoDiamondDetection::run(int) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); Size imgSize(500, 500); - Ptr dictionary = 
aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + Ptr params = aruco::DetectorParameters::create(); + params->minDistanceToBorder = 0; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params); float squareLength = 0.03f; float markerLength = 0.015f; Ptr board = - aruco::CharucoBoard::create(3, 3, squareLength, markerLength, dictionary); + aruco::CharucoBoard::create(3, 3, squareLength, markerLength, detector.dictionary); cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; @@ -377,10 +380,8 @@ void CV_CharucoDiamondDetection::run(int) { // detect markers vector< vector< Point2f > > corners; vector< int > ids; - Ptr params = aruco::DetectorParameters::create(); - params->minDistanceToBorder = 0; - params->markerBorderBits = markerBorder; - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.params->markerBorderBits = markerBorder; + detector.detectMarkers(img, corners, ids); if(ids.size() != 4) { ts->printf(cvtest::TS::LOG, "Not enough markers for diamond detection"); @@ -646,10 +647,12 @@ TEST(Charuco, testBoardSubpixelCoords) auto params = cv::aruco::DetectorParameters::create(); params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_APRILTAG; + aruco::ArucoDetector detector(dict, params); + std::vector ids; std::vector> corners, rejected; - cv::aruco::detectMarkers(gray, dict, corners, ids, params, rejected); + detector.detectMarkers(gray, corners, ids, rejected); ASSERT_EQ(ids.size(), size_t(8)); @@ -671,8 +674,7 @@ TEST(CV_ArucoTutorial, can_find_choriginal) { string imgPath = cvtest::findDataFile("choriginal.jpg", false); Mat image = imread(imgPath); - cv::Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr detectorParams = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250)); vector< int > ids; vector< vector< Point2f > > corners, 
rejected; @@ -691,7 +693,7 @@ TEST(CV_ArucoTutorial, can_find_choriginal) for (int i = 0; i < static_cast(N); i++) mapGoldCorners[i] = goldCorners[i]; - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); ASSERT_EQ(N, ids.size()); for (size_t i = 0; i < N; i++) @@ -711,8 +713,7 @@ TEST(CV_ArucoTutorial, can_find_chocclusion) { string imgPath = cvtest::findDataFile("chocclusion_original.jpg", false); Mat image = imread(imgPath); - cv::Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr detectorParams = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250)); vector< int > ids; vector< vector< Point2f > > corners, rejected; @@ -730,7 +731,7 @@ TEST(CV_ArucoTutorial, can_find_chocclusion) for (int i = 0; i < static_cast(N); i++) mapGoldCorners[goldCornersIds[i]] = goldCorners[i]; - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); ASSERT_EQ(N, ids.size()); for (size_t i = 0; i < N; i++) @@ -762,6 +763,8 @@ TEST(CV_ArucoTutorial, can_find_diamondmarkers) detectorParams->readDetectorParameters(fs.root()); detectorParams->cornerRefinementMethod = 3; + aruco::ArucoDetector detector(dictionary, detectorParams); + vector< int > ids; vector< vector< Point2f > > corners, rejected; const size_t N = 12ull; @@ -771,7 +774,7 @@ TEST(CV_ArucoTutorial, can_find_diamondmarkers) for (int i = 0; i < static_cast(N); i++) counterGoldCornersIds[goldCornersIds[i]]++; - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); map counterRes; for (size_t i = 0; i < N; i++) { @@ -788,16 +791,16 @@ TEST(Charuco, issue_14014) string imgPath = cvtest::findDataFile("aruco/recover.png"); Mat img = imread(imgPath); - Ptr dict = 
aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(cv::aruco::DICT_7X7_250)); - Ptr board = aruco::CharucoBoard::create(8, 5, 0.03455f, 0.02164f, dict); Ptr detectorParams = aruco::DetectorParameters::create(); detectorParams->cornerRefinementMethod = aruco::CORNER_REFINE_SUBPIX; detectorParams->cornerRefinementMinAccuracy = 0.01; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_7X7_250), detectorParams); + Ptr board = aruco::CharucoBoard::create(8, 5, 0.03455f, 0.02164f, detector.dictionary); vector corners, rejectedPoints; vector ids; - aruco::detectMarkers(img, dict, corners, ids, detectorParams, rejectedPoints); + detector.detectMarkers(img, corners, ids, rejectedPoints); ASSERT_EQ(corners.size(), 19ull); EXPECT_EQ(Size(4, 1), corners[0].size()); // check dimension of detected corners @@ -806,7 +809,7 @@ TEST(Charuco, issue_14014) ASSERT_EQ(rejectedPoints.size(), 26ull); // optional check to track regressions EXPECT_EQ(Size(4, 1), rejectedPoints[0].size()); // check dimension of detected corners - aruco::refineDetectedMarkers(img, board, corners, ids, rejectedPoints); + detector.refineDetectedMarkers(img, board, corners, ids, rejectedPoints); ASSERT_EQ(corners.size(), 20ull); EXPECT_EQ(Size(4, 1), corners[0].size()); // check dimension of rejected corners after successfully refine diff --git a/modules/aruco/test/test_precomp.hpp b/modules/aruco/test/test_precomp.hpp index d8c903f4c7b..86595ccbda8 100644 --- a/modules/aruco/test/test_precomp.hpp +++ b/modules/aruco/test/test_precomp.hpp @@ -7,7 +7,6 @@ #include "opencv2/ts.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/calib3d.hpp" -#include "opencv2/aruco.hpp" #include #endif diff --git a/modules/ovis/samples/aruco_ar_demo.cpp b/modules/ovis/samples/aruco_ar_demo.cpp index 8e2464046c1..2398a7182a3 100644 --- a/modules/ovis/samples/aruco_ar_demo.cpp +++ b/modules/ovis/samples/aruco_ar_demo.cpp @@ -3,7 +3,7 @@ #include #include -#include +#include #include From 
774faf3a6755948815d4d6121d4f1ef39de563f7 Mon Sep 17 00:00:00 2001 From: AleksandrPanov Date: Thu, 30 Jun 2022 15:29:30 +0300 Subject: [PATCH 30/45] update docs and refactoring --- modules/aruco/include/opencv2/aruco/board.hpp | 10 +- .../aruco/include/opencv2/aruco_detector.hpp | 126 +++++++++++------- modules/aruco/test/test_aruco_utils.hpp | 4 +- 3 files changed, 87 insertions(+), 53 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp index 254bbe0902a..bbbde687b64 100644 --- a/modules/aruco/include/opencv2/aruco/board.hpp +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -56,14 +56,14 @@ class CV_EXPORTS_W Board { /// /// Markers are placed in a certain order - row by row, left to right in every row. /// For M markers, the size is Mx4. - CV_PROP std::vector< std::vector< Point3f > > objPoints; + CV_PROP std::vector > objPoints; /// the dictionary of markers employed for this board CV_PROP Ptr dictionary; /// vector of the identifiers of the markers in the board (same size than objPoints) /// The identifiers refers to the board dictionary - CV_PROP_RW std::vector< int > ids; + CV_PROP_RW std::vector ids; /// coordinate of the bottom right corner of the board, is set when calling the function create() CV_PROP Point3f rightBottomBorder; @@ -153,11 +153,11 @@ class CV_EXPORTS_W GridBoard : public Board { class CV_EXPORTS_W CharucoBoard : public Board { public: // vector of chessboard 3D corners precalculated - CV_PROP std::vector< Point3f > chessboardCorners; + CV_PROP std::vector chessboardCorners; // for each charuco corner, nearest marker id and nearest marker corner id of each marker - CV_PROP std::vector< std::vector< int > > nearestMarkerIdx; - CV_PROP std::vector< std::vector< int > > nearestMarkerCorners; + CV_PROP std::vector > nearestMarkerIdx; + CV_PROP std::vector > nearestMarkerCorners; /** * @brief Draw a ChArUco board diff --git a/modules/aruco/include/opencv2/aruco_detector.hpp 
b/modules/aruco/include/opencv2/aruco_detector.hpp index 52eebeb139d..ed99f584355 100644 --- a/modules/aruco/include/opencv2/aruco_detector.hpp +++ b/modules/aruco/include/opencv2/aruco_detector.hpp @@ -9,10 +9,12 @@ /** * @defgroup aruco ArUco Marker Detection * This module is dedicated to square fiducial markers (also known as Augmented Reality Markers) - * These markers are useful for easy, fast and robust camera pose estimation.ç + * These markers are useful for easy, fast and robust camera pose estimation. * - * The main functionalities are: + * The main functionality of ArucoDetector class is: * - Detection of markers in an image + * + * There are even more functionalities implemented in charuco.hpp and aruco_calib_pose.hpp: * - Pose estimation from a single marker or from a board/set of markers * - Detection of ChArUco board for high subpixel accuracy * - Camera calibration from both, ArUco boards and ChArUco boards. @@ -48,6 +50,9 @@ enum CornerRefineMethod{ CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros }; +/** + * @brief struct DetectorParameters is used by ArucoDetector + */ struct CV_EXPORTS_W DetectorParameters { DetectorParameters() { adaptiveThreshWinSizeMin = 3; @@ -84,8 +89,7 @@ struct CV_EXPORTS_W DetectorParameters { minMarkerLengthRatioOriginalImg = 0.0; }; - /** - * @brief Create a new set of DetectorParameters with default values. + /** @brief Create a new set of DetectorParameters with default values. */ CV_WRAP static Ptr create() { Ptr params = makePtr(); @@ -114,12 +118,14 @@ struct CV_EXPORTS_W DetectorParameters { /// constant for adaptive thresholding before finding contours (default 7) CV_PROP_RW double adaptiveThreshConstant; - /// determine minimum perimeter for marker contour to be detected. This is defined as a rate respect to the - /// maximum dimension of the input image (default 0.03). + /** @brief determine minimum perimeter for marker contour to be detected. 
This is defined as a rate respect to the + * maximum dimension of the input image (default 0.03). + */ CV_PROP_RW double minMarkerPerimeterRate; - /// determine maximum perimeter for marker contour to be detected. This is defined as a rate respect to - /// the maximum dimension of the input image (default 4.0). + /** @brief determine maximum perimeter for marker contour to be detected. This is defined as a rate respect to + * the maximum dimension of the input image (default 4.0). + */ CV_PROP_RW double maxMarkerPerimeterRate; /// minimum accuracy during the polygonal approximation process to determine which contours are squares. (default 0.03) @@ -131,15 +137,17 @@ struct CV_EXPORTS_W DetectorParameters { /// minimum distance of any corner to the image border for detected markers (in pixels) (default 3) CV_PROP_RW int minDistanceToBorder; - /// minimum mean distance beetween two marker corners to be considered imilar, so that the - /// smaller one is removed. The rate is relative to the smaller perimeter of the two markers (default 0.05). + /** @brief minimum mean distance beetween two marker corners to be considered imilar, so that the + * smaller one is removed. The rate is relative to the smaller perimeter of the two markers (default 0.05). + */ CV_PROP_RW double minMarkerDistanceRate; - /// corner refinement method (default CORNER_REFINE_NONE). - /// 0:CORNER_REFINE_NONE, no refinement. - /// 1: CORNER_REFINE_SUBPIX, do subpixel refinement. - /// 2: CORNER_REFINE_CONTOUR use contour-Points, - /// 3: CORNER_REFINE_APRILTAG use the AprilTag2 approach). + /** @brief default CORNER_REFINE_NONE. + * 0:CORNER_REFINE_NONE, no refinement. + * 1: CORNER_REFINE_SUBPIX, do subpixel refinement. + * 2: CORNER_REFINE_CONTOUR use contour-Points, + * 3: CORNER_REFINE_APRILTAG use the AprilTag2 approach). + */ CV_PROP_RW int cornerRefinementMethod; /// window size for the corner refinement process (in pixels) (default 5). 
@@ -157,25 +165,29 @@ struct CV_EXPORTS_W DetectorParameters { /// number of bits (per dimension) for each cell of the marker when removing the perspective (default 4). CV_PROP_RW int perspectiveRemovePixelPerCell; - /// width of the margin of pixels on each cell not considered for the - /// determination of the cell bit. Represents the rate respect to the total size of the cell, i.e. - /// perspectiveRemovePixelPerCell (default 0.13) + /** @brief width of the margin of pixels on each cell not considered for the + * determination of the cell bit. Represents the rate respect to the total size of the cell, i.e. + * perspectiveRemovePixelPerCell (default 0.13) + */ CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell; - /// maximum number of accepted erroneous bits in the border (i.e. number of allowed - /// white bits in the border). Represented as a rate respect to the total number of bits per marker (default 0.35). + /** @brief maximum number of accepted erroneous bits in the border (i.e. number of allowed + * white bits in the border). Represented as a rate respect to the total number of bits per marker (default 0.35). + */ CV_PROP_RW double maxErroneousBitsInBorderRate; - /// minimun standard deviation in pixels values during the decodification step to apply Otsu - /// thresholding (otherwise, all the bits are set to 0 or 1 depending on mean higher than 128 or not) (default 5.0) + /** @brief minimun standard deviation in pixels values during the decodification step to apply Otsu + * thresholding (otherwise, all the bits are set to 0 or 1 depending on mean higher than 128 or not) (default 5.0) + */ CV_PROP_RW double minOtsuStdDev; /// error correction rate respect to the maximun error correction capability for each dictionary (default 0.6). CV_PROP_RW double errorCorrectionRate; - // April :: User-configurable parameters. 
- /// detection of quads can be done on a lower-resolution image, improving speed at a cost of - /// pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still + /** @brief April :: User-configurable parameters. + * detection of quads can be done on a lower-resolution image, improving speed at a cost of + * pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still + */ CV_PROP_RW float aprilTagQuadDecimate; /// what Gaussian blur should be applied to the segmented image (used for quad detection?) @@ -188,28 +200,32 @@ struct CV_EXPORTS_W DetectorParameters { /// how many corner candidates to consider when segmenting a group of pixels into a quad (default 10). CV_PROP_RW int aprilTagMaxNmaxima; - /// reject quads where pairs of edges have angles that are close to straight or close to 180 degrees. - /// Zero means that no quads are rejected. (In radians) (default 10*PI/180) + /** @brief reject quads where pairs of edges have angles that are close to straight or close to 180 degrees. + * Zero means that no quads are rejected. (In radians) (default 10*PI/180) + */ CV_PROP_RW float aprilTagCriticalRad; /// when fitting lines to the contours, what is the maximum mean squared error CV_PROP_RW float aprilTagMaxLineFitMse; - /// when we build our model of black & white pixels, we add an extra check that the - /// white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]). - /// (default 5) + /** @brief when we build our model of black & white pixels, we add an extra check that the + * white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]). + * (default 5) + */ CV_PROP_RW int aprilTagMinWhiteBlackDiff; /// should the thresholded image be deglitched? Only useful for very noisy images (default 0). CV_PROP_RW int aprilTagDeglitch; - /// to check if there is a white marker. 
In order to generate a "white" marker just invert a - /// normal marker by using a tilde, ~markerImage. (default false) + /** @brief to check if there is a white marker. In order to generate a "white" marker just invert a + * normal marker by using a tilde, ~markerImage. (default false) + */ CV_PROP_RW bool detectInvertedMarker; - /// new Aruco functionality proposed in the paper: - /// Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018) - /// https://www.researchgate.net/publication/325787310_Speeded_Up_Detection_of_Squared_Fiducial_Markers + /** @brief new Aruco functionality proposed in the paper: + * Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018) + * https://www.researchgate.net/publication/325787310_Speeded_Up_Detection_of_Squared_Fiducial_Markers + */ /// to enable the new and faster Aruco detection strategy. CV_PROP_RW bool useAruco3Detection; @@ -224,6 +240,9 @@ struct CV_EXPORTS_W DetectorParameters { bool readWrite(const Ptr& readNode = nullptr, const Ptr& writeStorage = nullptr); }; +/** + * @brief struct RefineParameters is used by ArucoDetector + */ struct CV_EXPORTS_W RefineParameters { RefineParameters() { minRepDistance = 10.f; @@ -245,37 +264,53 @@ struct CV_EXPORTS_W RefineParameters { */ CV_WRAP bool readRefineParameters(const FileNode& fn); - /** - * @brief Write a set of RefineParameters to FileStorage + /** @brief Write a set of RefineParameters to FileStorage */ CV_WRAP bool writeRefineParameters(const Ptr& fs); - /// minRepDistance minimum distance between the corners of the rejected candidate and the reprojected marker in - /// order to consider it as a correspondence. + /** @brief minRepDistance minimum distance between the corners of the rejected candidate and the reprojected marker in + * order to consider it as a correspondence. 
+ */ CV_PROP_RW float minRepDistance; - /// minRepDistance rate of allowed erroneous bits respect to the error correction - /// capability of the used dictionary. -1 ignores the error correction step. + /** @brief minRepDistance rate of allowed erroneous bits respect to the error correction + * capability of the used dictionary. -1 ignores the error correction step. + */ CV_PROP_RW float errorCorrectionRate; - /// checkAllOrders consider the four posible corner orders in the rejectedCorners array. - // * If it set to false, only the provided corner order is considered (default true). + /** @brief checkAllOrders consider the four posible corner orders in the rejectedCorners array. + * If it set to false, only the provided corner order is considered (default true). + */ CV_PROP_RW bool checkAllOrders; private: bool readWrite(const Ptr& readNode = nullptr, const Ptr& writeStorage = nullptr); }; +/** + * @brief + * The main functionality of ArucoDetector class is detection of markers in an image with detectMarkers() method. + * After detecting some markers in the image, you can try to find undetected markers from this dictionary with + * refineDetectedMarkers() method. 
+ * @see DetectorParameters, RefineParameters + */ class CV_EXPORTS_W ArucoDetector : public Algorithm { public: /// dictionary indicates the type of markers that will be searched CV_PROP_RW Ptr dictionary; - /// marker detection parameters + /// marker detection parameters, check DetectorParameters docs to see available settings CV_PROP_RW Ptr params; /// marker refine parameters CV_PROP_RW Ptr refineParams; - ArucoDetector(const Ptr &_dictionary = getPredefinedDictionary(DICT_4X4_50), const Ptr &_params = DetectorParameters::create(), + /** + * @brief Basic ArucoDetector constructor + * @param _dictionary indicates the type of markers that will be searched + * @param _params marker detection parameters + * @param _refineParams marker refine detection parameters + */ + ArucoDetector(const Ptr &_dictionary = getPredefinedDictionary(DICT_4X4_50), + const Ptr &_params = DetectorParameters::create(), const Ptr &_refineParams = RefineParameters::create()): dictionary(_dictionary), params(_params), refineParams(_refineParams) {} @@ -303,7 +338,6 @@ class CV_EXPORTS_W ArucoDetector : public Algorithm * @note The function does not correct lens distortion or takes it into account. 
It's recommended to undistort * input image with corresponging camera model, if camera parameters are known * @sa undistort, estimatePoseSingleMarkers, estimatePoseBoard - * */ CV_WRAP void detectMarkers(InputArray image, OutputArrayOfArrays corners, OutputArray ids, OutputArrayOfArrays rejectedImgPoints = noArray()); diff --git a/modules/aruco/test/test_aruco_utils.hpp b/modules/aruco/test/test_aruco_utils.hpp index 033d8f9bb4f..13b69f27d4b 100644 --- a/modules/aruco/test/test_aruco_utils.hpp +++ b/modules/aruco/test/test_aruco_utils.hpp @@ -69,7 +69,7 @@ static inline void projectMarker(Mat& img, Ptr board, int markerIn // projected corners Mat distCoeffs(5, 1, CV_64FC1, Scalar::all(0)); - vector< Point2f > corners; + vector corners; // get max coordinate of board Point3f maxCoord = board->rightBottomBorder; @@ -82,7 +82,7 @@ static inline void projectMarker(Mat& img, Ptr board, int markerIn projectPoints(objPoints, rvec, tvec, cameraMatrix, distCoeffs, corners); // get perspective transform - vector< Point2f > originalCorners; + vector originalCorners; originalCorners.push_back(Point2f(0, 0)); originalCorners.push_back(Point2f((float)markerSizePixels, 0)); originalCorners.push_back(Point2f((float)markerSizePixels, (float)markerSizePixels)); From 64804f06b39c8d3fc160e2c16b34b95712fcd499 Mon Sep 17 00:00:00 2001 From: AleksandrPanov Date: Mon, 4 Jul 2022 10:32:20 +0300 Subject: [PATCH 31/45] fix docs/API --- .../opencv2/aruco/aruco_calib_pose.hpp | 73 ++++++++++--------- modules/aruco/include/opencv2/aruco/board.hpp | 6 +- .../aruco/include/opencv2/aruco_detector.hpp | 7 +- modules/aruco/src/aruco_calib_pose.cpp | 14 ++-- modules/aruco/test/test_charucodetection.cpp | 2 +- 5 files changed, 51 insertions(+), 51 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp b/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp index 23257ff766d..d9f0fd86012 100644 --- a/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp +++ 
b/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp @@ -12,8 +12,7 @@ namespace aruco { //! @addtogroup aruco //! @{ -/** @brief - * rvec/tvec define the right handed coordinate system of the marker. +/** @brief rvec/tvec define the right handed coordinate system of the marker. * PatternPos defines center this system and axes direction. * Axis X (red color) - first coordinate, axis Y (green color) - second coordinate, * axis Z (blue color) - third coordinate. @@ -21,30 +20,29 @@ namespace aruco { */ enum PatternPos { /** @brief The marker coordinate system is centered on the middle of the marker. - * The coordinates of the four corners (CCW order) of the marker in its own coordinate system are: - * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), - * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0). - * - * These pattern points define this coordinate system: - * ![Image with axes drawn](images/singlemarkersaxes.jpg) - */ - CCW_center, + * The coordinates of the four corners (CCW order) of the marker in its own coordinate system are: + * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), + * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0). + * + * These pattern points define this coordinate system: + * ![Image with axes drawn](images/singlemarkersaxes.jpg) + */ + CCW_CENTER, /** @brief The marker coordinate system is centered on the top-left corner of the marker. - * The coordinates of the four corners (CW order) of the marker in its own coordinate system are: - * (0, 0, 0), (markerLength, 0, 0), - * (markerLength, markerLength, 0), (0, markerLength, 0). - * - * These pattern points define this coordinate system: - * ![Image with axes drawn](images/singlemarkersaxes2.jpg) - * - * These pattern dots are convenient to use with a chessboard/ChArUco board. 
- */ - CW_top_left_corner + * The coordinates of the four corners (CW order) of the marker in its own coordinate system are: + * (0, 0, 0), (markerLength, 0, 0), + * (markerLength, markerLength, 0), (0, markerLength, 0). + * + * These pattern points define this coordinate system: + * ![Image with axes drawn](images/singlemarkersaxes2.jpg) + * + * These pattern dots are convenient to use with a chessboard/ChArUco board. + */ + CW_TOP_LEFT_CORNER }; -/** @brief - * Pose estimation parameters - * @param pattern Defines center this system and axes direction (default PatternPos::CCW_center). +/** @brief Pose estimation parameters + * @param pattern Defines center this system and axes direction (default PatternPos::CCW_CENTER). * @param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided * rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further * optimizes them (default false). @@ -56,7 +54,7 @@ struct CV_EXPORTS_W EstimateParameters { CV_PROP_RW bool useExtrinsicGuess; CV_PROP_RW SolvePnPMethod solvePnPMethod; - EstimateParameters(): pattern(CCW_center), useExtrinsicGuess(false), + EstimateParameters(): pattern(CCW_CENTER), useExtrinsicGuess(false), solvePnPMethod(SOLVEPNP_ITERATIVE) {} CV_WRAP static Ptr create() { @@ -82,9 +80,9 @@ struct CV_EXPORTS_W EstimateParameters { * Each element in rvecs corresponds to the specific marker in imgPoints. * @param tvecs array of output translation vectors (e.g. std::vector). * Each element in tvecs corresponds to the specific marker in imgPoints. 
- * @param _objPoints array of object points of all the marker corners + * @param objPoints array of object points of all the marker corners * @param estimateParameters set the origin of coordinate system and the coordinates of the four corners of the marker - * (default estimateParameters.pattern = PatternPos::CCW_center, estimateParameters.useExtrinsicGuess = false, + * (default estimateParameters.pattern = PatternPos::CCW_CENTER, estimateParameters.useExtrinsicGuess = false, * estimateParameters.solvePnPMethod = SOLVEPNP_ITERATIVE). * * This function receives the detected markers and returns their pose estimation respect to @@ -103,8 +101,8 @@ struct CV_EXPORTS_W EstimateParameters { */ CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength, InputArray cameraMatrix, InputArray distCoeffs, - OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray(), - Ptr estimateParameters = EstimateParameters::create()); + OutputArray rvecs, OutputArray tvecs, OutputArray objPoints = noArray(), + const Ptr& estimateParameters = EstimateParameters::create()); /** * @brief Pose estimation for a board of markers @@ -193,16 +191,18 @@ double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArr Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, OutputArray perViewErrors, int flags = 0, - TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); + const TermCriteria& criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); -/** @brief It's the same function as #calibrateCameraAruco but without calibration error estimation. +/** + * @brief It's the same function as #calibrateCameraAruco but without calibration error estimation. 
+ * @overload */ CV_EXPORTS_W double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr &board, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0, - TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, - 30, DBL_EPSILON)); + const TermCriteria& criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, + 30, DBL_EPSILON)); /** * @brief Pose estimation for a ChArUco board given some of their corners @@ -267,17 +267,18 @@ double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArr const Ptr &board, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, - OutputArray perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( + OutputArray perViewErrors, int flags = 0, const TermCriteria& criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); -/** @brief It's the same function as #calibrateCameraCharuco but without calibration error estimation. -*/ +/** + * @brief It's the same function as #calibrateCameraCharuco but without calibration error estimation. + */ CV_EXPORTS_W double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, const Ptr &board, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0, - TermCriteria criteria=TermCriteria(TermCriteria::COUNT + + const TermCriteria& criteria=TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); //! 
@} diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp index bbbde687b64..bfde9a1debf 100644 --- a/modules/aruco/include/opencv2/aruco/board.hpp +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -26,7 +26,7 @@ class Dictionary; */ class CV_EXPORTS_W Board { public: - /** + /** * @brief Provide way to create Board by passing necessary data. Specially needed in Python. * * @param objPoints array of object points of all the marker corners in the board @@ -36,14 +36,14 @@ class CV_EXPORTS_W Board { */ CV_WRAP static Ptr create(InputArrayOfArrays objPoints, const Ptr &dictionary, InputArray ids); - /** + /** * @brief Set ids vector * * @param ids vector of the identifiers of the markers in the board (should be the same size * as objPoints) * * Recommended way to set ids vector, which will fail if the size of ids does not match size - * of objPoints. + * of objPoints. */ CV_WRAP void setIds(InputArray ids); diff --git a/modules/aruco/include/opencv2/aruco_detector.hpp b/modules/aruco/include/opencv2/aruco_detector.hpp index ed99f584355..e5d13088255 100644 --- a/modules/aruco/include/opencv2/aruco_detector.hpp +++ b/modules/aruco/include/opencv2/aruco_detector.hpp @@ -285,8 +285,7 @@ struct CV_EXPORTS_W RefineParameters { }; /** - * @brief - * The main functionality of ArucoDetector class is detection of markers in an image with detectMarkers() method. + * @brief The main functionality of ArucoDetector class is detection of markers in an image with detectMarkers() method. * After detecting some markers in the image, you can try to find undetected markers from this dictionary with * refineDetectedMarkers() method. 
* @see DetectorParameters, RefineParameters @@ -382,8 +381,8 @@ class CV_EXPORTS_W ArucoDetector : public Algorithm } /** @brief simplified API for language bindings - * @overload - */ + * @overload + */ CV_WRAP void write(const String& fileName) const { FileStorage fs(fileName, FileStorage::WRITE); write(fs); diff --git a/modules/aruco/src/aruco_calib_pose.cpp b/modules/aruco/src/aruco_calib_pose.cpp index 7dc7200a503..6fea45ccb83 100644 --- a/modules/aruco/src/aruco_calib_pose.cpp +++ b/modules/aruco/src/aruco_calib_pose.cpp @@ -47,13 +47,13 @@ static Mat _getSingleMarkerObjectPoints(float markerLength, const EstimateParame CV_Assert(markerLength > 0); Mat objPoints(4, 1, CV_32FC3); // set coordinate system in the top-left corner of the marker, with Z pointing out - if (estimateParameters.pattern == CW_top_left_corner) { + if (estimateParameters.pattern == CW_TOP_LEFT_CORNER) { objPoints.ptr(0)[0] = Vec3f(0.f, 0.f, 0); objPoints.ptr(0)[1] = Vec3f(markerLength, 0.f, 0); objPoints.ptr(0)[2] = Vec3f(markerLength, markerLength, 0); objPoints.ptr(0)[3] = Vec3f(0.f, markerLength, 0); } - else if (estimateParameters.pattern == CCW_center) { + else if (estimateParameters.pattern == CCW_CENTER) { objPoints.ptr(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0); objPoints.ptr(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0); objPoints.ptr(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0); @@ -67,7 +67,7 @@ static Mat _getSingleMarkerObjectPoints(float markerLength, const EstimateParame void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength, InputArray _cameraMatrix, InputArray _distCoeffs, OutputArray _rvecs, OutputArray _tvecs, OutputArray _objPoints, - Ptr estimateParameters) { + const Ptr& estimateParameters) { CV_Assert(markerLength > 0); Mat markerObjPoints = _getSingleMarkerObjectPoints(markerLength, *estimateParameters); @@ -178,7 +178,7 @@ double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputA OutputArray 
_stdDeviationsIntrinsics, OutputArray _stdDeviationsExtrinsics, OutputArray _perViewErrors, - int flags, TermCriteria criteria) { + int flags, const TermCriteria& criteria) { // for each frame, get properly processed imagePoints and objectPoints for the calibrateCamera // function vector processedObjectPoints, processedImagePoints; @@ -212,7 +212,7 @@ double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputA double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter, const Ptr &board, Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs, - OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, TermCriteria criteria) { + OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, const TermCriteria& criteria) { return calibrateCameraAruco(_corners, _ids, _counter, board, imageSize, _cameraMatrix, _distCoeffs, _rvecs, _tvecs, noArray(), noArray(), noArray(), flags, criteria); } @@ -224,7 +224,7 @@ double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfAr OutputArray _stdDeviationsIntrinsics, OutputArray _stdDeviationsExtrinsics, OutputArray _perViewErrors, - int flags, TermCriteria criteria) { + int flags, const TermCriteria& criteria) { CV_Assert(_charucoIds.total() > 0 && (_charucoIds.total() == _charucoCorners.total())); // Join object points of charuco corners in a single vector for calibrateCamera() function @@ -248,7 +248,7 @@ double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfAr double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds, const Ptr &_board, Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, - int flags, TermCriteria criteria) { + int flags, const TermCriteria& criteria) { return calibrateCameraCharuco(_charucoCorners, _charucoIds, _board, imageSize, _cameraMatrix, _distCoeffs, 
_rvecs, _tvecs, noArray(), noArray(), noArray(), flags, criteria); } diff --git a/modules/aruco/test/test_charucodetection.cpp b/modules/aruco/test/test_charucodetection.cpp index 13ff37dad88..e96281713c5 100644 --- a/modules/aruco/test/test_charucodetection.cpp +++ b/modules/aruco/test/test_charucodetection.cpp @@ -441,7 +441,7 @@ void CV_CharucoDiamondDetection::run(int) { } Ptr estimateParameters = aruco::EstimateParameters::create(); - estimateParameters->pattern = aruco::CW_top_left_corner; + estimateParameters->pattern = aruco::CW_TOP_LEFT_CORNER; // estimate diamond pose vector< Vec3d > estimatedRvec, estimatedTvec; aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, cameraMatrix, distCoeffs, estimatedRvec, From db55474a7b400d35fb92b0b0e0a98a83c7bc6eaa Mon Sep 17 00:00:00 2001 From: AleksandrPanov Date: Mon, 4 Jul 2022 19:39:27 +0300 Subject: [PATCH 32/45] fix indentation, add enum prefix --- .../opencv2/aruco/aruco_calib_pose.hpp | 10 ++-- modules/aruco/include/opencv2/aruco/board.hpp | 58 +++++++++---------- modules/aruco/src/aruco_calib_pose.cpp | 4 +- modules/aruco/test/test_charucodetection.cpp | 2 +- 4 files changed, 34 insertions(+), 40 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp b/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp index d9f0fd86012..9fa202cec9e 100644 --- a/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp +++ b/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp @@ -27,7 +27,7 @@ enum PatternPos { * These pattern points define this coordinate system: * ![Image with axes drawn](images/singlemarkersaxes.jpg) */ - CCW_CENTER, + ARUCO_CCW_CENTER, /** @brief The marker coordinate system is centered on the top-left corner of the marker. 
* The coordinates of the four corners (CW order) of the marker in its own coordinate system are: * (0, 0, 0), (markerLength, 0, 0), @@ -38,11 +38,11 @@ enum PatternPos { * * These pattern dots are convenient to use with a chessboard/ChArUco board. */ - CW_TOP_LEFT_CORNER + ARUCO_CW_TOP_LEFT_CORNER }; /** @brief Pose estimation parameters - * @param pattern Defines center this system and axes direction (default PatternPos::CCW_CENTER). + * @param pattern Defines center this system and axes direction (default PatternPos::ARUCO_CCW_CENTER). * @param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided * rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further * optimizes them (default false). @@ -54,7 +54,7 @@ struct CV_EXPORTS_W EstimateParameters { CV_PROP_RW bool useExtrinsicGuess; CV_PROP_RW SolvePnPMethod solvePnPMethod; - EstimateParameters(): pattern(CCW_CENTER), useExtrinsicGuess(false), + EstimateParameters(): pattern(ARUCO_CCW_CENTER), useExtrinsicGuess(false), solvePnPMethod(SOLVEPNP_ITERATIVE) {} CV_WRAP static Ptr create() { @@ -82,7 +82,7 @@ struct CV_EXPORTS_W EstimateParameters { * Each element in tvecs corresponds to the specific marker in imgPoints. * @param objPoints array of object points of all the marker corners * @param estimateParameters set the origin of coordinate system and the coordinates of the four corners of the marker - * (default estimateParameters.pattern = PatternPos::CCW_CENTER, estimateParameters.useExtrinsicGuess = false, + * (default estimateParameters.pattern = PatternPos::ARUCO_CCW_CENTER, estimateParameters.useExtrinsicGuess = false, * estimateParameters.solvePnPMethod = SOLVEPNP_ITERATIVE). 
* * This function receives the detected markers and returns their pose estimation respect to diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp index bfde9a1debf..e0b4450e619 100644 --- a/modules/aruco/include/opencv2/aruco/board.hpp +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -25,44 +25,40 @@ class Dictionary; * - The identifier of all the markers in the board. */ class CV_EXPORTS_W Board { - public: - /** - * @brief Provide way to create Board by passing necessary data. Specially needed in Python. - * - * @param objPoints array of object points of all the marker corners in the board - * @param dictionary the dictionary of markers employed for this board - * @param ids vector of the identifiers of the markers in the board - * - */ +public: + /** @brief Provide way to create Board by passing necessary data. Specially needed in Python. + * @param objPoints array of object points of all the marker corners in the board + * @param dictionary the dictionary of markers employed for this board + * @param ids vector of the identifiers of the markers in the board + */ CV_WRAP static Ptr create(InputArrayOfArrays objPoints, const Ptr &dictionary, InputArray ids); - /** - * @brief Set ids vector - * - * @param ids vector of the identifiers of the markers in the board (should be the same size - * as objPoints) - * - * Recommended way to set ids vector, which will fail if the size of ids does not match size - * of objPoints. - */ + /** @brief Set ids vector + * @param ids vector of the identifiers of the markers in the board (should be the same size + * as objPoints) + * + * Recommended way to set ids vector, which will fail if the size of ids does not match size + * of objPoints. 
+ */ CV_WRAP void setIds(InputArray ids); - /// array of object points of all the marker corners in the board - /// each marker include its 4 corners in this order: - ///- objPoints[i][0] - left-top point of i-th marker - ///- objPoints[i][1] - right-top point of i-th marker - ///- objPoints[i][2] - right-bottom point of i-th marker - ///- objPoints[i][3] - left-bottom point of i-th marker - /// - /// Markers are placed in a certain order - row by row, left to right in every row. - /// For M markers, the size is Mx4. + /** @brief array of object points of all the marker corners in the board each marker include its 4 corners in this order: + * - objPoints[i][0] - left-top point of i-th marker + * - objPoints[i][1] - right-top point of i-th marker + * - objPoints[i][2] - right-bottom point of i-th marker + * - objPoints[i][3] - left-bottom point of i-th marker + * + * Markers are placed in a certain order - row by row, left to right in every row. + * For M markers, the size is Mx4. + */ CV_PROP std::vector > objPoints; /// the dictionary of markers employed for this board CV_PROP Ptr dictionary; - /// vector of the identifiers of the markers in the board (same size than objPoints) - /// The identifiers refers to the board dictionary + /** @brief vector of the identifiers of the markers in the board (same size than objPoints) + * The identifiers refers to the board dictionary + */ CV_PROP_RW std::vector ids; /// coordinate of the bottom right corner of the board, is set when calling the function create() @@ -173,9 +169,7 @@ class CV_EXPORTS_W CharucoBoard : public Board { CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1); - /** - * @brief Create a CharucoBoard object - * + /** @brief Create a CharucoBoard object * @param squaresX number of chessboard squares in X direction * @param squaresY number of chessboard squares in Y direction * @param squareLength chessboard square side length (normally in meters) diff --git 
a/modules/aruco/src/aruco_calib_pose.cpp b/modules/aruco/src/aruco_calib_pose.cpp index 6fea45ccb83..7f81643c43d 100644 --- a/modules/aruco/src/aruco_calib_pose.cpp +++ b/modules/aruco/src/aruco_calib_pose.cpp @@ -47,13 +47,13 @@ static Mat _getSingleMarkerObjectPoints(float markerLength, const EstimateParame CV_Assert(markerLength > 0); Mat objPoints(4, 1, CV_32FC3); // set coordinate system in the top-left corner of the marker, with Z pointing out - if (estimateParameters.pattern == CW_TOP_LEFT_CORNER) { + if (estimateParameters.pattern == ARUCO_CW_TOP_LEFT_CORNER) { objPoints.ptr(0)[0] = Vec3f(0.f, 0.f, 0); objPoints.ptr(0)[1] = Vec3f(markerLength, 0.f, 0); objPoints.ptr(0)[2] = Vec3f(markerLength, markerLength, 0); objPoints.ptr(0)[3] = Vec3f(0.f, markerLength, 0); } - else if (estimateParameters.pattern == CCW_CENTER) { + else if (estimateParameters.pattern == ARUCO_CCW_CENTER) { objPoints.ptr(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0); objPoints.ptr(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0); objPoints.ptr(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0); diff --git a/modules/aruco/test/test_charucodetection.cpp b/modules/aruco/test/test_charucodetection.cpp index e96281713c5..c819b37029c 100644 --- a/modules/aruco/test/test_charucodetection.cpp +++ b/modules/aruco/test/test_charucodetection.cpp @@ -441,7 +441,7 @@ void CV_CharucoDiamondDetection::run(int) { } Ptr estimateParameters = aruco::EstimateParameters::create(); - estimateParameters->pattern = aruco::CW_TOP_LEFT_CORNER; + estimateParameters->pattern = aruco::ARUCO_CW_TOP_LEFT_CORNER; // estimate diamond pose vector< Vec3d > estimatedRvec, estimatedTvec; aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, cameraMatrix, distCoeffs, estimatedRvec, From 4326ae5b179ec735552f1ec93d26ab319b754071 Mon Sep 17 00:00:00 2001 From: AleksandrPanov Date: Mon, 4 Jul 2022 20:05:50 +0300 Subject: [PATCH 33/45] remove readWrite() from API --- 
modules/aruco/include/opencv2/aruco/board.hpp | 5 ++++- modules/aruco/include/opencv2/aruco_detector.hpp | 2 -- modules/aruco/src/aruco_detector.cpp | 13 +++++++------ 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp index e0b4450e619..93b413ba613 100644 --- a/modules/aruco/include/opencv2/aruco/board.hpp +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -211,7 +211,10 @@ class CV_EXPORTS_W CharucoBoard : public Board { * @return bool value, 1 (true) if detected corners form a line, 0 (false) if they do not. solvePnP, calibration functions will fail if the corners are collinear (true). * - * The number of ids in charucoIDs should be <= the number of chessboard corners in the board. This functions checks whether the charuco corners are on a straight line (returns true, if so), or not (false). Axis parallel, as well as diagonal and other straight lines detected. Degenerate cases: for number of charucoIDs <= 2, the function returns true. + * The number of ids in charucoIDs should be <= the number of chessboard corners in the board. + * This functions checks whether the charuco corners are on a straight line (returns true, if so), or not (false). + * Axis parallel, as well as diagonal and other straight lines detected. Degenerate cases: + * for number of charucoIDs <= 2,the function returns true. */ CV_EXPORTS_W bool testCharucoCornersCollinear(const Ptr &board, InputArray charucoIds); diff --git a/modules/aruco/include/opencv2/aruco_detector.hpp b/modules/aruco/include/opencv2/aruco_detector.hpp index e5d13088255..b13aa227eed 100644 --- a/modules/aruco/include/opencv2/aruco_detector.hpp +++ b/modules/aruco/include/opencv2/aruco_detector.hpp @@ -280,8 +280,6 @@ struct CV_EXPORTS_W RefineParameters { * If it set to false, only the provided corner order is considered (default true). 
*/ CV_PROP_RW bool checkAllOrders; -private: - bool readWrite(const Ptr& readNode = nullptr, const Ptr& writeStorage = nullptr); }; /** diff --git a/modules/aruco/src/aruco_detector.cpp b/modules/aruco/src/aruco_detector.cpp index a9dc9af4670..58040ec9281 100644 --- a/modules/aruco/src/aruco_detector.cpp +++ b/modules/aruco/src/aruco_detector.cpp @@ -67,13 +67,14 @@ bool DetectorParameters::writeDetectorParameters(const Ptr& fs) return readWrite(nullptr, fs); } -bool RefineParameters::readWrite(const Ptr& readNode, const Ptr& writeStorage) { +static inline bool readWrite(RefineParameters& refineParameters, const Ptr& readNode, + const Ptr& writeStorage = nullptr) { CV_Assert(!readNode.empty() || !writeStorage.empty()); bool check = false; - check |= readWriteParameter("minRepDistance", this->minRepDistance, readNode, writeStorage); - check |= readWriteParameter("errorCorrectionRate", this->errorCorrectionRate, readNode, writeStorage); - check |= readWriteParameter("checkAllOrders", this->checkAllOrders, readNode, writeStorage); + check |= readWriteParameter("minRepDistance", refineParameters.minRepDistance, readNode, writeStorage); + check |= readWriteParameter("errorCorrectionRate", refineParameters.errorCorrectionRate, readNode, writeStorage); + check |= readWriteParameter("checkAllOrders", refineParameters.checkAllOrders, readNode, writeStorage); return check; } @@ -81,13 +82,13 @@ bool RefineParameters::readRefineParameters(const FileNode &fn) { if(fn.empty()) return false; Ptr pfn = makePtr(fn); - return readWrite(pfn); + return readWrite(*this, pfn); } bool RefineParameters::writeRefineParameters(const Ptr &fs) { if(fs.empty()) return false; - return readWrite(nullptr, fs); + return readWrite(*this, nullptr, fs); } /** From b511cfa5be21ff1c186e99cbf6148ecbbb0ad677 Mon Sep 17 00:00:00 2001 From: AleksandrPanov Date: Wed, 6 Jul 2022 20:32:47 +0300 Subject: [PATCH 34/45] add gridImpl --- .../opencv2/aruco/aruco_calib_pose.hpp | 3 +- 
modules/aruco/include/opencv2/aruco/board.hpp | 27 +++++-------- modules/aruco/src/board.cpp | 38 ++++++++++++++++--- 3 files changed, 42 insertions(+), 26 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp b/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp index 9fa202cec9e..7f6624ccde4 100644 --- a/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp +++ b/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp @@ -193,9 +193,8 @@ double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArr OutputArray stdDeviationsExtrinsics, OutputArray perViewErrors, int flags = 0, const TermCriteria& criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); -/** +/** @overload * @brief It's the same function as #calibrateCameraAruco but without calibration error estimation. - * @overload */ CV_EXPORTS_W double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr &board, Size imageSize, InputOutputArray cameraMatrix, diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp index 93b413ba613..ec1d68fd62b 100644 --- a/modules/aruco/include/opencv2/aruco/board.hpp +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -90,7 +90,8 @@ CV_EXPORTS_W void drawPlanarBoard(const Ptr &board, Size outSize, OutputA */ class CV_EXPORTS_W GridBoard : public Board { - public: +public: + CV_EXPORTS_W GridBoard(); /** * @brief Draw a GridBoard * @@ -121,21 +122,13 @@ class CV_EXPORTS_W GridBoard : public Board { CV_WRAP static Ptr create(int markersX, int markersY, float markerLength, float markerSeparation, const Ptr &dictionary, int firstMarker = 0); - CV_WRAP Size getGridSize() const { return Size(_markersX, _markersY); } - - CV_WRAP float getMarkerLength() const { return _markerLength; } - - CV_WRAP float getMarkerSeparation() const { return _markerSeparation; } - - private: - // number of markers in X and Y 
directions - int _markersX, _markersY; + CV_WRAP Size getGridSize() const; + CV_WRAP float getMarkerLength(); + CV_WRAP float getMarkerSeparation(); - // marker side length (normally in meters) - float _markerLength; - - // separation between markers in the grid - float _markerSeparation; +protected: + struct GridImpl; + Ptr gridImpl; }; /** @@ -147,7 +140,7 @@ class CV_EXPORTS_W GridBoard : public Board { * This class also allows the easy creation and drawing of ChArUco boards. */ class CV_EXPORTS_W CharucoBoard : public Board { - public: +public: // vector of chessboard 3D corners precalculated CV_PROP std::vector chessboardCorners; @@ -185,9 +178,7 @@ class CV_EXPORTS_W CharucoBoard : public Board { float markerLength, const Ptr &dictionary); CV_WRAP Size getChessboardSize() const { return Size(_squaresX, _squaresY); } - CV_WRAP float getSquareLength() const { return _squareLength; } - CV_WRAP float getMarkerLength() const { return _markerLength; } private: diff --git a/modules/aruco/src/board.cpp b/modules/aruco/src/board.cpp index d680e3b0843..a3b06793082 100644 --- a/modules/aruco/src/board.cpp +++ b/modules/aruco/src/board.cpp @@ -10,8 +10,7 @@ namespace cv { namespace aruco { using namespace std; -/** - * @brief Implementation of drawPlanarBoard that accepts a raw Board pointer. +/** @brief Implementation of drawPlanarBoard that accepts a raw Board pointer. 
*/ static void _drawPlanarBoardImpl(Board *_board, Size outSize, OutputArray _img, int marginSize, int borderBits) { CV_Assert(!outSize.empty()); @@ -99,6 +98,20 @@ void drawPlanarBoard(const Ptr &_board, Size outSize, OutputArray _img, i _drawPlanarBoardImpl(_board, outSize, _img, marginSize, borderBits); } +struct GridBoard::GridImpl { + GridImpl(){}; + // number of markers in X and Y directions + int sizeX = 3, sizeY = 3; + + // marker side length (normally in meters) + float markerLength = 1.f; + + // separation between markers in the grid + float markerSeparation = .5f; +}; + +GridBoard::GridBoard(): gridImpl(makePtr()) {} + Ptr Board::create(InputArrayOfArrays objPoints, const Ptr &dictionary, InputArray ids) { CV_Assert(objPoints.total() == ids.total()); CV_Assert(objPoints.type() == CV_32FC3 || objPoints.type() == CV_32FC1); @@ -139,10 +152,10 @@ Ptr GridBoard::create(int markersX, int markersY, float markerLength, const Ptr &dictionary, int firstMarker) { CV_Assert(markersX > 0 && markersY > 0 && markerLength > 0 && markerSeparation > 0); Ptr res = makePtr(); - res->_markersX = markersX; - res->_markersY = markersY; - res->_markerLength = markerLength; - res->_markerSeparation = markerSeparation; + res->gridImpl->sizeX = markersX; + res->gridImpl->sizeY = markersY; + res->gridImpl->markerLength = markerLength; + res->gridImpl->markerSeparation = markerSeparation; res->dictionary = dictionary; size_t totalMarkers = (size_t) markersX * markersY; @@ -175,6 +188,19 @@ void GridBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderB _drawPlanarBoardImpl((Board*)this, outSize, _img, marginSize, borderBits); } +Size GridBoard::getGridSize() const { + return Size(gridImpl->sizeX, gridImpl->sizeY); +} + +float GridBoard::getMarkerLength() { + return gridImpl->markerLength; +} + +float GridBoard::getMarkerSeparation() { + return gridImpl->markerSeparation; +} + + void CharucoBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) 
{ CV_Assert(!outSize.empty()); CV_Assert(marginSize >= 0); From ba4d60ee8c875d58fb7706726a12fe43373eb911 Mon Sep 17 00:00:00 2001 From: AleksandrPanov Date: Wed, 6 Jul 2022 21:09:10 +0300 Subject: [PATCH 35/45] add charucoImpl --- modules/aruco/include/opencv2/aruco/board.hpp | 26 ++-- modules/aruco/src/board.cpp | 142 ++++++++++-------- 2 files changed, 88 insertions(+), 80 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp index ec1d68fd62b..c2cad319f72 100644 --- a/modules/aruco/include/opencv2/aruco/board.hpp +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -129,6 +129,7 @@ class CV_EXPORTS_W GridBoard : public Board { protected: struct GridImpl; Ptr gridImpl; + friend class CharucoBoard; }; /** @@ -141,6 +142,8 @@ class CV_EXPORTS_W GridBoard : public Board { */ class CV_EXPORTS_W CharucoBoard : public Board { public: + CharucoBoard(); + // vector of chessboard 3D corners precalculated CV_PROP std::vector chessboardCorners; @@ -148,8 +151,7 @@ class CV_EXPORTS_W CharucoBoard : public Board { CV_PROP std::vector > nearestMarkerIdx; CV_PROP std::vector > nearestMarkerCorners; - /** - * @brief Draw a ChArUco board + /** @brief Draw a ChArUco board * * @param outSize size of the output image in pixels. * @param img output image with the board. 
The size of this image will be outSize @@ -177,21 +179,13 @@ class CV_EXPORTS_W CharucoBoard : public Board { CV_WRAP static Ptr create(int squaresX, int squaresY, float squareLength, float markerLength, const Ptr &dictionary); - CV_WRAP Size getChessboardSize() const { return Size(_squaresX, _squaresY); } - CV_WRAP float getSquareLength() const { return _squareLength; } - CV_WRAP float getMarkerLength() const { return _markerLength; } - - private: - void _getNearestMarkerCorners(); + CV_WRAP Size getChessboardSize() const; + CV_WRAP float getSquareLength() const; + CV_WRAP float getMarkerLength() const; - // number of markers in X and Y directions - int _squaresX, _squaresY; - - // size of chessboard squares side (normally in meters) - float _squareLength; - - // marker side length (normally in meters) - float _markerLength; +protected: + struct CharucoImpl; + Ptr charucoImpl; }; /** diff --git a/modules/aruco/src/board.cpp b/modules/aruco/src/board.cpp index a3b06793082..1901381c1fc 100644 --- a/modules/aruco/src/board.cpp +++ b/modules/aruco/src/board.cpp @@ -200,6 +200,15 @@ float GridBoard::getMarkerSeparation() { return gridImpl->markerSeparation; } +struct CharucoBoard::CharucoImpl : GridBoard::GridImpl { + // size of chessboard squares side (normally in meters) + float squareLength; + + // marker side length (normally in meters) + float markerLength; +}; + +CharucoBoard::CharucoBoard(): charucoImpl(makePtr()) {} void CharucoBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) { CV_Assert(!outSize.empty()); @@ -212,8 +221,8 @@ void CharucoBoard::draw(Size outSize, OutputArray _img, int marginSize, int bord out.colRange(marginSize, out.cols - marginSize).rowRange(marginSize, out.rows - marginSize); double totalLengthX, totalLengthY; - totalLengthX = _squareLength * _squaresX; - totalLengthY = _squareLength * _squaresY; + totalLengthX = charucoImpl->squareLength * charucoImpl->sizeX; + totalLengthY = charucoImpl->squareLength * 
charucoImpl->sizeY; // proportional transformation double xReduction = totalLengthX / double(noMarginsImg.cols); @@ -233,12 +242,12 @@ void CharucoBoard::draw(Size outSize, OutputArray _img, int marginSize, int bord // determine the margins to draw only the markers // take the minimum just to be sure - double squareSizePixels = min(double(chessboardZoneImg.cols) / double(_squaresX), - double(chessboardZoneImg.rows) / double(_squaresY)); + double squareSizePixels = min(double(chessboardZoneImg.cols) / double(charucoImpl->sizeX), + double(chessboardZoneImg.rows) / double(charucoImpl->sizeY)); - double diffSquareMarkerLength = (_squareLength - _markerLength) / 2; + double diffSquareMarkerLength = (charucoImpl->squareLength - charucoImpl->markerLength) / 2; int diffSquareMarkerLengthPixels = - int(diffSquareMarkerLength * squareSizePixels / _squareLength); + int(diffSquareMarkerLength * squareSizePixels / charucoImpl->squareLength); // draw markers Mat markersImg; @@ -246,8 +255,8 @@ void CharucoBoard::draw(Size outSize, OutputArray _img, int marginSize, int bord markersImg.copyTo(chessboardZoneImg); // now draw black squares - for(int y = 0; y < _squaresY; y++) { - for(int x = 0; x < _squaresX; x++) { + for(int y = 0; y < charucoImpl->sizeY; y++) { + for(int x = 0; x < charucoImpl->sizeX; x++) { if(y % 2 != x % 2) continue; // white corner, dont do anything @@ -263,15 +272,67 @@ void CharucoBoard::draw(Size outSize, OutputArray _img, int marginSize, int bord } } +/** + * Fill nearestMarkerIdx and nearestMarkerCorners arrays + */ +static inline void _getNearestMarkerCorners(CharucoBoard &board, float squareLength) { + board.nearestMarkerIdx.resize(board.chessboardCorners.size()); + board.nearestMarkerCorners.resize(board.chessboardCorners.size()); + + unsigned int nMarkers = (unsigned int)board.ids.size(); + unsigned int nCharucoCorners = (unsigned int)board.chessboardCorners.size(); + for(unsigned int i = 0; i < nCharucoCorners; i++) { + double minDist = -1; // 
distance of closest markers + Point3f charucoCorner = board.chessboardCorners[i]; + for(unsigned int j = 0; j < nMarkers; j++) { + // calculate distance from marker center to charuco corner + Point3f center = Point3f(0, 0, 0); + for(unsigned int k = 0; k < 4; k++) + center += board.objPoints[j][k]; + center /= 4.; + double sqDistance; + Point3f distVector = charucoCorner - center; + sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; + if(j == 0 || fabs(sqDistance - minDist) < cv::pow(0.01 * squareLength, 2)) { + // if same minimum distance (or first iteration), add to nearestMarkerIdx vector + board.nearestMarkerIdx[i].push_back(j); + minDist = sqDistance; + } else if(sqDistance < minDist) { + // if finding a closest marker to the charuco corner + board.nearestMarkerIdx[i].clear(); // remove any previous added marker + board.nearestMarkerIdx[i].push_back(j); // add the new closest marker index + minDist = sqDistance; + } + } + // for each of the closest markers, search the marker corner index closer + // to the charuco corner + for(unsigned int j = 0; j < board.nearestMarkerIdx[i].size(); j++) { + board.nearestMarkerCorners[i].resize(board.nearestMarkerIdx[i].size()); + double minDistCorner = -1; + for(unsigned int k = 0; k < 4; k++) { + double sqDistance; + Point3f distVector = charucoCorner - board.objPoints[board.nearestMarkerIdx[i][j]][k]; + sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; + if(k == 0 || sqDistance < minDistCorner) { + // if this corner is closer to the charuco corner, assing its index + // to nearestMarkerCorners + minDistCorner = sqDistance; + board.nearestMarkerCorners[i][j] = k; + } + } + } + } +} + Ptr CharucoBoard::create(int squaresX, int squaresY, float squareLength, float markerLength, const Ptr &dictionary) { CV_Assert(squaresX > 1 && squaresY > 1 && markerLength > 0 && squareLength > markerLength); Ptr res = makePtr(); - res->_squaresX = squaresX; - res->_squaresY = squaresY; - 
res->_squareLength = squareLength; - res->_markerLength = markerLength; + res->charucoImpl->sizeX = squaresX; + res->charucoImpl->sizeY = squaresY; + res->charucoImpl->squareLength = squareLength; + res->charucoImpl->markerLength = markerLength; res->dictionary = dictionary; float diffSquareMarkerLength = (squareLength - markerLength) / 2; @@ -306,62 +367,15 @@ Ptr CharucoBoard::create(int squaresX, int squaresY, float squareL } res->rightBottomBorder = Point3f(squaresX * squareLength, squaresY * squareLength, 0.f); - res->_getNearestMarkerCorners(); - + _getNearestMarkerCorners(*res, res->charucoImpl->squareLength); return res; } -/** - * Fill nearestMarkerIdx and nearestMarkerCorners arrays - */ -void CharucoBoard::_getNearestMarkerCorners() { - nearestMarkerIdx.resize(chessboardCorners.size()); - nearestMarkerCorners.resize(chessboardCorners.size()); +Size CharucoBoard::getChessboardSize() const { return Size(charucoImpl->sizeX, charucoImpl->sizeY); } - unsigned int nMarkers = (unsigned int)ids.size(); - unsigned int nCharucoCorners = (unsigned int)chessboardCorners.size(); - for(unsigned int i = 0; i < nCharucoCorners; i++) { - double minDist = -1; // distance of closest markers - Point3f charucoCorner = chessboardCorners[i]; - for(unsigned int j = 0; j < nMarkers; j++) { - // calculate distance from marker center to charuco corner - Point3f center = Point3f(0, 0, 0); - for(unsigned int k = 0; k < 4; k++) - center += objPoints[j][k]; - center /= 4.; - double sqDistance; - Point3f distVector = charucoCorner - center; - sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; - if(j == 0 || fabs(sqDistance - minDist) < cv::pow(0.01 * _squareLength, 2)) { - // if same minimum distance (or first iteration), add to nearestMarkerIdx vector - nearestMarkerIdx[i].push_back(j); - minDist = sqDistance; - } else if(sqDistance < minDist) { - // if finding a closest marker to the charuco corner - nearestMarkerIdx[i].clear(); // remove any previous added 
marker - nearestMarkerIdx[i].push_back(j); // add the new closest marker index - minDist = sqDistance; - } - } - // for each of the closest markers, search the marker corner index closer - // to the charuco corner - for(unsigned int j = 0; j < nearestMarkerIdx[i].size(); j++) { - nearestMarkerCorners[i].resize(nearestMarkerIdx[i].size()); - double minDistCorner = -1; - for(unsigned int k = 0; k < 4; k++) { - double sqDistance; - Point3f distVector = charucoCorner - objPoints[nearestMarkerIdx[i][j]][k]; - sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; - if(k == 0 || sqDistance < minDistCorner) { - // if this corner is closer to the charuco corner, assing its index - // to nearestMarkerCorners - minDistCorner = sqDistance; - nearestMarkerCorners[i][j] = k; - } - } - } - } -} +float CharucoBoard::getSquareLength() const { return charucoImpl->squareLength; } + +float CharucoBoard::getMarkerLength() const { return charucoImpl->markerLength; } bool testCharucoCornersCollinear(const Ptr &_board, InputArray _charucoIds) { unsigned int nCharucoCorners = (unsigned int)_charucoIds.getMat().total(); From d1fe61a467d930fd5f9c87b2486e6bacc970eec6 Mon Sep 17 00:00:00 2001 From: Aleksandr Panov Date: Fri, 8 Jul 2022 00:49:02 +0300 Subject: [PATCH 36/45] add CV_WRAP, add default Board constructor --- modules/aruco/include/opencv2/aruco/board.hpp | 6 ++++-- modules/aruco/include/opencv2/aruco_detector.hpp | 2 +- modules/aruco/src/board.cpp | 2 ++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp index c2cad319f72..f76f2f36ba3 100644 --- a/modules/aruco/include/opencv2/aruco/board.hpp +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -26,6 +26,8 @@ class Dictionary; */ class CV_EXPORTS_W Board { public: + CV_WRAP Board(); + /** @brief Provide way to create Board by passing necessary data. Specially needed in Python. 
* @param objPoints array of object points of all the marker corners in the board * @param dictionary the dictionary of markers employed for this board @@ -91,7 +93,7 @@ CV_EXPORTS_W void drawPlanarBoard(const Ptr &board, Size outSize, OutputA class CV_EXPORTS_W GridBoard : public Board { public: - CV_EXPORTS_W GridBoard(); + CV_WRAP GridBoard(); /** * @brief Draw a GridBoard * @@ -142,7 +144,7 @@ class CV_EXPORTS_W GridBoard : public Board { */ class CV_EXPORTS_W CharucoBoard : public Board { public: - CharucoBoard(); + CV_WRAP CharucoBoard(); // vector of chessboard 3D corners precalculated CV_PROP std::vector chessboardCorners; diff --git a/modules/aruco/include/opencv2/aruco_detector.hpp b/modules/aruco/include/opencv2/aruco_detector.hpp index b13aa227eed..f52e6d85b29 100644 --- a/modules/aruco/include/opencv2/aruco_detector.hpp +++ b/modules/aruco/include/opencv2/aruco_detector.hpp @@ -306,7 +306,7 @@ class CV_EXPORTS_W ArucoDetector : public Algorithm * @param _params marker detection parameters * @param _refineParams marker refine detection parameters */ - ArucoDetector(const Ptr &_dictionary = getPredefinedDictionary(DICT_4X4_50), + CV_WRAP ArucoDetector(const Ptr &_dictionary = getPredefinedDictionary(DICT_4X4_50), const Ptr &_params = DetectorParameters::create(), const Ptr &_refineParams = RefineParameters::create()): dictionary(_dictionary), params(_params), refineParams(_refineParams) {} diff --git a/modules/aruco/src/board.cpp b/modules/aruco/src/board.cpp index 1901381c1fc..2920b485433 100644 --- a/modules/aruco/src/board.cpp +++ b/modules/aruco/src/board.cpp @@ -112,6 +112,8 @@ struct GridBoard::GridImpl { GridBoard::GridBoard(): gridImpl(makePtr()) {} +Board::Board(): dictionary(makePtr(getPredefinedDictionary(PREDEFINED_DICTIONARY_NAME::DICT_4X4_50))) {} + Ptr Board::create(InputArrayOfArrays objPoints, const Ptr &dictionary, InputArray ids) { CV_Assert(objPoints.total() == ids.total()); CV_Assert(objPoints.type() == CV_32FC3 || objPoints.type() == 
CV_32FC1); From 39dabc1d4823c06ade6e3665b805dd275d9173fc Mon Sep 17 00:00:00 2001 From: Aleksandr Panov Date: Fri, 8 Jul 2022 02:16:12 +0300 Subject: [PATCH 37/45] add const --- modules/aruco/include/opencv2/aruco/board.hpp | 4 ++-- modules/aruco/src/board.cpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp index f76f2f36ba3..51d0248bcab 100644 --- a/modules/aruco/include/opencv2/aruco/board.hpp +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -125,8 +125,8 @@ class CV_EXPORTS_W GridBoard : public Board { const Ptr &dictionary, int firstMarker = 0); CV_WRAP Size getGridSize() const; - CV_WRAP float getMarkerLength(); - CV_WRAP float getMarkerSeparation(); + CV_WRAP float getMarkerLength() const; + CV_WRAP float getMarkerSeparation() const; protected: struct GridImpl; diff --git a/modules/aruco/src/board.cpp b/modules/aruco/src/board.cpp index 2920b485433..94f88b01fb9 100644 --- a/modules/aruco/src/board.cpp +++ b/modules/aruco/src/board.cpp @@ -194,11 +194,11 @@ Size GridBoard::getGridSize() const { return Size(gridImpl->sizeX, gridImpl->sizeY); } -float GridBoard::getMarkerLength() { +float GridBoard::getMarkerLength() const { return gridImpl->markerLength; } -float GridBoard::getMarkerSeparation() { +float GridBoard::getMarkerSeparation() const { return gridImpl->markerSeparation; } From 1f003fe9e483877b71b5e246f648c4563bee5966 Mon Sep 17 00:00:00 2001 From: AleksandrPanov Date: Mon, 25 Jul 2022 10:56:47 +0300 Subject: [PATCH 38/45] fix readWrite() --- .../aruco/include/opencv2/aruco_detector.hpp | 3 -- modules/aruco/src/aruco_detector.cpp | 53 ++++++++++--------- 2 files changed, 27 insertions(+), 29 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco_detector.hpp b/modules/aruco/include/opencv2/aruco_detector.hpp index f52e6d85b29..f3342ebe76d 100644 --- a/modules/aruco/include/opencv2/aruco_detector.hpp +++ 
b/modules/aruco/include/opencv2/aruco_detector.hpp @@ -235,9 +235,6 @@ struct CV_EXPORTS_W DetectorParameters { /// range [0,1], eq (2) from paper. The parameter tau_i has a direct influence on the processing speed. CV_PROP_RW float minMarkerLengthRatioOriginalImg; - -private: - bool readWrite(const Ptr& readNode = nullptr, const Ptr& writeStorage = nullptr); }; /** diff --git a/modules/aruco/src/aruco_detector.cpp b/modules/aruco/src/aruco_detector.cpp index 58040ec9281..6b6a328bcd2 100644 --- a/modules/aruco/src/aruco_detector.cpp +++ b/modules/aruco/src/aruco_detector.cpp @@ -15,40 +15,41 @@ namespace aruco { using namespace std; -bool DetectorParameters::readWrite(const Ptr& readNode, const Ptr& writeStorage) { +static inline bool readWrite(DetectorParameters ¶ms, const Ptr& readNode = nullptr, + const Ptr& writeStorage = nullptr) { CV_Assert(!readNode.empty() || !writeStorage.empty()); bool check = false; - check |= readWriteParameter("adaptiveThreshWinSizeMin", this->adaptiveThreshWinSizeMin, readNode, writeStorage); - check |= readWriteParameter("adaptiveThreshWinSizeMax", this->adaptiveThreshWinSizeMax, readNode, writeStorage); - check |= readWriteParameter("adaptiveThreshWinSizeStep", this->adaptiveThreshWinSizeStep, readNode, writeStorage); - check |= readWriteParameter("adaptiveThreshConstant", this->adaptiveThreshConstant, readNode, writeStorage); - check |= readWriteParameter("minMarkerPerimeterRate", this->minMarkerPerimeterRate, readNode, writeStorage); - check |= readWriteParameter("maxMarkerPerimeterRate", this->maxMarkerPerimeterRate, readNode, writeStorage); - check |= readWriteParameter("polygonalApproxAccuracyRate", this->polygonalApproxAccuracyRate, + check |= readWriteParameter("adaptiveThreshWinSizeMin", params.adaptiveThreshWinSizeMin, readNode, writeStorage); + check |= readWriteParameter("adaptiveThreshWinSizeMax", params.adaptiveThreshWinSizeMax, readNode, writeStorage); + check |= readWriteParameter("adaptiveThreshWinSizeStep", 
params.adaptiveThreshWinSizeStep, readNode, writeStorage); + check |= readWriteParameter("adaptiveThreshConstant", params.adaptiveThreshConstant, readNode, writeStorage); + check |= readWriteParameter("minMarkerPerimeterRate", params.minMarkerPerimeterRate, readNode, writeStorage); + check |= readWriteParameter("maxMarkerPerimeterRate", params.maxMarkerPerimeterRate, readNode, writeStorage); + check |= readWriteParameter("polygonalApproxAccuracyRate", params.polygonalApproxAccuracyRate, readNode, writeStorage); - check |= readWriteParameter("minCornerDistanceRate", this->minCornerDistanceRate, readNode, writeStorage); - check |= readWriteParameter("minDistanceToBorder", this->minDistanceToBorder, readNode, writeStorage); - check |= readWriteParameter("minMarkerDistanceRate", this->minMarkerDistanceRate, readNode, writeStorage); - check |= readWriteParameter("cornerRefinementMethod", this->cornerRefinementMethod, readNode, writeStorage); - check |= readWriteParameter("cornerRefinementWinSize", this->cornerRefinementWinSize, readNode, writeStorage); - check |= readWriteParameter("cornerRefinementMaxIterations", this->cornerRefinementMaxIterations, + check |= readWriteParameter("minCornerDistanceRate", params.minCornerDistanceRate, readNode, writeStorage); + check |= readWriteParameter("minDistanceToBorder", params.minDistanceToBorder, readNode, writeStorage); + check |= readWriteParameter("minMarkerDistanceRate", params.minMarkerDistanceRate, readNode, writeStorage); + check |= readWriteParameter("cornerRefinementMethod", params.cornerRefinementMethod, readNode, writeStorage); + check |= readWriteParameter("cornerRefinementWinSize", params.cornerRefinementWinSize, readNode, writeStorage); + check |= readWriteParameter("cornerRefinementMaxIterations", params.cornerRefinementMaxIterations, readNode, writeStorage); - check |= readWriteParameter("cornerRefinementMinAccuracy", this->cornerRefinementMinAccuracy, + check |= readWriteParameter("cornerRefinementMinAccuracy", 
params.cornerRefinementMinAccuracy, readNode, writeStorage); - check |= readWriteParameter("markerBorderBits", this->markerBorderBits, readNode, writeStorage); - check |= readWriteParameter("perspectiveRemovePixelPerCell", this->perspectiveRemovePixelPerCell, + check |= readWriteParameter("markerBorderBits", params.markerBorderBits, readNode, writeStorage); + check |= readWriteParameter("perspectiveRemovePixelPerCell", params.perspectiveRemovePixelPerCell, readNode, writeStorage); - check |= readWriteParameter("perspectiveRemoveIgnoredMarginPerCell", this->perspectiveRemoveIgnoredMarginPerCell, + check |= readWriteParameter("perspectiveRemoveIgnoredMarginPerCell", params.perspectiveRemoveIgnoredMarginPerCell, readNode, writeStorage); - check |= readWriteParameter("maxErroneousBitsInBorderRate", this->maxErroneousBitsInBorderRate, + check |= readWriteParameter("maxErroneousBitsInBorderRate", params.maxErroneousBitsInBorderRate, readNode, writeStorage); - check |= readWriteParameter("minOtsuStdDev", this->minOtsuStdDev, readNode, writeStorage); - check |= readWriteParameter("errorCorrectionRate", this->errorCorrectionRate, readNode, writeStorage); + check |= readWriteParameter("minOtsuStdDev", params.minOtsuStdDev, readNode, writeStorage); + check |= readWriteParameter("errorCorrectionRate", params.errorCorrectionRate, readNode, writeStorage); // new aruco 3 functionality - check |= readWriteParameter("useAruco3Detection", this->useAruco3Detection, readNode, writeStorage); - check |= readWriteParameter("minSideLengthCanonicalImg", this->minSideLengthCanonicalImg, readNode, writeStorage); - check |= readWriteParameter("minMarkerLengthRatioOriginalImg", this->minMarkerLengthRatioOriginalImg, + check |= readWriteParameter("useAruco3Detection", params.useAruco3Detection, readNode, writeStorage); + check |= readWriteParameter("minSideLengthCanonicalImg", params.minSideLengthCanonicalImg, readNode, writeStorage); + check |= 
readWriteParameter("minMarkerLengthRatioOriginalImg", params.minMarkerLengthRatioOriginalImg, readNode, writeStorage); return check; } @@ -57,14 +58,14 @@ bool DetectorParameters::readDetectorParameters(const FileNode& fn) { if(fn.empty()) return false; Ptr pfn = makePtr(fn); - return readWrite(pfn); + return readWrite(*this, pfn); } bool DetectorParameters::writeDetectorParameters(const Ptr& fs) { if (fs.empty() && !fs->isOpened()) return false; - return readWrite(nullptr, fs); + return readWrite(*this, nullptr, fs); } static inline bool readWrite(RefineParameters& refineParameters, const Ptr& readNode, From e5aa1385c3e043a8f2e31e4b290b5365d54c140d Mon Sep 17 00:00:00 2001 From: Andrey Senyaev Date: Wed, 27 Jul 2022 10:25:35 +0300 Subject: [PATCH 39/45] Workflow on Ubuntu 14.04 for 3.4 branch --- .github/workflows/PR-3.4.yaml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/workflows/PR-3.4.yaml b/.github/workflows/PR-3.4.yaml index e219af7b809..bf3a80471a0 100644 --- a/.github/workflows/PR-3.4.yaml +++ b/.github/workflows/PR-3.4.yaml @@ -6,13 +6,16 @@ on: - 3.4 jobs: - ARM64: + Ubuntu2004-ARM64: uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-ARM64.yaml@main - U20: + Ubuntu1404-x64: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-U14.yaml@main + + Ubuntu2004-x64: uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-U20.yaml@main - W10: + Windows10-x64: uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-3.4-W10.yaml@main macOS-ARM64: From 0fe993d919f575ffae792efb4c21e28f09f71739 Mon Sep 17 00:00:00 2001 From: Andrey Senyaev Date: Wed, 27 Jul 2022 10:39:11 +0300 Subject: [PATCH 40/45] Rename jobs in GHA for 4.x --- .github/workflows/PR-4.x.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/PR-4.x.yaml b/.github/workflows/PR-4.x.yaml index 122e3d0e99b..dedc3767f9f 100644 --- a/.github/workflows/PR-4.x.yaml +++ 
b/.github/workflows/PR-4.x.yaml @@ -6,13 +6,13 @@ on: - 4.x jobs: - ARM64: + Ubuntu2004-ARM64: uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-ARM64.yaml@main - U20: + Ubuntu2004-x64: uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-U20.yaml@main - W10: + Windows10-x64: uses: opencv/ci-gha-workflow/.github/workflows/OCV-Contrib-PR-4.x-W10.yaml@main macOS-ARM64: From 230b6c9a448f65c2f72133677c7eea753a4b298b Mon Sep 17 00:00:00 2001 From: AleksandrPanov Date: Wed, 3 Aug 2022 16:30:04 +0300 Subject: [PATCH 41/45] add set/get/dictionary/objPoints, update tests --- modules/aruco/include/opencv2/aruco/board.hpp | 38 ++++++++- modules/aruco/misc/python/test/test_aruco.py | 8 +- .../tutorial_charuco_create_detect.cpp | 4 +- modules/aruco/src/aruco.cpp | 2 +- modules/aruco/src/aruco_calib_pose.cpp | 8 +- modules/aruco/src/aruco_detector.cpp | 30 +++---- modules/aruco/src/board.cpp | 84 ++++++++++++++----- modules/aruco/src/charuco.cpp | 27 +++--- modules/aruco/test/test_aruco_utils.hpp | 8 +- modules/aruco/test/test_boarddetection.cpp | 75 +++++++++-------- modules/aruco/test/test_charucodetection.cpp | 37 ++++---- 11 files changed, 200 insertions(+), 121 deletions(-) diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp index 51d0248bcab..85c8369e91f 100644 --- a/modules/aruco/include/opencv2/aruco/board.hpp +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -44,6 +44,36 @@ class CV_EXPORTS_W Board { */ CV_WRAP void setIds(InputArray ids); + /** @brief change id for ids[index] + * @param index - element index in ids + * @param newId - new value for ids[index], should be less than Dictionary size + */ + CV_WRAP void changeId(int index, int newId); + /** @brief return ids + */ + CV_WRAP const std::vector& getIds() const; + + /** @brief set dictionary + */ + CV_WRAP void setDictionary(const Ptr &dictionary); + + /** @brief return dictionary + */ + CV_WRAP Ptr getDictionary() const; + + 
/** @brief set objPoints + */ + CV_WRAP void setObjPoints(const std::vector > &objPoints); + + /** @brief get objPoints + */ + CV_WRAP const std::vector >& getObjPoints() const; + + /** @brief get rightBottomBorder + */ + CV_WRAP const Point3f& getRightBottomBorder() const; + +protected: /** @brief array of object points of all the marker corners in the board each marker include its 4 corners in this order: * - objPoints[i][0] - left-top point of i-th marker * - objPoints[i][1] - right-top point of i-th marker @@ -58,13 +88,13 @@ class CV_EXPORTS_W Board { /// the dictionary of markers employed for this board CV_PROP Ptr dictionary; + /// coordinate of the bottom right corner of the board, is set when calling the function create() + CV_PROP Point3f rightBottomBorder; + /** @brief vector of the identifiers of the markers in the board (same size than objPoints) * The identifiers refers to the board dictionary */ CV_PROP_RW std::vector ids; - - /// coordinate of the bottom right corner of the board, is set when calling the function create() - CV_PROP Point3f rightBottomBorder; }; /** @@ -196,7 +226,7 @@ class CV_EXPORTS_W CharucoBoard : public Board { * @param board layout of ChArUco board. * @param charucoIds list of identifiers for each corner in charucoCorners per frame. * @return bool value, 1 (true) if detected corners form a line, 0 (false) if they do not. - solvePnP, calibration functions will fail if the corners are collinear (true). + * solvePnP, calibration functions will fail if the corners are collinear (true). * * The number of ids in charucoIDs should be <= the number of chessboard corners in the board. * This functions checks whether the charuco corners are on a straight line (returns true, if so), or not (false). 
diff --git a/modules/aruco/misc/python/test/test_aruco.py b/modules/aruco/misc/python/test/test_aruco.py index 349d7b73524..ebbb86622ab 100644 --- a/modules/aruco/misc/python/test/test_aruco.py +++ b/modules/aruco/misc/python/test/test_aruco.py @@ -19,13 +19,13 @@ def test_idsAccessibility(self): aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_5X5_250) board = cv.aruco.CharucoBoard_create(7, 5, 1, 0.5, aruco_dict) - np.testing.assert_array_equal(board.ids.squeeze(), ids) + np.testing.assert_array_equal(board.getIds().squeeze(), ids) - board.ids = rev_ids - np.testing.assert_array_equal(board.ids.squeeze(), rev_ids) + board.setIds(rev_ids) + np.testing.assert_array_equal(board.getIds().squeeze(), rev_ids) board.setIds(ids) - np.testing.assert_array_equal(board.ids.squeeze(), ids) + np.testing.assert_array_equal(board.getIds().squeeze(), ids) with self.assertRaises(cv.error): board.setIds(np.array([0])) diff --git a/modules/aruco/samples/tutorial_charuco_create_detect.cpp b/modules/aruco/samples/tutorial_charuco_create_detect.cpp index 89b538c8a01..1c47003b57a 100644 --- a/modules/aruco/samples/tutorial_charuco_create_detect.cpp +++ b/modules/aruco/samples/tutorial_charuco_create_detect.cpp @@ -50,7 +50,7 @@ static inline void detectCharucoBoardWithCalibrationPose() //! [midcornerdet] std::vector markerIds; std::vector > markerCorners; - cv::aruco::detectMarkers(image, board->dictionary, markerCorners, markerIds, params); + cv::aruco::detectMarkers(image, board->getDictionary(), markerCorners, markerIds, params); //! 
[midcornerdet] // if at least one marker detected if (markerIds.size() > 0) { @@ -100,7 +100,7 @@ static inline void detectCharucoBoardWithoutCalibration() image.copyTo(imageCopy); std::vector markerIds; std::vector > markerCorners; - cv::aruco::detectMarkers(image, board->dictionary, markerCorners, markerIds, params); + cv::aruco::detectMarkers(image, board->getDictionary(), markerCorners, markerIds, params); //or //cv::aruco::detectMarkers(image, dictionary, markerCorners, markerIds, params); // if at least one marker detected diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index 16b23a75569..92abb40ae83 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -23,7 +23,7 @@ void refineDetectedMarkers(InputArray _image, const Ptr &_board, bool checkAllOrders, OutputArray _recoveredIdxs, const Ptr &_params) { Ptr refineParams = RefineParameters::create(minRepDistance, errorCorrectionRate, checkAllOrders); - ArucoDetector detector(_board->dictionary, _params, refineParams); + ArucoDetector detector(_board->getDictionary(), _params, refineParams); detector.refineDetectedMarkers(_image, _board, _detectedCorners, _detectedIds, _rejectedCorners, _cameraMatrix, _distCoeffs, _recoveredIdxs); } diff --git a/modules/aruco/src/aruco_calib_pose.cpp b/modules/aruco/src/aruco_calib_pose.cpp index 7f81643c43d..1290126d5c7 100644 --- a/modules/aruco/src/aruco_calib_pose.cpp +++ b/modules/aruco/src/aruco_calib_pose.cpp @@ -10,7 +10,7 @@ using namespace std; void getBoardObjectAndImagePoints(const Ptr &board, InputArrayOfArrays detectedCorners, InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints) { - CV_Assert(board->ids.size() == board->objPoints.size()); + CV_Assert(board->getIds().size() == board->getObjPoints().size()); CV_Assert(detectedIds.total() == detectedCorners.total()); size_t nDetectedMarkers = detectedIds.total(); @@ -24,10 +24,10 @@ void getBoardObjectAndImagePoints(const Ptr &board, InputArrayOfArrays de 
// look for detected markers that belong to the board and get their information for(unsigned int i = 0; i < nDetectedMarkers; i++) { int currentId = detectedIds.getMat().ptr< int >(0)[i]; - for(unsigned int j = 0; j < board->ids.size(); j++) { - if(currentId == board->ids[j]) { + for(unsigned int j = 0; j < board->getIds().size(); j++) { + if(currentId == board->getIds()[j]) { for(int p = 0; p < 4; p++) { - objPnts.push_back(board->objPoints[j][p]); + objPnts.push_back(board->getObjPoints()[j][p]); imgPnts.push_back(detectedCorners.getMat(i).ptr< Point2f >(0)[p]); } } diff --git a/modules/aruco/src/aruco_detector.cpp b/modules/aruco/src/aruco_detector.cpp index 6b6a328bcd2..45f7cc101b8 100644 --- a/modules/aruco/src/aruco_detector.cpp +++ b/modules/aruco/src/aruco_detector.cpp @@ -965,10 +965,10 @@ static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArray // search undetected markers and project them using the previous pose vector > undetectedCorners; vector undetectedIds; - for(unsigned int i = 0; i < _board->ids.size(); i++) { + for(unsigned int i = 0; i < _board->getIds().size(); i++) { int foundIdx = -1; for(unsigned int j = 0; j < _detectedIds.total(); j++) { - if(_board->ids[i] == _detectedIds.getMat().ptr< int >()[j]) { + if(_board->getIds()[i] == _detectedIds.getMat().ptr< int >()[j]) { foundIdx = j; break; } @@ -977,8 +977,8 @@ static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArray // not detected if(foundIdx == -1) { undetectedCorners.push_back(vector()); - undetectedIds.push_back(_board->ids[i]); - projectPoints(_board->objPoints[i], rvec, tvec, _cameraMatrix, _distCoeffs, + undetectedIds.push_back(_board->getIds()[i]); + projectPoints(_board->getObjPoints()[i], rvec, tvec, _cameraMatrix, _distCoeffs, undetectedCorners.back()); } } @@ -996,12 +996,12 @@ static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArray vector >& _undetectedMarkersProjectedCorners, OutputArray _undetectedMarkersIds) { // check 
board points are in the same plane, if not, global homography cannot be applied - CV_Assert(_board->objPoints.size() > 0); - CV_Assert(_board->objPoints[0].size() > 0); - float boardZ = _board->objPoints[0][0].z; - for(unsigned int i = 0; i < _board->objPoints.size(); i++) { - for(unsigned int j = 0; j < _board->objPoints[i].size(); j++) - CV_Assert(boardZ == _board->objPoints[i][j].z); + CV_Assert(_board->getObjPoints().size() > 0); + CV_Assert(_board->getObjPoints()[0].size() > 0); + float boardZ = _board->getObjPoints()[0][0].z; + for(unsigned int i = 0; i < _board->getObjPoints().size(); i++) { + for(unsigned int j = 0; j < _board->getObjPoints()[i].size(); j++) + CV_Assert(boardZ == _board->getObjPoints()[i][j].z); } vector detectedMarkersObj2DAll; // Object coordinates (without Z) of all the detected @@ -1011,14 +1011,14 @@ static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArray // missing markers in different vectors vector undetectedMarkersIds; // ids of missing markers // find markers included in board, and missing markers from board. 
Fill the previous vectors - for(unsigned int j = 0; j < _board->ids.size(); j++) { + for(unsigned int j = 0; j < _board->getIds().size(); j++) { bool found = false; for(unsigned int i = 0; i < _detectedIds.total(); i++) { - if(_detectedIds.getMat().ptr< int >()[i] == _board->ids[j]) { + if(_detectedIds.getMat().ptr< int >()[i] == _board->getIds()[j]) { for(int c = 0; c < 4; c++) { imageCornersAll.push_back(_detectedCorners.getMat(i).ptr< Point2f >()[c]); detectedMarkersObj2DAll.push_back( - Point2f(_board->objPoints[j][c].x, _board->objPoints[j][c].y)); + Point2f(_board->getObjPoints()[j][c].x, _board->getObjPoints()[j][c].y)); } found = true; break; @@ -1028,9 +1028,9 @@ static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArray undetectedMarkersObj2D.push_back(vector()); for(int c = 0; c < 4; c++) { undetectedMarkersObj2D.back().push_back( - Point2f(_board->objPoints[j][c].x, _board->objPoints[j][c].y)); + Point2f(_board->getObjPoints()[j][c].x, _board->getObjPoints()[j][c].y)); } - undetectedMarkersIds.push_back(_board->ids[j]); + undetectedMarkersIds.push_back(_board->getIds()[j]); } } if(imageCornersAll.size() == 0) return; diff --git a/modules/aruco/src/board.cpp b/modules/aruco/src/board.cpp index 94f88b01fb9..53352f35f58 100644 --- a/modules/aruco/src/board.cpp +++ b/modules/aruco/src/board.cpp @@ -22,17 +22,17 @@ static void _drawPlanarBoardImpl(Board *_board, Size outSize, OutputArray _img, out.adjustROI(-marginSize, -marginSize, -marginSize, -marginSize); // calculate max and min values in XY plane - CV_Assert(_board->objPoints.size() > 0); + CV_Assert(_board->getObjPoints().size() > 0); float minX, maxX, minY, maxY; - minX = maxX = _board->objPoints[0][0].x; - minY = maxY = _board->objPoints[0][0].y; + minX = maxX = _board->getObjPoints()[0][0].x; + minY = maxY = _board->getObjPoints()[0][0].y; - for(unsigned int i = 0; i < _board->objPoints.size(); i++) { + for(unsigned int i = 0; i < _board->getObjPoints().size(); i++) { for(int j = 0; 
j < 4; j++) { - minX = min(minX, _board->objPoints[i][j].x); - maxX = max(maxX, _board->objPoints[i][j].x); - minY = min(minY, _board->objPoints[i][j].y); - maxY = max(maxY, _board->objPoints[i][j].y); + minX = min(minX, _board->getObjPoints()[i][j].x); + maxX = max(maxX, _board->getObjPoints()[i][j].x); + minY = min(minY, _board->getObjPoints()[i][j].y); + maxY = max(maxY, _board->getObjPoints()[i][j].y); } } @@ -55,14 +55,14 @@ static void _drawPlanarBoardImpl(Board *_board, Size outSize, OutputArray _img, } // now paint each marker - Dictionary &dictionary = *(_board->dictionary); + Dictionary &dictionary = *(_board->getDictionary()); Mat marker; Point2f outCorners[3]; Point2f inCorners[3]; - for(unsigned int m = 0; m < _board->objPoints.size(); m++) { + for(unsigned int m = 0; m < _board->getObjPoints().size(); m++) { // transform corners to markerZone coordinates for(int j = 0; j < 3; j++) { - Point2f pf = Point2f(_board->objPoints[m][j].x, _board->objPoints[m][j].y); + Point2f pf = Point2f(_board->getObjPoints()[m][j].x, _board->getObjPoints()[m][j].y); // move top left to 0, 0 pf -= Point2f(minX, minY); pf.x = pf.x / sizeX * float(out.cols); @@ -73,7 +73,7 @@ static void _drawPlanarBoardImpl(Board *_board, Size outSize, OutputArray _img, // get marker Size dst_sz(outCorners[2] - outCorners[0]); // assuming CCW order dst_sz.width = dst_sz.height = std::min(dst_sz.width, dst_sz.height); //marker should be square - dictionary.drawMarker(_board->ids[m], dst_sz.width, marker, borderBits); + dictionary.drawMarker(_board->getIds()[m], dst_sz.width, marker, borderBits); if((outCorners[0].y == outCorners[1].y) && (outCorners[1].x == outCorners[2].x)) { // marker is aligned to image axes @@ -150,6 +150,46 @@ void Board::setIds(InputArray ids_) { ids_.copyTo(this->ids); } +Ptr Board::getDictionary() const { + return this->dictionary; +} + +void Board::setDictionary(const Ptr &_dictionary) { + this->dictionary = _dictionary; +} + +const std::vector >& 
Board::getObjPoints() const { + return this->objPoints; +} + +void Board::setObjPoints(const vector> &_objPoints) { + CV_Assert(!_objPoints.empty()); + this->objPoints = _objPoints; + rightBottomBorder = _objPoints.front().front(); + for (size_t i = 0; i < this->objPoints.size(); i++) { + for (int j = 0; j < 4; j++) { + const Point3f &corner = this->objPoints[i][j]; + rightBottomBorder.x = std::max(rightBottomBorder.x, corner.x); + rightBottomBorder.y = std::max(rightBottomBorder.y, corner.y); + rightBottomBorder.z = std::max(rightBottomBorder.z, corner.z); + } + } +} + +const Point3f& Board::getRightBottomBorder() const { + return this->rightBottomBorder; +} + +const std::vector& Board::getIds() const { + return this->ids; +} + +void Board::changeId(int index, int newId) { + CV_Assert(index >= 0 && index < (int)ids.size()); + CV_Assert(newId >= 0 && newId < dictionary->bytesList.rows); + this->ids[index] = newId; +} + Ptr GridBoard::create(int markersX, int markersY, float markerLength, float markerSeparation, const Ptr &dictionary, int firstMarker) { CV_Assert(markersX > 0 && markersY > 0 && markerLength > 0 && markerSeparation > 0); @@ -158,11 +198,12 @@ Ptr GridBoard::create(int markersX, int markersY, float markerLength, res->gridImpl->sizeY = markersY; res->gridImpl->markerLength = markerLength; res->gridImpl->markerSeparation = markerSeparation; - res->dictionary = dictionary; + res->setDictionary(dictionary); size_t totalMarkers = (size_t) markersX * markersY; res->ids.resize(totalMarkers); - res->objPoints.reserve(totalMarkers); + std::vector > objPoints; + objPoints.reserve(totalMarkers); // fill ids with first identifiers for (unsigned int i = 0; i < totalMarkers; i++) { @@ -178,9 +219,10 @@ Ptr GridBoard::create(int markersX, int markersY, float markerLength, corners[1] = corners[0] + Point3f(markerLength, 0, 0); corners[2] = corners[0] + Point3f(markerLength, markerLength, 0); corners[3] = corners[0] + Point3f(0, markerLength, 0); - 
res->objPoints.push_back(corners); + objPoints.push_back(corners); } } + res->setObjPoints(objPoints); res->rightBottomBorder = Point3f(markersX * markerLength + markerSeparation * (markersX - 1), markersY * markerLength + markerSeparation * (markersY - 1), 0.f); return res; @@ -281,7 +323,7 @@ static inline void _getNearestMarkerCorners(CharucoBoard &board, float squareLen board.nearestMarkerIdx.resize(board.chessboardCorners.size()); board.nearestMarkerCorners.resize(board.chessboardCorners.size()); - unsigned int nMarkers = (unsigned int)board.ids.size(); + unsigned int nMarkers = (unsigned int)board.getIds().size(); unsigned int nCharucoCorners = (unsigned int)board.chessboardCorners.size(); for(unsigned int i = 0; i < nCharucoCorners; i++) { double minDist = -1; // distance of closest markers @@ -290,7 +332,7 @@ static inline void _getNearestMarkerCorners(CharucoBoard &board, float squareLen // calculate distance from marker center to charuco corner Point3f center = Point3f(0, 0, 0); for(unsigned int k = 0; k < 4; k++) - center += board.objPoints[j][k]; + center += board.getObjPoints()[j][k]; center /= 4.; double sqDistance; Point3f distVector = charucoCorner - center; @@ -313,7 +355,7 @@ static inline void _getNearestMarkerCorners(CharucoBoard &board, float squareLen double minDistCorner = -1; for(unsigned int k = 0; k < 4; k++) { double sqDistance; - Point3f distVector = charucoCorner - board.objPoints[board.nearestMarkerIdx[i][j]][k]; + Point3f distVector = charucoCorner - board.getObjPoints()[board.nearestMarkerIdx[i][j]][k]; sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; if(k == 0 || sqDistance < minDistCorner) { // if this corner is closer to the charuco corner, assing its index @@ -335,7 +377,8 @@ Ptr CharucoBoard::create(int squaresX, int squaresY, float squareL res->charucoImpl->sizeY = squaresY; res->charucoImpl->squareLength = squareLength; res->charucoImpl->markerLength = markerLength; - res->dictionary = dictionary; + 
res->setDictionary(dictionary); + std::vector > objPoints; float diffSquareMarkerLength = (squareLength - markerLength) / 2; // calculate Board objPoints @@ -350,12 +393,13 @@ Ptr CharucoBoard::create(int squaresX, int squaresY, float squareL corners[1] = corners[0] + Point3f(markerLength, 0, 0); corners[2] = corners[0] + Point3f(markerLength, markerLength, 0); corners[3] = corners[0] + Point3f(0, markerLength, 0); - res->objPoints.push_back(corners); + objPoints.push_back(corners); // first ids in dictionary int nextId = (int)res->ids.size(); res->ids.push_back(nextId); } } + res->setObjPoints(objPoints); // now fill chessboardCorners for(int y = 0; y < squaresY - 1; y++) { diff --git a/modules/aruco/src/charuco.cpp b/modules/aruco/src/charuco.cpp index 0aac51e3b14..a61e0e70567 100644 --- a/modules/aruco/src/charuco.cpp +++ b/modules/aruco/src/charuco.cpp @@ -31,7 +31,7 @@ static int _filterCornersWithoutMinMarkers(const Ptr &_board, int totalMarkers = 0; // nomber of closest marker detected // look for closest markers for(unsigned int m = 0; m < _board->nearestMarkerIdx[currentCharucoId].size(); m++) { - int markerId = _board->ids[_board->nearestMarkerIdx[currentCharucoId][m]]; + int markerId = _board->getIds()[_board->nearestMarkerIdx[currentCharucoId][m]]; bool found = false; for(unsigned int k = 0; k < _allArucoIds.getMat().total(); k++) { if(_allArucoIds.getMat().at< int >(k) == markerId) { @@ -141,7 +141,7 @@ static void _getMaximumSubPixWindowSizes(InputArrayOfArrays markerCorners, Input // calculate the distance to each of the closest corner of each closest marker for(unsigned int j = 0; j < board->nearestMarkerIdx[i].size(); j++) { // find marker - int markerId = board->ids[board->nearestMarkerIdx[i][j]]; + int markerId = board->getIds()[board->nearestMarkerIdx[i][j]]; int markerIdx = -1; for(unsigned int k = 0; k < markerIds.getMat().total(); k++) { if(markerIds.getMat().at< int >(k) == markerId) { @@ -237,16 +237,18 @@ static int 
_interpolateCornersCharucoLocalHom(InputArrayOfArrays _markerCorners, vector< bool > validTransform(nMarkers, false); + const auto& ids = _board->getIds(); for(unsigned int i = 0; i < nMarkers; i++) { - vector< Point2f > markerObjPoints2D; - int markerId = _markerIds.getMat().at< int >(i); - vector< int >::const_iterator it = find(_board->ids.begin(), _board->ids.end(), markerId); - if(it == _board->ids.end()) continue; - int boardIdx = (int)std::distance::const_iterator>(_board->ids.begin(), it); + vector markerObjPoints2D; + int markerId = _markerIds.getMat().at(i); + + auto it = find(ids.begin(), ids.end(), markerId); + if(it == ids.end()) continue; + auto boardIdx = it - ids.begin(); markerObjPoints2D.resize(4); for(unsigned int j = 0; j < 4; j++) markerObjPoints2D[j] = - Point2f(_board->objPoints[boardIdx][j].x, _board->objPoints[boardIdx][j].y); + Point2f(_board->getObjPoints()[boardIdx][j].x, _board->getObjPoints()[boardIdx][j].y); transformations[i] = getPerspectiveTransform(markerObjPoints2D, _markerCorners.getMat(i)); @@ -265,7 +267,7 @@ static int _interpolateCornersCharucoLocalHom(InputArrayOfArrays _markerCorners, vector< Point2f > interpolatedPositions; for(unsigned int j = 0; j < _board->nearestMarkerIdx[i].size(); j++) { - int markerId = _board->ids[_board->nearestMarkerIdx[i][j]]; + int markerId = _board->getIds()[_board->nearestMarkerIdx[i][j]]; int markerIdx = -1; for(unsigned int k = 0; k < _markerIds.getMat().total(); k++) { if(_markerIds.getMat().at< int >(k) == markerId) { @@ -417,12 +419,11 @@ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners, } } if(candidates.size() < 3) break; // we need at least 3 free markers - // modify charuco layout id to make sure all the ids are different than current id for(int k = 1; k < 4; k++) - _charucoDiamondLayout->ids[k] = currentId + 1 + k; + _charucoDiamondLayout->changeId(k, currentId + 1 + k); // current id is assigned to [0], so it is the marker on the top - 
_charucoDiamondLayout->ids[0] = currentId; + _charucoDiamondLayout->changeId(0, currentId); // try to find the rest of markers in the diamond vector< int > acceptedIdxs; @@ -496,7 +497,7 @@ void drawCharucoDiamond(const Ptr &dictionary, Vec4i ids, int square // assign the charuco marker ids for(int i = 0; i < 4; i++) - board->ids[i] = ids[i]; + board->changeId(i, ids[i]); Size outSize(3 * squareLength + 2 * marginSize, 3 * squareLength + 2 * marginSize); board->draw(outSize, _img, marginSize, borderBits); diff --git a/modules/aruco/test/test_aruco_utils.hpp b/modules/aruco/test/test_aruco_utils.hpp index 13b69f27d4b..9ae94f789ef 100644 --- a/modules/aruco/test/test_aruco_utils.hpp +++ b/modules/aruco/test/test_aruco_utils.hpp @@ -65,16 +65,16 @@ static inline void projectMarker(Mat& img, Ptr board, int markerIn // canonical image Mat markerImg; const int markerSizePixels = 100; - aruco::drawMarker(board->dictionary, board->ids[markerIndex], markerSizePixels, markerImg, markerBorder); + aruco::drawMarker(board->getDictionary(), board->getIds()[markerIndex], markerSizePixels, markerImg, markerBorder); // projected corners Mat distCoeffs(5, 1, CV_64FC1, Scalar::all(0)); vector corners; // get max coordinate of board - Point3f maxCoord = board->rightBottomBorder; + Point3f maxCoord = board->getRightBottomBorder(); // copy objPoints - vector objPoints = board->objPoints[markerIndex]; + vector objPoints = board->getObjPoints()[markerIndex]; // move the marker to the origin for (size_t i = 0; i < objPoints.size(); i++) objPoints[i] -= (maxCoord / 2.f); @@ -115,7 +115,7 @@ static inline Mat projectBoard(Ptr& board, Mat cameraMatrix, d getSyntheticRT(yaw, pitch, distance, rvec, tvec); Mat img = Mat(imageSize, CV_8UC1, Scalar::all(255)); - for (unsigned int index = 0; index < board->ids.size(); index++) { + for (unsigned int index = 0; index < board->getIds().size(); index++) { projectMarker(img, board.staticCast(), index, cameraMatrix, rvec, tvec, markerBorder); } diff 
--git a/modules/aruco/test/test_boarddetection.cpp b/modules/aruco/test/test_boarddetection.cpp index 42ab71ce697..148c6d440d0 100644 --- a/modules/aruco/test/test_boarddetection.cpp +++ b/modules/aruco/test/test_boarddetection.cpp @@ -89,41 +89,43 @@ void CV_ArucoBoardPose::run(int) { for(double distance = 0.2; distance <= 0.4; distance += 0.15) { for(int yaw = -55; yaw <= 50; yaw += 25) { for(int pitch = -55; pitch <= 50; pitch += 25) { - for(unsigned int i = 0; i < gridboard->ids.size(); i++) - gridboard->ids[i] = (iter + int(i)) % 250; + vector tmpIds; + for(unsigned int i = 0; i < gridboard->getIds().size(); i++) + tmpIds.push_back((iter + int(i)) % 250); + gridboard->setIds(tmpIds); int markerBorder = iter % 2 + 1; iter++; // create synthetic image Mat img = projectBoard(gridboard, cameraMatrix, deg2rad(yaw), deg2rad(pitch), distance, imgSize, markerBorder); - vector< vector< Point2f > > corners; - vector< int > ids; + vector > corners; + vector ids; detector.params->markerBorderBits = markerBorder; detector.detectMarkers(img, corners, ids); - ASSERT_EQ(ids.size(), gridboard->ids.size()); + ASSERT_EQ(ids.size(), gridboard->getIds().size()); // estimate pose Mat rvec, tvec; aruco::estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs, rvec, tvec); // check axes - vector axes = getAxis(cameraMatrix, distCoeffs, rvec, tvec, gridboard->rightBottomBorder.x); - vector topLeft = getMarkerById(gridboard->ids[0], corners, ids); + vector axes = getAxis(cameraMatrix, distCoeffs, rvec, tvec, gridboard->getRightBottomBorder().x); + vector topLeft = getMarkerById(gridboard->getIds()[0], corners, ids); ASSERT_NEAR(topLeft[0].x, axes[0].x, 2.f); ASSERT_NEAR(topLeft[0].y, axes[0].y, 2.f); - vector topRight = getMarkerById(gridboard->ids[2], corners, ids); + vector topRight = getMarkerById(gridboard->getIds()[2], corners, ids); ASSERT_NEAR(topRight[1].x, axes[1].x, 2.f); ASSERT_NEAR(topRight[1].y, axes[1].y, 2.f); - vector bottomLeft = 
getMarkerById(gridboard->ids[6], corners, ids); + vector bottomLeft = getMarkerById(gridboard->getIds()[6], corners, ids); ASSERT_NEAR(bottomLeft[3].x, axes[2].x, 2.f); ASSERT_NEAR(bottomLeft[3].y, axes[2].y, 2.f); // check estimate result for(unsigned int i = 0; i < ids.size(); i++) { int foundIdx = -1; - for(unsigned int j = 0; j < gridboard->ids.size(); j++) { - if(gridboard->ids[j] == ids[i]) { + for(unsigned int j = 0; j < gridboard->getIds().size(); j++) { + if(gridboard->getIds()[j] == ids[i]) { foundIdx = int(j); break; } @@ -136,7 +138,7 @@ void CV_ArucoBoardPose::run(int) { } vector< Point2f > projectedCorners; - projectPoints(gridboard->objPoints[foundIdx], rvec, tvec, cameraMatrix, distCoeffs, + projectPoints(gridboard->getObjPoints()[foundIdx], rvec, tvec, cameraMatrix, distCoeffs, projectedCorners); for(int c = 0; c < 4; c++) { @@ -194,8 +196,10 @@ void CV_ArucoRefine::run(int) { for(double distance = 0.2; distance <= 0.4; distance += 0.2) { for(int yaw = -60; yaw < 60; yaw += 30) { for(int pitch = -60; pitch <= 60; pitch += 30) { - for(unsigned int i = 0; i < gridboard->ids.size(); i++) - gridboard->ids[i] = (iter + int(i)) % 250; + vector tmpIds; + for(unsigned int i = 0; i < gridboard->getIds().size(); i++) + tmpIds.push_back(iter + int(i) % 250); + gridboard->setIds(tmpIds); int markerBorder = iter % 2 + 1; iter++; @@ -203,8 +207,8 @@ void CV_ArucoRefine::run(int) { Mat img = projectBoard(gridboard, cameraMatrix, deg2rad(yaw), deg2rad(pitch), distance, imgSize, markerBorder); // detect markers - vector< vector< Point2f > > corners, rejected; - vector< int > ids; + vector > corners, rejected; + vector ids; detector.params->markerBorderBits = markerBorder; detector.detectMarkers(img, corners, ids, rejected); @@ -264,23 +268,22 @@ TEST(CV_ArucoBoardPose, CheckNegativeZ) 0., 0., 1 }; cv::Mat cameraMatrix = cv::Mat(3, 3, CV_64F, matrixData); - cv::Ptr boardPtr(new cv::aruco::Board); + cv::Ptr boardPtr = makePtr(); cv::aruco::Board& board = *boardPtr; - 
board.ids.push_back(0); - board.ids.push_back(1); - - vector pts3d; - pts3d.push_back(cv::Point3f(0.326198f, -0.030621f, 0.303620f)); - pts3d.push_back(cv::Point3f(0.325340f, -0.100594f, 0.301862f)); - pts3d.push_back(cv::Point3f(0.255859f, -0.099530f, 0.293416f)); - pts3d.push_back(cv::Point3f(0.256717f, -0.029557f, 0.295174f)); - board.objPoints.push_back(pts3d); - pts3d.clear(); - pts3d.push_back(cv::Point3f(-0.033144f, -0.034819f, 0.245216f)); - pts3d.push_back(cv::Point3f(-0.035507f, -0.104705f, 0.241987f)); - pts3d.push_back(cv::Point3f(-0.105289f, -0.102120f, 0.237120f)); - pts3d.push_back(cv::Point3f(-0.102926f, -0.032235f, 0.240349f)); - board.objPoints.push_back(pts3d); + + vector pts3d1, pts3d2; + pts3d1.push_back(cv::Point3f(0.326198f, -0.030621f, 0.303620f)); + pts3d1.push_back(cv::Point3f(0.325340f, -0.100594f, 0.301862f)); + pts3d1.push_back(cv::Point3f(0.255859f, -0.099530f, 0.293416f)); + pts3d1.push_back(cv::Point3f(0.256717f, -0.029557f, 0.295174f)); + + pts3d2.push_back(cv::Point3f(-0.033144f, -0.034819f, 0.245216f)); + pts3d2.push_back(cv::Point3f(-0.035507f, -0.104705f, 0.241987f)); + pts3d2.push_back(cv::Point3f(-0.105289f, -0.102120f, 0.237120f)); + pts3d2.push_back(cv::Point3f(-0.102926f, -0.032235f, 0.240349f)); + + board.setObjPoints({pts3d1, pts3d2}); + board.setIds(vector{0, 1}); vector > corners; vector pts2d; @@ -297,12 +300,12 @@ TEST(CV_ArucoBoardPose, CheckNegativeZ) corners.push_back(pts2d); Vec3d rvec, tvec; - int nUsed = cv::aruco::estimatePoseBoard(corners, board.ids, boardPtr, cameraMatrix, Mat(), rvec, tvec); + int nUsed = cv::aruco::estimatePoseBoard(corners, board.getIds(), boardPtr, cameraMatrix, Mat(), rvec, tvec); ASSERT_EQ(nUsed, 2); cv::Matx33d rotm; cv::Point3d out; cv::Rodrigues(rvec, rotm); - out = cv::Point3d(tvec) + rotm*Point3d(board.objPoints[0][0]); + out = cv::Point3d(tvec) + rotm*Point3d(board.getObjPoints()[0][0]); ASSERT_GT(out.z, 0); corners.clear(); pts2d.clear(); @@ -318,11 +321,11 @@ 
TEST(CV_ArucoBoardPose, CheckNegativeZ) pts2d.push_back(cv::Point2f(586.3f, 188.5f)); corners.push_back(pts2d); - nUsed = cv::aruco::estimatePoseBoard(corners, board.ids, boardPtr, cameraMatrix, Mat(), rvec, tvec, true); + nUsed = cv::aruco::estimatePoseBoard(corners, board.getIds(), boardPtr, cameraMatrix, Mat(), rvec, tvec, true); ASSERT_EQ(nUsed, 2); cv::Rodrigues(rvec, rotm); - out = cv::Point3d(tvec) + rotm*Point3d(board.objPoints[0][0]); + out = cv::Point3d(tvec) + rotm*Point3d(board.getObjPoints()[0][0]); ASSERT_GT(out.z, 0); } diff --git a/modules/aruco/test/test_charucodetection.cpp b/modules/aruco/test/test_charucodetection.cpp index c819b37029c..99b87264df5 100644 --- a/modules/aruco/test/test_charucodetection.cpp +++ b/modules/aruco/test/test_charucodetection.cpp @@ -93,7 +93,7 @@ static Mat projectCharucoBoard(Ptr &board, Mat cameraMatrix // project markers Mat img = Mat(imageSize, CV_8UC1, Scalar::all(255)); - for(unsigned int indexMarker = 0; indexMarker < board->ids.size(); indexMarker++) { + for(unsigned int indexMarker = 0; indexMarker < board->getIds().size(); indexMarker++) { projectMarker(img, board.staticCast(), indexMarker, cameraMatrix, rvec, tvec, markerBorder); } @@ -137,9 +137,9 @@ void CV_CharucoDetection::run(int) { aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params); Ptr board = aruco::CharucoBoard::create(4, 4, 0.03f, 0.015f, detector.dictionary); - cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 600; - cameraMatrix.at< double >(0, 2) = imgSize.width / 2; - cameraMatrix.at< double >(1, 2) = imgSize.height / 2; + cameraMatrix.at(0, 0) = cameraMatrix.at(1, 1) = 600; + cameraMatrix.at(0, 2) = imgSize.width / 2; + cameraMatrix.at(1, 2) = imgSize.height / 2; Mat distCoeffs(5, 1, CV_64FC1, Scalar::all(0)); @@ -157,8 +157,8 @@ void CV_CharucoDetection::run(int) { distance, imgSize, markerBorder, rvec, tvec); // detect markers - vector< vector< Point2f > > corners; - vector< int > 
ids; + vector > corners; + vector ids; detector.params->markerBorderBits = markerBorder; detector.detectMarkers(img, corners, ids); @@ -170,8 +170,8 @@ void CV_CharucoDetection::run(int) { } // interpolate charuco corners - vector< Point2f > charucoCorners; - vector< int > charucoIds; + vector charucoCorners; + vector charucoIds; if(iter % 2 == 0) { aruco::interpolateCornersCharuco(corners, ids, img, board, charucoCorners, @@ -188,7 +188,7 @@ void CV_CharucoDetection::run(int) { vector copyChessboardCorners = board->chessboardCorners; // move copyChessboardCorners points for (size_t i = 0; i < copyChessboardCorners.size(); i++) - copyChessboardCorners[i] -= board->rightBottomBorder / 2.f; + copyChessboardCorners[i] -= board->getRightBottomBorder() / 2.f; projectPoints(copyChessboardCorners, rvec, tvec, cameraMatrix, distCoeffs, projectedCharucoCorners); @@ -267,7 +267,7 @@ void CV_CharucoPoseEstimation::run(int) { detector.params->markerBorderBits = markerBorder; detector.detectMarkers(img, corners, ids); - ASSERT_EQ(ids.size(), board->ids.size()); + ASSERT_EQ(ids.size(), board->getIds().size()); // interpolate charuco corners vector< Point2f > charucoCorners; @@ -291,10 +291,10 @@ void CV_CharucoPoseEstimation::run(int) { // check axes const float offset = (board->getSquareLength() - board->getMarkerLength()) / 2.f; vector axes = getAxis(cameraMatrix, distCoeffs, rvec, tvec, board->getSquareLength(), offset); - vector topLeft = getMarkerById(board->ids[0], corners, ids); + vector topLeft = getMarkerById(board->getIds()[0], corners, ids); ASSERT_NEAR(topLeft[0].x, axes[1].x, 3.f); ASSERT_NEAR(topLeft[0].y, axes[1].y, 3.f); - vector bottomLeft = getMarkerById(board->ids[2], corners, ids); + vector bottomLeft = getMarkerById(board->getIds()[2], corners, ids); ASSERT_NEAR(bottomLeft[0].x, axes[2].x, 3.f); ASSERT_NEAR(bottomLeft[0].y, axes[2].y, 3.f); @@ -368,8 +368,10 @@ void CV_CharucoDiamondDetection::run(int) { for(int pitch = -50; pitch <= 50; pitch += 25) { int 
markerBorder = iter % 2 + 1; + vector idsTmp; for(int i = 0; i < 4; i++) - board->ids[i] = 4 * iter + i; + idsTmp.push_back(4 * iter + i); + board->setIds(idsTmp); iter++; // get synthetic image @@ -388,12 +390,11 @@ void CV_CharucoDiamondDetection::run(int) { ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH); return; } - // detect diamonds vector< vector< Point2f > > diamondCorners; vector< Vec4i > diamondIds; - aruco::detectCharucoDiamond(img, corners, ids, squareLength / markerLength, - diamondCorners, diamondIds, cameraMatrix, distCoeffs); + aruco::detectCharucoDiamond(img, corners, ids, squareLength / markerLength, diamondCorners, diamondIds, + cameraMatrix, distCoeffs, detector.dictionary); // check results if(diamondIds.size() != 1) { @@ -403,7 +404,7 @@ void CV_CharucoDiamondDetection::run(int) { } for(int i = 0; i < 4; i++) { - if(diamondIds[0][i] != board->ids[i]) { + if(diamondIds[0][i] != board->getIds()[i]) { ts->printf(cvtest::TS::LOG, "Incorrect diamond ids"); ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH); return; @@ -417,7 +418,7 @@ void CV_CharucoDiamondDetection::run(int) { vector copyChessboardCorners = board->chessboardCorners; // move copyChessboardCorners points for (size_t i = 0; i < copyChessboardCorners.size(); i++) - copyChessboardCorners[i] -= board->rightBottomBorder / 2.f; + copyChessboardCorners[i] -= board->getRightBottomBorder() / 2.f; projectPoints(copyChessboardCorners, rvec, tvec, cameraMatrix, distCoeffs, projectedDiamondCorners); From 839db443779a56559cea1b0d4418934a4aff43b0 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sun, 7 Aug 2022 15:41:00 +0300 Subject: [PATCH 42/45] Revert "suppress warning on GCC 7 and later" This reverts commit a96a930f1241f3e3a2de477cda22cf30da1f9759. 
--- modules/aruco/CMakeLists.txt | 4 ---- modules/ccalib/CMakeLists.txt | 4 ---- modules/datasets/CMakeLists.txt | 4 ---- modules/dnn_objdetect/CMakeLists.txt | 5 +---- modules/dpm/CMakeLists.txt | 4 ---- modules/face/CMakeLists.txt | 4 ---- modules/line_descriptor/CMakeLists.txt | 4 ---- modules/optflow/CMakeLists.txt | 4 ---- modules/rgbd/CMakeLists.txt | 4 ---- modules/text/CMakeLists.txt | 4 ---- modules/tracking/CMakeLists.txt | 4 ---- modules/xfeatures2d/CMakeLists.txt | 4 ---- modules/ximgproc/CMakeLists.txt | 4 ---- modules/xobjdetect/CMakeLists.txt | 4 ---- modules/xphoto/CMakeLists.txt | 4 ---- 15 files changed, 1 insertion(+), 60 deletions(-) diff --git a/modules/aruco/CMakeLists.txt b/modules/aruco/CMakeLists.txt index 17f1da666a8..12467e88f4f 100644 --- a/modules/aruco/CMakeLists.txt +++ b/modules/aruco/CMakeLists.txt @@ -1,8 +1,4 @@ set(the_description "ArUco Marker Detection") -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(aruco opencv_core opencv_imgproc opencv_calib3d WRAP python java) ocv_include_directories(${CMAKE_CURRENT_BINARY_DIR}) diff --git a/modules/ccalib/CMakeLists.txt b/modules/ccalib/CMakeLists.txt index 446e7f433c1..f803322ba9b 100644 --- a/modules/ccalib/CMakeLists.txt +++ b/modules/ccalib/CMakeLists.txt @@ -1,6 +1,2 @@ set(the_description "Custom Calibration Pattern") -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(ccalib opencv_core opencv_imgproc opencv_calib3d opencv_features2d opencv_highgui WRAP python) diff --git a/modules/datasets/CMakeLists.txt b/modules/datasets/CMakeLists.txt index 97606bb4053..56ca9e3100c 100644 --- a/modules/datasets/CMakeLists.txt +++ b/modules/datasets/CMakeLists.txt @@ -13,7 +13,3 @@ 
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4267 # flann, Win64 -Wimplicit-fallthrough # tinyxml2.cpp ) -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() diff --git a/modules/dnn_objdetect/CMakeLists.txt b/modules/dnn_objdetect/CMakeLists.txt index c6aa4540b28..895bffbeddb 100644 --- a/modules/dnn_objdetect/CMakeLists.txt +++ b/modules/dnn_objdetect/CMakeLists.txt @@ -1,8 +1,5 @@ set(the_description "Object Detection using CNNs") -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() + ocv_define_module(dnn_objdetect opencv_core opencv_imgproc opencv_dnn OPTIONAL opencv_highgui opencv_imgcodecs # samples ) diff --git a/modules/dpm/CMakeLists.txt b/modules/dpm/CMakeLists.txt index 090f90366d6..4d6a302b73b 100644 --- a/modules/dpm/CMakeLists.txt +++ b/modules/dpm/CMakeLists.txt @@ -3,7 +3,3 @@ set(the_description "Object Detection") ocv_define_module(dpm opencv_core opencv_imgproc opencv_objdetect OPTIONAL opencv_highgui WRAP python) ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4512) # disable warning on Win64 -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() diff --git a/modules/face/CMakeLists.txt b/modules/face/CMakeLists.txt index f7e5374d6b4..2d5f8075a68 100644 --- a/modules/face/CMakeLists.txt +++ b/modules/face/CMakeLists.txt @@ -1,8 +1,4 @@ set(the_description "Face recognition etc") -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(face opencv_core opencv_imgproc opencv_objdetect diff --git 
a/modules/line_descriptor/CMakeLists.txt b/modules/line_descriptor/CMakeLists.txt index fc3b3171183..0c18edf7d4a 100644 --- a/modules/line_descriptor/CMakeLists.txt +++ b/modules/line_descriptor/CMakeLists.txt @@ -1,6 +1,2 @@ set(the_description "Line descriptor") -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(line_descriptor opencv_imgproc OPTIONAL opencv_features2d WRAP python) diff --git a/modules/optflow/CMakeLists.txt b/modules/optflow/CMakeLists.txt index beee733840f..39a37f143dd 100644 --- a/modules/optflow/CMakeLists.txt +++ b/modules/optflow/CMakeLists.txt @@ -1,6 +1,2 @@ set(the_description "Optical Flow Algorithms") -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(optflow opencv_core opencv_imgproc opencv_video opencv_ximgproc opencv_imgcodecs opencv_flann WRAP python) diff --git a/modules/rgbd/CMakeLists.txt b/modules/rgbd/CMakeLists.txt index 643be62c754..f2e022fe8a7 100644 --- a/modules/rgbd/CMakeLists.txt +++ b/modules/rgbd/CMakeLists.txt @@ -1,6 +1,2 @@ set(the_description "RGBD algorithms") -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(rgbd opencv_core opencv_calib3d opencv_imgproc WRAP python) diff --git a/modules/text/CMakeLists.txt b/modules/text/CMakeLists.txt index 36caffb5dd5..82e1e2a7e73 100644 --- a/modules/text/CMakeLists.txt +++ b/modules/text/CMakeLists.txt @@ -3,10 +3,6 @@ set(__extra_deps "") if(DEBUG_opencv_text) list(APPEND __extra_deps PRIVATE_REQUIRED opencv_highgui) endif() -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC 
only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(text opencv_ml opencv_imgproc opencv_core opencv_features2d opencv_dnn diff --git a/modules/tracking/CMakeLists.txt b/modules/tracking/CMakeLists.txt index 50aae65a3db..6fa88af29cb 100644 --- a/modules/tracking/CMakeLists.txt +++ b/modules/tracking/CMakeLists.txt @@ -1,7 +1,3 @@ set(the_description "Tracking API") ocv_define_module(tracking opencv_imgproc opencv_core opencv_video opencv_plot OPTIONAL opencv_dnn opencv_datasets WRAP java python) ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-shadow /wd4458) -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() diff --git a/modules/xfeatures2d/CMakeLists.txt b/modules/xfeatures2d/CMakeLists.txt index 80b8c64caab..bbc540e278c 100644 --- a/modules/xfeatures2d/CMakeLists.txt +++ b/modules/xfeatures2d/CMakeLists.txt @@ -3,10 +3,6 @@ set(the_description "Contributed/Experimental Algorithms for Salient 2D Features if(HAVE_CUDA) ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef) endif() -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(xfeatures2d opencv_core opencv_imgproc opencv_features2d opencv_calib3d OPTIONAL opencv_shape opencv_ml opencv_cudaarithm WRAP python java) if(NOT OPENCV_SKIP_FEATURES2D_DOWNLOADING) diff --git a/modules/ximgproc/CMakeLists.txt b/modules/ximgproc/CMakeLists.txt index b6699eeb4ff..f6f88bec66d 100644 --- a/modules/ximgproc/CMakeLists.txt +++ b/modules/ximgproc/CMakeLists.txt @@ -1,6 +1,2 @@ set(the_description "Extended image processing module. 
It includes edge-aware filters and etc.") -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(ximgproc opencv_core opencv_imgproc opencv_calib3d opencv_imgcodecs WRAP python java) diff --git a/modules/xobjdetect/CMakeLists.txt b/modules/xobjdetect/CMakeLists.txt index 56b1749bb59..1727a42b75b 100644 --- a/modules/xobjdetect/CMakeLists.txt +++ b/modules/xobjdetect/CMakeLists.txt @@ -1,8 +1,4 @@ set(the_description "Object detection algorithms") -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(xobjdetect opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs WRAP python) if (BUILD_opencv_apps AND NOT APPLE_FRAMEWORK) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools ${CMAKE_CURRENT_BINARY_DIR}/tools) diff --git a/modules/xphoto/CMakeLists.txt b/modules/xphoto/CMakeLists.txt index 877384378cc..a05848d389f 100644 --- a/modules/xphoto/CMakeLists.txt +++ b/modules/xphoto/CMakeLists.txt @@ -1,6 +1,2 @@ set(the_description "Addon to basic photo module") -if(ARM AND CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) - # suppress warnings from GCC only on 7.1 and later - ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-psabi) -endif() ocv_define_module(xphoto opencv_core opencv_imgproc opencv_photo WRAP python java) From 843df494c7eec6492d1e7c46b6612c0cdb321e9a Mon Sep 17 00:00:00 2001 From: Kevin Christensen Date: Tue, 9 Aug 2022 20:21:39 -0700 Subject: [PATCH 43/45] fix cuda mem leak and move cuda malloc out of critical path --- modules/cudaimgproc/src/cuda/gftt.cu | 13 +++++-------- modules/cudaimgproc/src/gftt.cpp | 14 +++++++++++--- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/modules/cudaimgproc/src/cuda/gftt.cu 
b/modules/cudaimgproc/src/cuda/gftt.cu index 66bd6e0dbc2..b5655611005 100644 --- a/modules/cudaimgproc/src/cuda/gftt.cu +++ b/modules/cudaimgproc/src/cuda/gftt.cu @@ -87,25 +87,22 @@ namespace cv { namespace cuda { namespace device } } - int findCorners_gpu(const cudaTextureObject_t &eigTex, const int &rows, const int &cols, float threshold, PtrStepSzb mask, float2* corners, int max_count, cudaStream_t stream) + int findCorners_gpu(const cudaTextureObject_t &eigTex, const int &rows, const int &cols, float threshold, PtrStepSzb mask, float2* corners, int max_count, int* counterPtr, cudaStream_t stream) { - int* counter_ptr; - cudaSafeCall( cudaMalloc(&counter_ptr, sizeof(int)) ); - - cudaSafeCall( cudaMemsetAsync(counter_ptr, 0, sizeof(int), stream) ); + cudaSafeCall( cudaMemsetAsync(counterPtr, 0, sizeof(int), stream) ); dim3 block(16, 16); dim3 grid(divUp(cols, block.x), divUp(rows, block.y)); if (mask.data) - findCorners<<>>(threshold, SingleMask(mask), corners, max_count, rows, cols, eigTex, counter_ptr); + findCorners<<>>(threshold, SingleMask(mask), corners, max_count, rows, cols, eigTex, counterPtr); else - findCorners<<>>(threshold, WithOutMask(), corners, max_count, rows, cols, eigTex, counter_ptr); + findCorners<<>>(threshold, WithOutMask(), corners, max_count, rows, cols, eigTex, counterPtr); cudaSafeCall( cudaGetLastError() ); int count; - cudaSafeCall( cudaMemcpyAsync(&count, counter_ptr, sizeof(int), cudaMemcpyDeviceToHost, stream) ); + cudaSafeCall( cudaMemcpyAsync(&count, counterPtr, sizeof(int), cudaMemcpyDeviceToHost, stream) ); if (stream) cudaSafeCall(cudaStreamSynchronize(stream)); else diff --git a/modules/cudaimgproc/src/gftt.cpp b/modules/cudaimgproc/src/gftt.cpp index f25158a68d8..544cd8834af 100644 --- a/modules/cudaimgproc/src/gftt.cpp +++ b/modules/cudaimgproc/src/gftt.cpp @@ -55,7 +55,7 @@ namespace cv { namespace cuda { namespace device { namespace gfft { - int findCorners_gpu(const cudaTextureObject_t &eigTex_, const int &rows, const 
int &cols, float threshold, PtrStepSzb mask, float2* corners, int max_count, cudaStream_t stream); + int findCorners_gpu(const cudaTextureObject_t &eigTex_, const int &rows, const int &cols, float threshold, PtrStepSzb mask, float2* corners, int max_count, int* counterPtr, cudaStream_t stream); void sortCorners_gpu(const cudaTextureObject_t &eigTex_, float2* corners, int count, cudaStream_t stream); } }}} @@ -67,7 +67,7 @@ namespace public: GoodFeaturesToTrackDetector(int srcType, int maxCorners, double qualityLevel, double minDistance, int blockSize, bool useHarrisDetector, double harrisK); - + ~GoodFeaturesToTrackDetector(); void detect(InputArray image, OutputArray corners, InputArray mask, Stream& stream); private: @@ -82,6 +82,8 @@ namespace GpuMat buf_; GpuMat eig_; GpuMat tmpCorners_; + + int* counterPtr_; }; GoodFeaturesToTrackDetector::GoodFeaturesToTrackDetector(int srcType, int maxCorners, double qualityLevel, double minDistance, @@ -93,6 +95,12 @@ namespace cornerCriteria_ = useHarrisDetector ? 
cuda::createHarrisCorner(srcType, blockSize, 3, harrisK) : cuda::createMinEigenValCorner(srcType, blockSize, 3); + cudaSafeCall(cudaMalloc(&counterPtr_, sizeof(int))); + } + + GoodFeaturesToTrackDetector::~GoodFeaturesToTrackDetector() + { + cudaSafeCall(cudaFree(counterPtr_)); } void GoodFeaturesToTrackDetector::detect(InputArray _image, OutputArray _corners, InputArray _mask, Stream& stream) @@ -125,7 +133,7 @@ namespace PtrStepSzf eig = eig_; cv::cuda::device::createTextureObjectPitch2D(&eigTex_, eig, texDesc); - int total = findCorners_gpu(eigTex_, eig_.rows, eig_.cols, static_cast<float>(maxVal * qualityLevel_), mask, tmpCorners_.ptr<float2>(), tmpCorners_.cols, stream_); + int total = findCorners_gpu(eigTex_, eig_.rows, eig_.cols, static_cast<float>(maxVal * qualityLevel_), mask, tmpCorners_.ptr<float2>(), tmpCorners_.cols, counterPtr_, stream_); if (total == 0) From 1d1dbe37f49704811af1528b9e51e1277cf61aa8 Mon Sep 17 00:00:00 2001 From: Kevin Christensen Date: Wed, 10 Aug 2022 07:17:37 -0700 Subject: [PATCH 44/45] destroy texture object --- modules/cudaimgproc/src/gftt.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/cudaimgproc/src/gftt.cpp b/modules/cudaimgproc/src/gftt.cpp index 544cd8834af..ae19087aac1 100644 --- a/modules/cudaimgproc/src/gftt.cpp +++ b/modules/cudaimgproc/src/gftt.cpp @@ -135,15 +135,17 @@ namespace int total = findCorners_gpu(eigTex_, eig_.rows, eig_.cols, static_cast<float>(maxVal * qualityLevel_), mask, tmpCorners_.ptr<float2>(), tmpCorners_.cols, counterPtr_, stream_); - if (total == 0) { _corners.release(); + cudaSafeCall( cudaDestroyTextureObject(eigTex_) ); return; } sortCorners_gpu(eigTex_, tmpCorners_.ptr<float2>(), total, stream_); + cudaSafeCall( cudaDestroyTextureObject(eigTex_) ); + if (minDistance_ < 1) { tmpCorners_.colRange(0, maxCorners_ > 0 ?
std::min(maxCorners_, total) : total).copyTo(_corners, stream); From b06dbd2b13097913e41331d3f25dcd5ca53a7b44 Mon Sep 17 00:00:00 2001 From: Biswapriyo Nath Date: Thu, 18 Aug 2022 21:45:28 +0530 Subject: [PATCH 45/45] sfm: Fix unknown uint type error in mingw uint is defined in sys/types.h in Linux for compatibility. But it is not defined in Win32 platform. This fixes the following error: opencv_contrib/modules/sfm/src/libmv_light/libmv/multiview/robust_estimation.h:59:8: error: 'uint' does not name a type; did you mean 'int'? 59 | static uint IterationsRequired(int min_samples, | ^~~~ | int --- .../sfm/src/libmv_light/libmv/multiview/robust_estimation.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/sfm/src/libmv_light/libmv/multiview/robust_estimation.h b/modules/sfm/src/libmv_light/libmv/multiview/robust_estimation.h index a677c5db889..7b20eef3f8a 100644 --- a/modules/sfm/src/libmv_light/libmv/multiview/robust_estimation.h +++ b/modules/sfm/src/libmv_light/libmv/multiview/robust_estimation.h @@ -54,10 +54,10 @@ class MLEScorer { double threshold_; }; -static uint IterationsRequired(int min_samples, +static unsigned int IterationsRequired(int min_samples, double outliers_probability, double inlier_ratio) { - return static_cast<uint>( + return static_cast<unsigned int>( log(outliers_probability) / log(1.0 - pow(inlier_ratio, min_samples))); }