diff --git a/modules/stereo/samples/sample_quasi_dense.py b/modules/stereo/samples/sample_quasi_dense.py
index bd11e1d2898..0f6de2db697 100644
--- a/modules/stereo/samples/sample_quasi_dense.py
+++ b/modules/stereo/samples/sample_quasi_dense.py
@@ -4,7 +4,7 @@
 left_img = cv.imread(cv.samples.findFile("aloeL.jpg"), cv.IMREAD_COLOR)
 right_img = cv.imread(cv.samples.findFile("aloeR.jpg"), cv.IMREAD_COLOR)

-frame_size = leftImg.shape[0:2];
+frame_size = left_img.shape[0:2];

 stereo = cv.stereo.QuasiDenseStereo_create(frame_size[::-1])
 stereo.process(left_img, right_img)
diff --git a/modules/tracking/samples/multitracker.py b/modules/tracking/samples/multitracker.py
index 96ed1f1305a..1fe6bf6a61f 100644
--- a/modules/tracking/samples/multitracker.py
+++ b/modules/tracking/samples/multitracker.py
@@ -10,7 +10,7 @@

 cv.namedWindow("tracking")
 camera = cv.VideoCapture(sys.argv[1])
-tracker = cv.MultiTracker_create()
+tracker = cv.legacy.MultiTracker_create()
 init_once = False

 ok, image=camera.read()
@@ -25,17 +25,17 @@
 while camera.isOpened():
     ok, image=camera.read()
     if not ok:
-        print 'no image to read'
+        print('no image to read')
         break

     if not init_once:
-        ok = tracker.add(cv.TrackerMIL_create(), image, bbox1)
-        ok = tracker.add(cv.TrackerMIL_create(), image, bbox2)
-        ok = tracker.add(cv.TrackerMIL_create(), image, bbox3)
+        ok = tracker.add(cv.legacy.TrackerMIL_create(), image, bbox1)
+        ok = tracker.add(cv.legacy.TrackerMIL_create(), image, bbox2)
+        ok = tracker.add(cv.legacy.TrackerMIL_create(), image, bbox3)
         init_once = True

     ok, boxes = tracker.update(image)
-    print ok, boxes
+    print(ok, boxes)

     for newbox in boxes:
         p1 = (int(newbox[0]), int(newbox[1]))
diff --git a/modules/tracking/samples/tracker.py b/modules/tracking/samples/tracker.py
index 891571ec69a..8041d409163 100644
--- a/modules/tracking/samples/tracker.py
+++ b/modules/tracking/samples/tracker.py
@@ -19,7 +19,7 @@
 while camera.isOpened():
     ok, image=camera.read()
     if not ok:
-        print 'no image to read'
+        print('no image to read')
         break

     if not init_once:
@@ -27,7 +27,7 @@
         init_once = True

     ok, newbox = tracker.update(image)
-    print ok, newbox
+    print(ok, newbox)

     if ok:
         p1 = (int(newbox[0]), int(newbox[1]))
diff --git a/modules/wechat_qrcode/samples/qrcode.py b/modules/wechat_qrcode/samples/qrcode.py
index fd79607efcf..7713734f993 100644
--- a/modules/wechat_qrcode/samples/qrcode.py
+++ b/modules/wechat_qrcode/samples/qrcode.py
@@ -37,7 +37,7 @@
 cap = cv2.VideoCapture(camIdx)
 while True:
     res, img = cap.read()
-    if img.empty():
+    if img is None:
         break
     res, points = detector.detectAndDecode(img)
     for t in res:
diff --git a/modules/ximgproc/samples/dericheSample.py b/modules/ximgproc/samples/dericheSample.py
index 6468b07c86f..917db05a3f6 100644
--- a/modules/ximgproc/samples/dericheSample.py
+++ b/modules/ximgproc/samples/dericheSample.py
@@ -28,7 +28,6 @@ def DericheFilter(self):
         self.module = np.sqrt(dx2+dy2)
         cv.normalize(src=self.module,dst=self.module,norm_type=cv.NORM_MINMAX)
     def SlideBarDeriche(self):
-        cv.destroyWindow(self.filename)
         cv.namedWindow(self.filename)
         AddSlider("alpha",self.filename,1,400,self.alpha,self.UpdateAlpha)
         AddSlider("omega",self.filename,1,1000,self.omega,self.UpdateOmega)
diff --git a/modules/ximgproc/samples/radon_transform_demo.py b/modules/ximgproc/samples/radon_transform_demo.py
index f8ef5663b2a..0ce7386b063 100644
--- a/modules/ximgproc/samples/radon_transform_demo.py
+++ b/modules/ximgproc/samples/radon_transform_demo.py
@@ -7,7 +7,7 @@
 if __name__ == "__main__":
     src = cv.imread("peilin_plane.png",
                     cv.IMREAD_GRAYSCALE)
-    radon = cv.ximgproc.RadonTransform(src)
+    radon = cv.ximgproc.RadonTransform(src).astype(np.float32)
     cv.imshow("src image", src)
     cv.imshow("Radon transform", radon)
     cv.waitKey()
diff --git a/samples/python2/seeds.py b/samples/python2/seeds.py
index 5507226d575..002f61cd29c 100755
--- a/samples/python2/seeds.py
+++ b/samples/python2/seeds.py
@@ -12,15 +12,12 @@
 import numpy as np
 import cv2 as cv

-# relative module
-import video
-
 # built-in module
 import sys


 if __name__ == '__main__':
-    print __doc__
+    print(__doc__)

     try:
         fn = sys.argv[1]
@@ -41,7 +38,7 @@ def nothing(*arg):
     num_levels = 4
     num_histogram_bins = 5

-    cap = video.create_capture(fn)
+    cap = cv.VideoCapture(fn)
     while True:
         flag, img = cap.read()
         converted_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
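
Note on the tracking changes above: in OpenCV 4.x the multi-object tracker lives in the cv2.legacy namespace and its add() method expects legacy tracker instances, which is why the samples switch to cv.legacy.MultiTracker_create() and cv.legacy.TrackerMIL_create(). A minimal standalone sketch of the migrated API, assuming opencv-contrib-python 4.x, a hypothetical video file "test.mp4", and hand-picked bounding boxes:

# Illustrative sketch only, not part of the patch. Assumes opencv-contrib-python 4.x
# (which provides the cv2.legacy namespace), a hypothetical video "test.mp4" and
# hand-picked (x, y, w, h) boxes.
import cv2 as cv

camera = cv.VideoCapture("test.mp4")
ok, frame = camera.read()
if not ok:
    raise SystemExit("could not read the first frame")

# MultiTracker.add() takes one legacy tracker instance per tracked object.
tracker = cv.legacy.MultiTracker_create()
for box in [(100, 100, 50, 80), (300, 200, 60, 60)]:
    tracker.add(cv.legacy.TrackerMIL_create(), frame, box)

while camera.isOpened():
    ok, frame = camera.read()
    if not ok:
        break
    ok, boxes = tracker.update(frame)   # boxes is an N x 4 array of floats
    for x, y, w, h in boxes:
        cv.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)), (200, 0, 0))
    cv.imshow("tracking", frame)
    if cv.waitKey(1) == 27:             # Esc quits
        break

update() returns the current boxes as floating-point (x, y, w, h) rows, so the coordinates are cast to int before drawing, just as in the patched samples.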