From 06c8b48d1e71a7113a68cfa69111ebbfb15f3d93 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Mon, 16 Aug 2021 19:20:40 +0100 Subject: [PATCH 01/19] Replace in test_datasets* --- test/test_datasets_download.py | 19 +++++------- test/test_datasets_samplers.py | 35 +++++++++++----------- test/test_datasets_utils.py | 49 ++++++++++++++----------------- test/test_datasets_video_utils.py | 33 ++++++++++----------- 4 files changed, 63 insertions(+), 73 deletions(-) diff --git a/test/test_datasets_download.py b/test/test_datasets_download.py index 8c2d575e01d..0cf86918575 100644 --- a/test/test_datasets_download.py +++ b/test/test_datasets_download.py @@ -22,8 +22,6 @@ USER_AGENT, ) -from common_utils import get_tmp_dir - def limit_requests_per_time(min_secs_between_requests=2.0): last_requests = {} @@ -166,16 +164,15 @@ def assert_url_is_accessible(url, timeout=5.0): urlopen(request, timeout=timeout) -def assert_file_downloads_correctly(url, md5, timeout=5.0): - with get_tmp_dir() as root: - file = path.join(root, path.basename(url)) - with assert_server_response_ok(): - with open(file, "wb") as fh: - request = Request(url, headers={"User-Agent": USER_AGENT}) - response = urlopen(request, timeout=timeout) - fh.write(response.read()) +def assert_file_downloads_correctly(url, md5, tmpdir, timeout=5.0): + file = path.join(tmpdir, path.basename(url)) + with assert_server_response_ok(): + with open(file, "wb") as fh: + request = Request(url, headers={"User-Agent": USER_AGENT}) + response = urlopen(request, timeout=timeout) + fh.write(response.read()) - assert check_integrity(file, md5=md5), "The MD5 checksums mismatch" + assert check_integrity(file, md5=md5), "The MD5 checksums mismatch" class DownloadConfig: diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py index 7754c1a98e8..ddb7d87bf18 100644 --- a/test/test_datasets_samplers.py +++ b/test/test_datasets_samplers.py @@ -13,28 +13,27 @@ from torchvision.datasets.video_utils import VideoClips, unfold from torchvision import get_video_backend -from common_utils import get_tmp_dir, assert_equal +from common_utils import assert_equal @contextlib.contextmanager -def get_list_of_videos(num_videos=5, sizes=None, fps=None): - with get_tmp_dir() as tmp_dir: - names = [] - for i in range(num_videos): - if sizes is None: - size = 5 * (i + 1) - else: - size = sizes[i] - if fps is None: - f = 5 - else: - f = fps[i] - data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) - name = os.path.join(tmp_dir, "{}.mp4".format(i)) - names.append(name) - io.write_video(name, data, fps=f) +def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): + names = [] + for i in range(num_videos): + if sizes is None: + size = 5 * (i + 1) + else: + size = sizes[i] + if fps is None: + f = 5 + else: + f = fps[i] + data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) + name = os.path.join(tmpdir, "{}.mp4".format(i)) + names.append(name) + io.write_video(name, data, fps=f) - yield names + yield names @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av") diff --git a/test/test_datasets_utils.py b/test/test_datasets_utils.py index 0c2dc5260de..3d147608a59 100644 --- a/test/test_datasets_utils.py +++ b/test/test_datasets_utils.py @@ -12,7 +12,6 @@ import lzma import contextlib -from common_utils import get_tmp_dir from torchvision.datasets.utils import _COMPRESSED_FILE_OPENERS @@ -113,7 +112,7 @@ def test_detect_file_type_incompatible(self, file): utils._detect_file_type(file) 
@pytest.mark.parametrize('extension', [".bz2", ".gz", ".xz"]) - def test_decompress(self, extension): + def test_decompress(self, extension, tmpdir): def create_compressed(root, content="this is the content"): file = os.path.join(root, "file") compressed = f"{file}{extension}" @@ -124,21 +123,20 @@ def create_compressed(root, content="this is the content"): return compressed, file, content - with get_tmp_dir() as temp_dir: - compressed, file, content = create_compressed(temp_dir) + compressed, file, content = create_compressed(tmpdir) - utils._decompress(compressed) + utils._decompress(compressed) - assert os.path.exists(file) + assert os.path.exists(file) - with open(file, "r") as fh: - assert fh.read() == content + with open(file, "r") as fh: + assert fh.read() == content def test_decompress_no_compression(self): with pytest.raises(RuntimeError): utils._decompress("foo.tar") - def test_decompress_remove_finished(self): + def test_decompress_remove_finished(self, tmpdir): def create_compressed(root, content="this is the content"): file = os.path.join(root, "file") compressed = f"{file}.gz" @@ -148,12 +146,11 @@ def create_compressed(root, content="this is the content"): return compressed, file, content - with get_tmp_dir() as temp_dir: - compressed, file, content = create_compressed(temp_dir) + compressed, file, content = create_compressed(tmpdir) - utils.extract_archive(compressed, temp_dir, remove_finished=True) + utils.extract_archive(compressed, tmpdir, remove_finished=True) - assert not os.path.exists(compressed) + assert not os.path.exists(compressed) @pytest.mark.parametrize('extension', [".gz", ".xz"]) @pytest.mark.parametrize('remove_finished', [True, False]) @@ -166,7 +163,7 @@ def test_extract_archive_defer_to_decompress(self, extension, remove_finished, m mocked.assert_called_once_with(file, filename, remove_finished=remove_finished) - def test_extract_zip(self): + def test_extract_zip(self, tmpdir): def create_archive(root, content="this is the content"): file = os.path.join(root, "dst.txt") archive = os.path.join(root, "archive.zip") @@ -176,19 +173,18 @@ def create_archive(root, content="this is the content"): return archive, file, content - with get_tmp_dir() as temp_dir: - archive, file, content = create_archive(temp_dir) + archive, file, content = create_archive(tmpdir) - utils.extract_archive(archive, temp_dir) + utils.extract_archive(archive, tmpdir) - assert os.path.exists(file) + assert os.path.exists(file) - with open(file, "r") as fh: - assert fh.read() == content + with open(file, "r") as fh: + assert fh.read() == content @pytest.mark.parametrize('extension, mode', [ ('.tar', 'w'), ('.tar.gz', 'w:gz'), ('.tgz', 'w:gz'), ('.tar.xz', 'w:xz')]) - def test_extract_tar(self, extension, mode): + def test_extract_tar(self, extension, mode, tmpdir): def create_archive(root, extension, mode, content="this is the content"): src = os.path.join(root, "src.txt") dst = os.path.join(root, "dst.txt") @@ -202,15 +198,14 @@ def create_archive(root, extension, mode, content="this is the content"): return archive, dst, content - with get_tmp_dir() as temp_dir: - archive, file, content = create_archive(temp_dir, extension, mode) + archive, file, content = create_archive(tmpdir, extension, mode) - utils.extract_archive(archive, temp_dir) + utils.extract_archive(archive, tmpdir) - assert os.path.exists(file) + assert os.path.exists(file) - with open(file, "r") as fh: - assert fh.read() == content + with open(file, "r") as fh: + assert fh.read() == content def test_verify_str_arg(self): 
assert "a" == utils.verify_str_arg("a", "arg", ("a",)) diff --git a/test/test_datasets_video_utils.py b/test/test_datasets_video_utils.py index 00db0aad127..6453f38d065 100644 --- a/test/test_datasets_video_utils.py +++ b/test/test_datasets_video_utils.py @@ -6,26 +6,25 @@ from torchvision import io from torchvision.datasets.video_utils import VideoClips, unfold -from common_utils import get_tmp_dir, assert_equal +from common_utils import assert_equal @contextlib.contextmanager -def get_list_of_videos(num_videos=5, sizes=None, fps=None): - with get_tmp_dir() as tmp_dir: - names = [] - for i in range(num_videos): - if sizes is None: - size = 5 * (i + 1) - else: - size = sizes[i] - if fps is None: - f = 5 - else: - f = fps[i] - data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) - name = os.path.join(tmp_dir, "{}.mp4".format(i)) - names.append(name) - io.write_video(name, data, fps=f) +def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): + names = [] + for i in range(num_videos): + if sizes is None: + size = 5 * (i + 1) + else: + size = sizes[i] + if fps is None: + f = 5 + else: + f = fps[i] + data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) + name = os.path.join(tmpdir, "{}.mp4".format(i)) + names.append(name) + io.write_video(name, data, fps=f) yield names From 4c79e6f7e404667a9bec47e761c502096a3ffd8d Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Mon, 16 Aug 2021 19:21:43 +0100 Subject: [PATCH 02/19] Replace in test_image.py --- test/test_image.py | 191 +++++++++++++++++++++------------------------ 1 file changed, 91 insertions(+), 100 deletions(-) diff --git a/test/test_image.py b/test/test_image.py index 7c6764dce64..5630d5d8226 100644 --- a/test/test_image.py +++ b/test/test_image.py @@ -9,7 +9,7 @@ import torch from PIL import Image, __version__ as PILLOW_VERSION import torchvision.transforms.functional as F -from common_utils import get_tmp_dir, needs_cuda, assert_equal +from common_utils import needs_cuda, assert_equal from torchvision.io.image import ( decode_png, decode_jpeg, encode_jpeg, write_jpeg, decode_image, read_file, @@ -197,74 +197,69 @@ def test_encode_png_errors(): pytest.param(png_path, id=_get_safe_image_name(png_path)) for png_path in get_images(IMAGE_DIR, ".png") ]) -def test_write_png(img_path): - with get_tmp_dir() as d: - pil_image = Image.open(img_path) - img_pil = torch.from_numpy(np.array(pil_image)) - img_pil = img_pil.permute(2, 0, 1) +def test_write_png(img_path, tmpdir): + pil_image = Image.open(img_path) + img_pil = torch.from_numpy(np.array(pil_image)) + img_pil = img_pil.permute(2, 0, 1) - filename, _ = os.path.splitext(os.path.basename(img_path)) - torch_png = os.path.join(d, '{0}_torch.png'.format(filename)) - write_png(img_pil, torch_png, compression_level=6) - saved_image = torch.from_numpy(np.array(Image.open(torch_png))) - saved_image = saved_image.permute(2, 0, 1) + filename, _ = os.path.splitext(os.path.basename(img_path)) + torch_png = os.path.join(tmpdir, '{0}_torch.png'.format(filename)) + write_png(img_pil, torch_png, compression_level=6) + saved_image = torch.from_numpy(np.array(Image.open(torch_png))) + saved_image = saved_image.permute(2, 0, 1) - assert_equal(img_pil, saved_image) + assert_equal(img_pil, saved_image) -def test_read_file(): - with get_tmp_dir() as d: - fname, content = 'test1.bin', b'TorchVision\211\n' - fpath = os.path.join(d, fname) - with open(fpath, 'wb') as f: - f.write(content) +def test_read_file(tmpdir): + fname, content = 'test1.bin', b'TorchVision\211\n' + fpath 
= os.path.join(tmpdir, fname) + with open(fpath, 'wb') as f: + f.write(content) - data = read_file(fpath) - expected = torch.tensor(list(content), dtype=torch.uint8) - os.unlink(fpath) - assert_equal(data, expected) + data = read_file(fpath) + expected = torch.tensor(list(content), dtype=torch.uint8) + os.unlink(fpath) + assert_equal(data, expected) with pytest.raises(RuntimeError, match="No such file or directory: 'tst'"): read_file('tst') -def test_read_file_non_ascii(): - with get_tmp_dir() as d: - fname, content = '日本語(Japanese).bin', b'TorchVision\211\n' - fpath = os.path.join(d, fname) - with open(fpath, 'wb') as f: - f.write(content) +def test_read_file_non_ascii(tmpdir): + fname, content = '日本語(Japanese).bin', b'TorchVision\211\n' + fpath = os.path.join(tmpdir, fname) + with open(fpath, 'wb') as f: + f.write(content) - data = read_file(fpath) - expected = torch.tensor(list(content), dtype=torch.uint8) - os.unlink(fpath) - assert_equal(data, expected) + data = read_file(fpath) + expected = torch.tensor(list(content), dtype=torch.uint8) + os.unlink(fpath) + assert_equal(data, expected) -def test_write_file(): - with get_tmp_dir() as d: - fname, content = 'test1.bin', b'TorchVision\211\n' - fpath = os.path.join(d, fname) - content_tensor = torch.tensor(list(content), dtype=torch.uint8) - write_file(fpath, content_tensor) +def test_write_file(tmpdir): + fname, content = 'test1.bin', b'TorchVision\211\n' + fpath = os.path.join(tmpdir, fname) + content_tensor = torch.tensor(list(content), dtype=torch.uint8) + write_file(fpath, content_tensor) - with open(fpath, 'rb') as f: - saved_content = f.read() - os.unlink(fpath) - assert content == saved_content + with open(fpath, 'rb') as f: + saved_content = f.read() + os.unlink(fpath) + assert content == saved_content -def test_write_file_non_ascii(): - with get_tmp_dir() as d: - fname, content = '日本語(Japanese).bin', b'TorchVision\211\n' - fpath = os.path.join(d, fname) - content_tensor = torch.tensor(list(content), dtype=torch.uint8) - write_file(fpath, content_tensor) +def test_write_file_non_ascii(tmpdir): + fname, content = '日本語(Japanese).bin', b'TorchVision\211\n' + fpath = os.path.join(tmpdir, fname) + content_tensor = torch.tensor(list(content), dtype=torch.uint8) + write_file(fpath, content_tensor) - with open(fpath, 'rb') as f: - saved_content = f.read() - os.unlink(fpath) - assert content == saved_content + with open(fpath, 'rb') as f: + saved_content = f.read() + os.unlink(fpath) + assert content == saved_content @pytest.mark.parametrize('shape', [ @@ -272,16 +267,15 @@ def test_write_file_non_ascii(): (60, 60), (105, 105), ]) -def test_read_1_bit_png(shape): +def test_read_1_bit_png(shape, tmpdir): np_rng = np.random.RandomState(0) - with get_tmp_dir() as root: - image_path = os.path.join(root, f'test_{shape}.png') - pixels = np_rng.rand(*shape) > 0.5 - img = Image.fromarray(pixels) - img.save(image_path) - img1 = read_image(image_path) - img2 = normalize_dimensions(torch.as_tensor(pixels * 255, dtype=torch.uint8)) - assert_equal(img1, img2) + image_path = os.path.join(tmpdir, f'test_{shape}.png') + pixels = np_rng.rand(*shape) > 0.5 + img = Image.fromarray(pixels) + img.save(image_path) + img1 = read_image(image_path) + img2 = normalize_dimensions(torch.as_tensor(pixels * 255, dtype=torch.uint8)) + assert_equal(img1, img2) @pytest.mark.parametrize('shape', [ @@ -293,16 +287,15 @@ def test_read_1_bit_png(shape): ImageReadMode.UNCHANGED, ImageReadMode.GRAY, ]) -def test_read_1_bit_png_consistency(shape, mode): +def 
test_read_1_bit_png_consistency(shape, mode, tmpdir): np_rng = np.random.RandomState(0) - with get_tmp_dir() as root: - image_path = os.path.join(root, f'test_{shape}.png') - pixels = np_rng.rand(*shape) > 0.5 - img = Image.fromarray(pixels) - img.save(image_path) - img1 = read_image(image_path, mode) - img2 = read_image(image_path, mode) - assert_equal(img1, img2) + image_path = os.path.join(tmpdir, f'test_{shape}.png') + pixels = np_rng.rand(*shape) > 0.5 + img = Image.fromarray(pixels) + img.save(image_path) + img1 = read_image(image_path, mode) + img2 = read_image(image_path, mode) + assert_equal(img1, img2) def test_read_interlaced_png(): @@ -427,28 +420,27 @@ def test_encode_jpeg_reference(img_path): pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path)) for jpeg_path in get_images(ENCODE_JPEG, ".jpg") ]) -def test_write_jpeg_reference(img_path): +def test_write_jpeg_reference(img_path, tmpdir): # FIXME: Remove this eventually, see test_encode_jpeg_reference - with get_tmp_dir() as d: - data = read_file(img_path) - img = decode_jpeg(data) + data = read_file(img_path) + img = decode_jpeg(data) - basedir = os.path.dirname(img_path) - filename, _ = os.path.splitext(os.path.basename(img_path)) - torch_jpeg = os.path.join( - d, '{0}_torch.jpg'.format(filename)) - pil_jpeg = os.path.join( - basedir, 'jpeg_write', '{0}_pil.jpg'.format(filename)) + basedir = os.path.dirname(img_path) + filename, _ = os.path.splitext(os.path.basename(img_path)) + torch_jpeg = os.path.join( + tmpdir, '{0}_torch.jpg'.format(filename)) + pil_jpeg = os.path.join( + basedir, 'jpeg_write', '{0}_pil.jpg'.format(filename)) - write_jpeg(img, torch_jpeg, quality=75) + write_jpeg(img, torch_jpeg, quality=75) - with open(torch_jpeg, 'rb') as f: - torch_bytes = f.read() + with open(torch_jpeg, 'rb') as f: + torch_bytes = f.read() - with open(pil_jpeg, 'rb') as f: - pil_bytes = f.read() + with open(pil_jpeg, 'rb') as f: + pil_bytes = f.read() - assert_equal(torch_bytes, pil_bytes) + assert_equal(torch_bytes, pil_bytes) @pytest.mark.skipif(IS_WINDOWS, reason=( @@ -481,25 +473,24 @@ def test_encode_jpeg(img_path): pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path)) for jpeg_path in get_images(ENCODE_JPEG, ".jpg") ]) -def test_write_jpeg(img_path): - with get_tmp_dir() as d: - d = Path(d) - img = read_image(img_path) - pil_img = F.to_pil_image(img) +def test_write_jpeg(img_path, tmpdir): + tmpdir = Path(tmpdir) + img = read_image(img_path) + pil_img = F.to_pil_image(img) - torch_jpeg = str(d / 'torch.jpg') - pil_jpeg = str(d / 'pil.jpg') + torch_jpeg = str(tmpdir / 'torch.jpg') + pil_jpeg = str(tmpdir / 'pil.jpg') - write_jpeg(img, torch_jpeg, quality=75) - pil_img.save(pil_jpeg, quality=75) + write_jpeg(img, torch_jpeg, quality=75) + pil_img.save(pil_jpeg, quality=75) - with open(torch_jpeg, 'rb') as f: - torch_bytes = f.read() + with open(torch_jpeg, 'rb') as f: + torch_bytes = f.read() - with open(pil_jpeg, 'rb') as f: - pil_bytes = f.read() + with open(pil_jpeg, 'rb') as f: + pil_bytes = f.read() - assert_equal(torch_bytes, pil_bytes) + assert_equal(torch_bytes, pil_bytes) if __name__ == "__main__": From d5d3ea43cc5c14e9c1850471835de769f6879ad6 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Mon, 16 Aug 2021 19:23:13 +0100 Subject: [PATCH 03/19] Replace in test_transforms_tensor.py --- test/test_transforms_tensor.py | 60 ++++++++++++++-------------------- 1 file changed, 24 insertions(+), 36 deletions(-) diff --git a/test/test_transforms_tensor.py b/test/test_transforms_tensor.py index 0bf5d77716f..5081626fec4 
100644 --- a/test/test_transforms_tensor.py +++ b/test/test_transforms_tensor.py @@ -230,7 +230,7 @@ def test_crop_pad(size, padding_config, device): @pytest.mark.parametrize('device', cpu_and_gpu()) -def test_center_crop(device): +def test_center_crop(device, tmpdir): fn_kwargs = {"output_size": (4, 5)} meth_kwargs = {"size": (4, 5), } _test_op( @@ -259,8 +259,7 @@ def test_center_crop(device): scripted_fn = torch.jit.script(f) scripted_fn(tensor) - with get_tmp_dir() as tmp_dir: - scripted_fn.save(os.path.join(tmp_dir, "t_center_crop.pt")) + scripted_fn.save(os.path.join(tmpdir, "t_center_crop.pt")) @pytest.mark.parametrize('device', cpu_and_gpu()) @@ -309,11 +308,10 @@ def test_x_crop(fn, method, out_length, size, device): @pytest.mark.parametrize('method', ["FiveCrop", "TenCrop"]) -def test_x_crop_save(method): +def test_x_crop_save(method, tmpdir): fn = getattr(T, method)(size=[5, ]) scripted_fn = torch.jit.script(fn) - with get_tmp_dir() as tmp_dir: - scripted_fn.save(os.path.join(tmp_dir, "t_op_list_{}.pt".format(method))) + scripted_fn.save(os.path.join(tmpdir, "t_op_list_{}.pt".format(method))) class TestResize: @@ -349,11 +347,10 @@ def test_resize_scripted(self, dt, size, max_size, interpolation, device): _test_transform_vs_scripted(transform, s_transform, tensor) _test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors) - def test_resize_save(self): + def test_resize_save(self, tmpdir): transform = T.Resize(size=[32, ]) s_transform = torch.jit.script(transform) - with get_tmp_dir() as tmp_dir: - s_transform.save(os.path.join(tmp_dir, "t_resize.pt")) + s_transform.save(os.path.join(tmpdir, "t_resize.pt")) @pytest.mark.parametrize('device', cpu_and_gpu()) @pytest.mark.parametrize('scale', [(0.7, 1.2), [0.7, 1.2]]) @@ -368,11 +365,10 @@ def test_resized_crop(self, scale, ratio, size, interpolation, device): _test_transform_vs_scripted(transform, s_transform, tensor) _test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors) - def test_resized_crop_save(self): + def test_resized_crop_save(self, tmpdir): transform = T.RandomResizedCrop(size=[32, ]) s_transform = torch.jit.script(transform) - with get_tmp_dir() as tmp_dir: - s_transform.save(os.path.join(tmp_dir, "t_resized_crop.pt")) + s_transform.save(os.path.join(tmpdir, "t_resized_crop.pt")) def _test_random_affine_helper(device, **kwargs): @@ -386,11 +382,10 @@ def _test_random_affine_helper(device, **kwargs): @pytest.mark.parametrize('device', cpu_and_gpu()) -def test_random_affine(device): +def test_random_affine(device, tmpdir): transform = T.RandomAffine(degrees=45.0) s_transform = torch.jit.script(transform) - with get_tmp_dir() as tmp_dir: - s_transform.save(os.path.join(tmp_dir, "t_random_affine.pt")) + s_transform.save(os.path.join(tmpdir, "t_random_affine.pt")) @pytest.mark.parametrize('device', cpu_and_gpu()) @@ -447,11 +442,10 @@ def test_random_rotate(device, center, expand, degrees, interpolation, fill): _test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors) -def test_random_rotate_save(): +def test_random_rotate_save(tmpdir): transform = T.RandomRotation(degrees=45.0) s_transform = torch.jit.script(transform) - with get_tmp_dir() as tmp_dir: - s_transform.save(os.path.join(tmp_dir, "t_random_rotate.pt")) + s_transform.save(os.path.join(tmpdir, "t_random_rotate.pt")) @pytest.mark.parametrize('device', cpu_and_gpu()) @@ -473,11 +467,10 @@ def test_random_perspective(device, distortion_scale, interpolation, fill): _test_transform_vs_scripted_on_batch(transform, 
s_transform, batch_tensors) -def test_random_perspective_save(): +def test_random_perspective_save(tmpdir): transform = T.RandomPerspective() s_transform = torch.jit.script(transform) - with get_tmp_dir() as tmp_dir: - s_transform.save(os.path.join(tmp_dir, "t_perspective.pt")) + s_transform.save(os.path.join(tmpdir, "t_perspective.pt")) @pytest.mark.parametrize('device', cpu_and_gpu()) @@ -519,11 +512,10 @@ def test_convert_image_dtype(device, in_dtype, out_dtype): _test_transform_vs_scripted_on_batch(fn, scripted_fn, in_batch_tensors) -def test_convert_image_dtype_save(): +def test_convert_image_dtype_save(tmpdir): fn = T.ConvertImageDtype(dtype=torch.uint8) scripted_fn = torch.jit.script(fn) - with get_tmp_dir() as tmp_dir: - scripted_fn.save(os.path.join(tmp_dir, "t_convert_dtype.pt")) + scripted_fn.save(os.path.join(tmpdir, "t_convert_dtype.pt")) @pytest.mark.parametrize('device', cpu_and_gpu()) @@ -541,11 +533,10 @@ def test_autoaugment(device, policy, fill): _test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors) -def test_autoaugment_save(): +def test_autoaugment_save(tmpdir): transform = T.AutoAugment() s_transform = torch.jit.script(transform) - with get_tmp_dir() as tmp_dir: - s_transform.save(os.path.join(tmp_dir, "t_autoaugment.pt")) + s_transform.save(os.path.join(tmpdir, "t_autoaugment.pt")) @pytest.mark.parametrize('device', cpu_and_gpu()) @@ -567,11 +558,10 @@ def test_random_erasing(device, config): _test_transform_vs_scripted_on_batch(fn, scripted_fn, batch_tensors) -def test_random_erasing_save(): +def test_random_erasing_save(tmpdir): fn = T.RandomErasing(value=0.2) scripted_fn = torch.jit.script(fn) - with get_tmp_dir() as tmp_dir: - scripted_fn.save(os.path.join(tmp_dir, "t_random_erasing.pt")) + scripted_fn.save(os.path.join(tmpdir, "t_random_erasing.pt")) def test_random_erasing_with_invalid_data(): @@ -583,7 +573,7 @@ def test_random_erasing_with_invalid_data(): @pytest.mark.parametrize('device', cpu_and_gpu()) -def test_normalize(device): +def test_normalize(device, tmpdir): fn = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) tensor, _ = _create_data(26, 34, device=device) @@ -598,12 +588,11 @@ def test_normalize(device): _test_transform_vs_scripted(fn, scripted_fn, tensor) _test_transform_vs_scripted_on_batch(fn, scripted_fn, batch_tensors) - with get_tmp_dir() as tmp_dir: - scripted_fn.save(os.path.join(tmp_dir, "t_norm.pt")) + scripted_fn.save(os.path.join(tmpdir, "t_norm.pt")) @pytest.mark.parametrize('device', cpu_and_gpu()) -def test_linear_transformation(device): +def test_linear_transformation(device, tmpdir): c, h, w = 3, 24, 32 tensor, _ = _create_data(h, w, channels=c, device=device) @@ -625,8 +614,7 @@ def test_linear_transformation(device): s_transformed_batch = scripted_fn(batch_tensors) assert_equal(transformed_batch, s_transformed_batch) - with get_tmp_dir() as tmp_dir: - scripted_fn.save(os.path.join(tmp_dir, "t_norm.pt")) + scripted_fn.save(os.path.join(tmpdir, "t_norm.pt")) @pytest.mark.parametrize('device', cpu_and_gpu()) From d2b2da4f7223ad0747a9b89dada769676bc13d35 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Mon, 16 Aug 2021 19:24:16 +0100 Subject: [PATCH 04/19] Replace in test_internet.py and test_io.py --- test/test_internet.py | 53 ++++++++++++++++++---------------------- test/test_io.py | 57 +++++++++++++++++++++---------------------- 2 files changed, 52 insertions(+), 58 deletions(-) diff --git a/test/test_internet.py b/test/test_internet.py index 8b1678f7b58..fd552961714 100644 --- a/test/test_internet.py +++ 
b/test/test_internet.py @@ -11,35 +11,31 @@ from urllib.error import URLError import torchvision.datasets.utils as utils -from common_utils import get_tmp_dir class TestDatasetUtils: - def test_download_url(self): - with get_tmp_dir() as temp_dir: - url = "http://github.com/pytorch/vision/archive/master.zip" - try: - utils.download_url(url, temp_dir) - assert len(os.listdir(temp_dir)) != 0 - except URLError: - pytest.skip(f"could not download test file '{url}'") - - def test_download_url_retry_http(self): - with get_tmp_dir() as temp_dir: - url = "https://github.com/pytorch/vision/archive/master.zip" - try: - utils.download_url(url, temp_dir) - assert len(os.listdir(temp_dir)) != 0 - except URLError: - pytest.skip(f"could not download test file '{url}'") - - def test_download_url_dont_exist(self): - with get_tmp_dir() as temp_dir: - url = "http://github.com/pytorch/vision/archive/this_doesnt_exist.zip" - with pytest.raises(URLError): - utils.download_url(url, temp_dir) - - def test_download_url_dispatch_download_from_google_drive(self, mocker): + def test_download_url(self, tmpdir): + url = "http://github.com/pytorch/vision/archive/master.zip" + try: + utils.download_url(url, tmpdir) + assert len(os.listdir(tmpdir)) != 0 + except URLError: + pytest.skip(f"could not download test file '{url}'") + + def test_download_url_retry_http(self, tmpdir): + url = "https://github.com/pytorch/vision/archive/master.zip" + try: + utils.download_url(url, tmpdir) + assert len(os.listdir(tmpdir)) != 0 + except URLError: + pytest.skip(f"could not download test file '{url}'") + + def test_download_url_dont_exist(self, tmpdir): + url = "http://github.com/pytorch/vision/archive/this_doesnt_exist.zip" + with pytest.raises(URLError): + utils.download_url(url, tmpdir) + + def test_download_url_dispatch_download_from_google_drive(self, mocker, tmpdir): url = "https://drive.google.com/file/d/1hbzc_P1FuxMkcabkgn9ZKinBwW683j45/view" id = "1hbzc_P1FuxMkcabkgn9ZKinBwW683j45" @@ -47,10 +43,9 @@ def test_download_url_dispatch_download_from_google_drive(self, mocker): md5 = "md5" mocked = mocker.patch('torchvision.datasets.utils.download_file_from_google_drive') - with get_tmp_dir() as root: - utils.download_url(url, root, filename, md5) + utils.download_url(url, tmpdir, filename, md5) - mocked.assert_called_once_with(id, root, filename, md5) + mocked.assert_called_once_with(id, tmpdir, filename, md5) if __name__ == '__main__': diff --git a/test/test_io.py b/test/test_io.py index 56cd0af5fd8..150d66f0814 100644 --- a/test/test_io.py +++ b/test/test_io.py @@ -9,7 +9,7 @@ import warnings from urllib.error import URLError -from common_utils import get_tmp_dir, assert_equal +from common_utils import assert_equal try: @@ -255,37 +255,36 @@ def test_read_video_partially_corrupted_file(self): assert_equal(video, data) @pytest.mark.skipif(sys.platform == 'win32', reason='temporarily disabled on Windows') - def test_write_video_with_audio(self): + def test_write_video_with_audio(self, tmpdir): f_name = os.path.join(VIDEO_DIR, "R6llTwEh07w.mp4") video_tensor, audio_tensor, info = io.read_video(f_name, pts_unit="sec") - with get_tmp_dir() as tmpdir: - out_f_name = os.path.join(tmpdir, "testing.mp4") - io.video.write_video( - out_f_name, - video_tensor, - round(info["video_fps"]), - video_codec="libx264rgb", - options={'crf': '0'}, - audio_array=audio_tensor, - audio_fps=info["audio_fps"], - audio_codec="aac", - ) - - out_video_tensor, out_audio_tensor, out_info = io.read_video( - out_f_name, pts_unit="sec" - ) - - assert 
info["video_fps"] == out_info["video_fps"] - assert_equal(video_tensor, out_video_tensor) - - audio_stream = av.open(f_name).streams.audio[0] - out_audio_stream = av.open(out_f_name).streams.audio[0] - - assert info["audio_fps"] == out_info["audio_fps"] - assert audio_stream.rate == out_audio_stream.rate - assert pytest.approx(out_audio_stream.frames, rel=0.0, abs=1) == audio_stream.frames - assert audio_stream.frame_size == out_audio_stream.frame_size + out_f_name = os.path.join(tmpdir, "testing.mp4") + io.video.write_video( + out_f_name, + video_tensor, + round(info["video_fps"]), + video_codec="libx264rgb", + options={'crf': '0'}, + audio_array=audio_tensor, + audio_fps=info["audio_fps"], + audio_codec="aac", + ) + + out_video_tensor, out_audio_tensor, out_info = io.read_video( + out_f_name, pts_unit="sec" + ) + + assert info["video_fps"] == out_info["video_fps"] + assert_equal(video_tensor, out_video_tensor) + + audio_stream = av.open(f_name).streams.audio[0] + out_audio_stream = av.open(out_f_name).streams.audio[0] + + assert info["audio_fps"] == out_info["audio_fps"] + assert audio_stream.rate == out_audio_stream.rate + assert pytest.approx(out_audio_stream.frames, rel=0.0, abs=1) == audio_stream.frames + assert audio_stream.frame_size == out_audio_stream.frame_size # TODO add tests for audio From 29c1de1c84327d51462403bb370b2bcb04159ab3 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Mon, 16 Aug 2021 19:57:03 +0100 Subject: [PATCH 05/19] get_list_of_videos is util function still use get_tmp_dir --- test/test_datasets_samplers.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py index ddb7d87bf18..d89422c219b 100644 --- a/test/test_datasets_samplers.py +++ b/test/test_datasets_samplers.py @@ -18,22 +18,23 @@ @contextlib.contextmanager def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): - names = [] - for i in range(num_videos): - if sizes is None: - size = 5 * (i + 1) - else: - size = sizes[i] - if fps is None: - f = 5 - else: - f = fps[i] - data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) - name = os.path.join(tmpdir, "{}.mp4".format(i)) - names.append(name) - io.write_video(name, data, fps=f) + with get_tmp_dir() as tmp_dir: + names = [] + for i in range(num_videos): + if sizes is None: + size = 5 * (i + 1) + else: + size = sizes[i] + if fps is None: + f = 5 + else: + f = fps[i] + data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) + name = os.path.join(tmp_dir, "{}.mp4".format(i)) + names.append(name) + io.write_video(name, data, fps=f) - yield names + yield names @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av") From 9176edaacca550c55e6537a7fa2e19a0126108c3 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Mon, 16 Aug 2021 21:52:10 +0100 Subject: [PATCH 06/19] Fix get_list_of_videos siginiture --- test/test_datasets_samplers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py index d89422c219b..b58c43d290d 100644 --- a/test/test_datasets_samplers.py +++ b/test/test_datasets_samplers.py @@ -17,7 +17,7 @@ @contextlib.contextmanager -def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): +def get_list_of_videos(num_videos=5, sizes=None, fps=None): with get_tmp_dir() as tmp_dir: names = [] for i in range(num_videos): From 991714c0fb450214c4c9fb889e0e7fa829536a65 Mon Sep 17 00:00:00 2001 From: Alex Lin 
Date: Mon, 16 Aug 2021 21:55:22 +0100 Subject: [PATCH 07/19] Add get_tmp_dir import --- test/test_datasets_samplers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py index b58c43d290d..7754c1a98e8 100644 --- a/test/test_datasets_samplers.py +++ b/test/test_datasets_samplers.py @@ -13,7 +13,7 @@ from torchvision.datasets.video_utils import VideoClips, unfold from torchvision import get_video_backend -from common_utils import assert_equal +from common_utils import get_tmp_dir, assert_equal @contextlib.contextmanager From 3b7a065fc60badfbbea38bd38f522273c1f52105 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Tue, 17 Aug 2021 08:22:51 +0100 Subject: [PATCH 08/19] Modify test_datasets_video_utils.py for test to pass --- test/test_datasets_video_utils.py | 37 ++++++++++++++++--------------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/test/test_datasets_video_utils.py b/test/test_datasets_video_utils.py index 6453f38d065..a8df1dc815b 100644 --- a/test/test_datasets_video_utils.py +++ b/test/test_datasets_video_utils.py @@ -6,27 +6,28 @@ from torchvision import io from torchvision.datasets.video_utils import VideoClips, unfold -from common_utils import assert_equal +from common_utils import get_tmp_dir, assert_equal @contextlib.contextmanager -def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): - names = [] - for i in range(num_videos): - if sizes is None: - size = 5 * (i + 1) - else: - size = sizes[i] - if fps is None: - f = 5 - else: - f = fps[i] - data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) - name = os.path.join(tmpdir, "{}.mp4".format(i)) - names.append(name) - io.write_video(name, data, fps=f) - - yield names +def get_list_of_videos(num_videos=5, sizes=None, fps=None): + with get_tmp_dir() as tmp_dir: + names = [] + for i in range(num_videos): + if sizes is None: + size = 5 * (i + 1) + else: + size = sizes[i] + if fps is None: + f = 5 + else: + f = fps[i] + data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) + name = os.path.join(tmp_dir, "{}.mp4".format(i)) + names.append(name) + io.write_video(name, data, fps=f) + + yield names class TestVideo: From 4f6226dc155e9a576161cce7ba6c16df1f3212c2 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Tue, 17 Aug 2021 08:42:24 +0100 Subject: [PATCH 09/19] Fix indentation --- test/test_datasets_video_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_datasets_video_utils.py b/test/test_datasets_video_utils.py index a8df1dc815b..00db0aad127 100644 --- a/test/test_datasets_video_utils.py +++ b/test/test_datasets_video_utils.py @@ -27,7 +27,7 @@ def get_list_of_videos(num_videos=5, sizes=None, fps=None): names.append(name) io.write_video(name, data, fps=f) - yield names + yield names class TestVideo: From 6adeaf2d4fb70d6cf807ae1c2c5af3122c858418 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Tue, 17 Aug 2021 09:24:42 +0100 Subject: [PATCH 10/19] Replace get_tmp_dir in util functions in test_dataset_sampler.py --- test/test_datasets_video_utils.py | 45 +++++++++++++++---------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/test/test_datasets_video_utils.py b/test/test_datasets_video_utils.py index 00db0aad127..c7b3d13e7bd 100644 --- a/test/test_datasets_video_utils.py +++ b/test/test_datasets_video_utils.py @@ -6,28 +6,27 @@ from torchvision import io from torchvision.datasets.video_utils import VideoClips, unfold -from common_utils import 
get_tmp_dir, assert_equal +from common_utils import assert_equal @contextlib.contextmanager -def get_list_of_videos(num_videos=5, sizes=None, fps=None): - with get_tmp_dir() as tmp_dir: - names = [] - for i in range(num_videos): - if sizes is None: - size = 5 * (i + 1) - else: - size = sizes[i] - if fps is None: - f = 5 - else: - f = fps[i] - data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) - name = os.path.join(tmp_dir, "{}.mp4".format(i)) - names.append(name) - io.write_video(name, data, fps=f) - - yield names +def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): + names = [] + for i in range(num_videos): + if sizes is None: + size = 5 * (i + 1) + else: + size = sizes[i] + if fps is None: + f = 5 + else: + f = fps[i] + data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) + name = os.path.join(tmpdir, "{}.mp4".format(i)) + names.append(name) + io.write_video(name, data, fps=f) + + yield names class TestVideo: @@ -58,8 +57,8 @@ def test_unfold(self): assert_equal(r, expected) @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av") - def test_video_clips(self): - with get_list_of_videos(num_videos=3) as video_list: + def test_video_clips(self, tmpdir): + with get_list_of_videos(tmpdir, num_videos=3) as video_list: video_clips = VideoClips(video_list, 5, 5, num_workers=2) assert video_clips.num_clips() == 1 + 2 + 3 for i, (v_idx, c_idx) in enumerate([(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]): @@ -82,8 +81,8 @@ def test_video_clips(self): assert clip_idx == c_idx @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av") - def test_video_clips_custom_fps(self): - with get_list_of_videos(num_videos=3, sizes=[12, 12, 12], fps=[3, 4, 6]) as video_list: + def test_video_clips_custom_fps(self, tmpdir): + with get_list_of_videos(tmpdir, num_videos=3, sizes=[12, 12, 12], fps=[3, 4, 6]) as video_list: num_frames = 4 for fps in [1, 3, 4, 10]: video_clips = VideoClips(video_list, num_frames, num_frames, fps, num_workers=2) From 6fbf9dc998212b085ba3efb38c4ba2fe08541f98 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Tue, 17 Aug 2021 10:28:53 +0100 Subject: [PATCH 11/19] Replace get_tmp_dir in util functions in test_dataset_video_utils.py --- test/test_datasets_samplers.py | 55 +++++++++++++++++----------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py index 7754c1a98e8..340ad17d9ea 100644 --- a/test/test_datasets_samplers.py +++ b/test/test_datasets_samplers.py @@ -13,34 +13,33 @@ from torchvision.datasets.video_utils import VideoClips, unfold from torchvision import get_video_backend -from common_utils import get_tmp_dir, assert_equal +from common_utils import assert_equal @contextlib.contextmanager -def get_list_of_videos(num_videos=5, sizes=None, fps=None): - with get_tmp_dir() as tmp_dir: - names = [] - for i in range(num_videos): - if sizes is None: - size = 5 * (i + 1) - else: - size = sizes[i] - if fps is None: - f = 5 - else: - f = fps[i] - data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) - name = os.path.join(tmp_dir, "{}.mp4".format(i)) - names.append(name) - io.write_video(name, data, fps=f) +def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): + names = [] + for i in range(num_videos): + if sizes is None: + size = 5 * (i + 1) + else: + size = sizes[i] + if fps is None: + f = 5 + else: + f = fps[i] + data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) + 
name = os.path.join(tmpdir, "{}.mp4".format(i)) + names.append(name) + io.write_video(name, data, fps=f) - yield names + yield names @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av") class TestDatasetsSamplers: - def test_random_clip_sampler(self): - with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list: + def test_random_clip_sampler(self, tmpdir): + with get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25]) as video_list: video_clips = VideoClips(video_list, 5, 5) sampler = RandomClipSampler(video_clips, 3) assert len(sampler) == 3 * 3 @@ -50,8 +49,8 @@ def test_random_clip_sampler(self): assert_equal(v_idxs, torch.tensor([0, 1, 2])) assert_equal(count, torch.tensor([3, 3, 3])) - def test_random_clip_sampler_unequal(self): - with get_list_of_videos(num_videos=3, sizes=[10, 25, 25]) as video_list: + def test_random_clip_sampler_unequal(self, tmpdir): + with get_list_of_videos(tmpdir, tmpnum_videos=3, sizes=[10, 25, 25]) as video_list: video_clips = VideoClips(video_list, 5, 5) sampler = RandomClipSampler(video_clips, 3) assert len(sampler) == 2 + 3 + 3 @@ -67,8 +66,8 @@ def test_random_clip_sampler_unequal(self): assert_equal(v_idxs, torch.tensor([0, 1])) assert_equal(count, torch.tensor([3, 3])) - def test_uniform_clip_sampler(self): - with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list: + def test_uniform_clip_sampler(self, tmpdir): + with get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25]) as video_list: video_clips = VideoClips(video_list, 5, 5) sampler = UniformClipSampler(video_clips, 3) assert len(sampler) == 3 * 3 @@ -79,16 +78,16 @@ def test_uniform_clip_sampler(self): assert_equal(count, torch.tensor([3, 3, 3])) assert_equal(indices, torch.tensor([0, 2, 4, 5, 7, 9, 10, 12, 14])) - def test_uniform_clip_sampler_insufficient_clips(self): - with get_list_of_videos(num_videos=3, sizes=[10, 25, 25]) as video_list: + def test_uniform_clip_sampler_insufficient_clips(self, tmpdir): + with get_list_of_videos(tmpdir, num_videos=3, sizes=[10, 25, 25]) as video_list: video_clips = VideoClips(video_list, 5, 5) sampler = UniformClipSampler(video_clips, 3) assert len(sampler) == 3 * 3 indices = torch.tensor(list(iter(sampler))) assert_equal(indices, torch.tensor([0, 0, 1, 2, 4, 6, 7, 9, 11])) - def test_distributed_sampler_and_uniform_clip_sampler(self): - with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list: + def test_distributed_sampler_and_uniform_clip_sampler(self, tmpdir): + with get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25]) as video_list: video_clips = VideoClips(video_list, 5, 5) clip_sampler = UniformClipSampler(video_clips, 3) From 12fda566d81769a2bc561222bac4077aca4567dd Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Tue, 17 Aug 2021 11:49:55 +0100 Subject: [PATCH 12/19] Move get_tmp_dir() to datasets_utils.py and refactor --- test/common_utils.py | 12 ------ test/datasets_utils.py | 13 +++++- test/test_transforms_tensor.py | 73 +++++++++++++++++++--------------- 3 files changed, 52 insertions(+), 46 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index 5936ae1f713..c9e71168088 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -29,18 +29,6 @@ CIRCLECI_GPU_NO_CUDA_MSG = "We're in a CircleCI GPU machine, and this test doesn't need cuda." 
-@contextlib.contextmanager -def get_tmp_dir(src=None, **kwargs): - tmp_dir = tempfile.mkdtemp(**kwargs) - if src is not None: - os.rmdir(tmp_dir) - shutil.copytree(src, tmp_dir) - try: - yield tmp_dir - finally: - shutil.rmtree(tmp_dir) - - def set_rng_seed(seed): torch.manual_seed(seed) random.seed(seed) diff --git a/test/datasets_utils.py b/test/datasets_utils.py index ca182b8d25f..10449d10421 100644 --- a/test/datasets_utils.py +++ b/test/datasets_utils.py @@ -19,7 +19,7 @@ import torchvision.datasets import torchvision.io -from common_utils import get_tmp_dir, disable_console_output +from common_utils import disable_console_output __all__ = [ @@ -37,6 +37,17 @@ "create_random_string", ] +@contextlib.contextmanager +def get_tmp_dir(src=None, **kwargs): + tmp_dir = tempfile.mkdtemp(**kwargs) + if src is not None: + os.rmdir(tmp_dir) + shutil.copytree(src, tmp_dir) + try: + yield tmp_dir + finally: + shutil.rmtree(tmp_dir) + class UsageError(Exception): """Should be raised in case an error happens in the setup rather than the test.""" diff --git a/test/test_transforms_tensor.py b/test/test_transforms_tensor.py index 5081626fec4..4eec5473b42 100644 --- a/test/test_transforms_tensor.py +++ b/test/test_transforms_tensor.py @@ -10,7 +10,6 @@ from typing import Sequence from common_utils import ( - get_tmp_dir, int_dtypes, float_dtypes, _create_data, @@ -59,7 +58,7 @@ def _test_functional_op(f, device, fn_kwargs=None, test_exact_match=True, **matc _assert_approx_equal_tensor_to_pil(transformed_tensor, transformed_pil_img, **match_kwargs) -def _test_class_op(method, device, meth_kwargs=None, test_exact_match=True, **match_kwargs): +def _test_class_op(method, device, tmpdir, meth_kwargs=None, test_exact_match=True, **match_kwargs): # TODO: change the name: it's not a method, it's a class. 
meth_kwargs = meth_kwargs or {} @@ -85,13 +84,12 @@ def _test_class_op(method, device, meth_kwargs=None, test_exact_match=True, **ma batch_tensors = _create_data_batch(height=23, width=34, channels=3, num_samples=4, device=device) _test_transform_vs_scripted_on_batch(f, scripted_fn, batch_tensors) - with get_tmp_dir() as tmp_dir: - scripted_fn.save(os.path.join(tmp_dir, f"t_{method.__name__}.pt")) + scripted_fn.save(os.path.join(tmp_dir, f"t_{method.__name__}.pt")) -def _test_op(func, method, device, fn_kwargs=None, meth_kwargs=None, test_exact_match=True, **match_kwargs): +def _test_op(func, method, device, tmpdir, fn_kwargs=None, meth_kwargs=None, test_exact_match=True, **match_kwargs): _test_functional_op(func, device, fn_kwargs, test_exact_match=test_exact_match, **match_kwargs) - _test_class_op(method, device, meth_kwargs, test_exact_match=test_exact_match, **match_kwargs) + _test_class_op(method, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=test_exact_match, **match_kwargs) @pytest.mark.parametrize('device', cpu_and_gpu()) @@ -109,53 +107,56 @@ def _test_op(func, method, device, fn_kwargs=None, meth_kwargs=None, test_exact_ (F.equalize, T.RandomEqualize, None, {}) ] ) -def test_random(func, method, device, fn_kwargs, match_kwargs): - _test_op(func, method, device, fn_kwargs, fn_kwargs, **match_kwargs) @pytest.mark.parametrize('device', cpu_and_gpu()) class TestColorJitter: @pytest.mark.parametrize('brightness', [0.1, 0.5, 1.0, 1.34, (0.3, 0.7), [0.4, 0.5]]) - def test_color_jitter_brightness(self, brightness, device): + @pytest.fixture() + def test_color_jitter_brightness(self, brightness, device, tmpdir): tol = 1.0 + 1e-10 meth_kwargs = {"brightness": brightness} _test_class_op( - T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, + T.ColorJitter, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, tol=tol, agg_method="max" ) @pytest.mark.parametrize('contrast', [0.2, 0.5, 1.0, 1.5, (0.3, 0.7), [0.4, 0.5]]) - def test_color_jitter_contrast(self, contrast, device): + @pytest.fixture() + def test_color_jitter_contrast(self, contrast, device, tmpdir): tol = 1.0 + 1e-10 meth_kwargs = {"contrast": contrast} _test_class_op( - T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, + T.ColorJitter, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, tol=tol, agg_method="max" ) @pytest.mark.parametrize('saturation', [0.5, 0.75, 1.0, 1.25, (0.3, 0.7), [0.3, 0.4]]) - def test_color_jitter_saturation(self, saturation, device): + @pytest.fixture() + def test_color_jitter_saturation(self, saturation, device, tmpdir): tol = 1.0 + 1e-10 meth_kwargs = {"saturation": saturation} _test_class_op( - T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, + T.ColorJitter, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, tol=tol, agg_method="max" ) @pytest.mark.parametrize('hue', [0.2, 0.5, (-0.2, 0.3), [-0.4, 0.5]]) - def test_color_jitter_hue(self, hue, device): + @pytest.fixture() + def test_color_jitter_hue(self, hue, device, tmpdir): meth_kwargs = {"hue": hue} _test_class_op( - T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, + T.ColorJitter, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, tol=16.1, agg_method="max" ) - def test_color_jitter_all(self, device): + @pytest.fixture() + def test_color_jitter_all(self, device, tmpdir): # All 4 parameters together meth_kwargs = {"brightness": 0.2, "contrast": 0.2, "saturation": 0.2, 
"hue": 0.2} _test_class_op( - T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, + T.ColorJitter, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, tol=12.1, agg_method="max" ) @@ -163,7 +164,8 @@ def test_color_jitter_all(self, device): @pytest.mark.parametrize('device', cpu_and_gpu()) @pytest.mark.parametrize('m', ["constant", "edge", "reflect", "symmetric"]) @pytest.mark.parametrize('mul', [1, -1]) -def test_pad(m, mul, device): +@pytest.fixture() +def test_pad(m, mul, device, tmpdir): fill = 127 if m == "constant" else 0 # Test functional.pad (PIL and Tensor) with padding as single int @@ -174,27 +176,28 @@ def test_pad(m, mul, device): # Test functional.pad and transforms.Pad with padding as [int, ] fn_kwargs = meth_kwargs = {"padding": [mul * 2, ], "fill": fill, "padding_mode": m} _test_op( - F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + F.pad, T.Pad, device, tmpdir, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) # Test functional.pad and transforms.Pad with padding as list fn_kwargs = meth_kwargs = {"padding": [mul * 4, 4], "fill": fill, "padding_mode": m} _test_op( - F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + F.pad, T.Pad, device, tmpdir, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) # Test functional.pad and transforms.Pad with padding as tuple fn_kwargs = meth_kwargs = {"padding": (mul * 2, 2, 2, mul * 2), "fill": fill, "padding_mode": m} _test_op( - F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + F.pad, T.Pad, device, tmpdir, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) @pytest.mark.parametrize('device', cpu_and_gpu()) -def test_crop(device): +@pytest.fixture() +def test_crop(device, tmpdir): fn_kwargs = {"top": 2, "left": 3, "height": 4, "width": 5} # Test transforms.RandomCrop with size and padding as tuple meth_kwargs = {"size": (4, 5), "padding": (4, 4), "pad_if_needed": True, } _test_op( - F.crop, T.RandomCrop, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + F.crop, T.RandomCrop, device, tmpdir, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) # Test transforms.functional.crop including outside the image area @@ -223,24 +226,26 @@ def test_crop(device): {"padding_mode": "reflect"} ]) @pytest.mark.parametrize('size', [5, [5, ], [6, 6]]) -def test_crop_pad(size, padding_config, device): +@pytest.fixture() +def test_crop_pad(size, padding_config, device, tmpdir): config = dict(padding_config) config["size"] = size - _test_class_op(T.RandomCrop, device, config) + _test_class_op(T.RandomCrop, device, tmpdir, config) @pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.fixture() def test_center_crop(device, tmpdir): fn_kwargs = {"output_size": (4, 5)} meth_kwargs = {"size": (4, 5), } _test_op( - F.center_crop, T.CenterCrop, device=device, fn_kwargs=fn_kwargs, + F.center_crop, T.CenterCrop, device, tmpdir, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) fn_kwargs = {"output_size": (5,)} meth_kwargs = {"size": (5,)} _test_op( - F.center_crop, T.CenterCrop, device=device, fn_kwargs=fn_kwargs, + F.center_crop, T.CenterCrop, device, tmpdir, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) tensor = torch.randint(0, 256, (3, 10, 10), dtype=torch.uint8, device=device) @@ -479,10 +484,11 @@ def test_random_perspective_save(tmpdir): (T.Grayscale, {"num_output_channels": 3}), (T.RandomGrayscale, {}) ]) -def test_to_grayscale(device, Klass, meth_kwargs): +@pytest.fixture() +def test_to_grayscale(device, Klass, meth_kwargs, tmpdir): tol = 1.0 
+ 1e-10
     _test_class_op(
-        Klass, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
+        Klass, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False,
         tol=tol, agg_method="max"
     )
 
@@ -681,9 +687,10 @@ def test_random_apply(device):
     {"kernel_size": (3, 3), "sigma": (0.1, 2.0)},
     {"kernel_size": [23], "sigma": 0.75}
 ])
-def test_gaussian_blur(device, meth_kwargs):
+@pytest.fixture()
+def test_gaussian_blur(device, meth_kwargs, tmpdir):
     tol = 1.0 + 1e-10
     _test_class_op(
-        T.GaussianBlur, meth_kwargs=meth_kwargs,
-        test_exact_match=False, device=device, agg_method="max", tol=tol
+        T.GaussianBlur, device, tmpdir, meth_kwargs=meth_kwargs,
+        test_exact_match=False, agg_method="max", tol=tol
     )

From 686174c4e419b977560ee18348295ee340c4128e Mon Sep 17 00:00:00 2001
From: Alex Lin
Date: Tue, 17 Aug 2021 12:05:40 +0100
Subject: [PATCH 13/19] Fix pylint, indentation and imports

---
 test/common_utils.py   | 1 -
 test/datasets_utils.py | 4 ++--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/test/common_utils.py b/test/common_utils.py
index c9e71168088..68b58b601d1 100644
--- a/test/common_utils.py
+++ b/test/common_utils.py
@@ -1,6 +1,5 @@
 import os
 import shutil
-import tempfile
 import contextlib
 import unittest
 import pytest
diff --git a/test/datasets_utils.py b/test/datasets_utils.py
index 10449d10421..6218fc15f4f 100644
--- a/test/datasets_utils.py
+++ b/test/datasets_utils.py
@@ -7,6 +7,7 @@
 import pathlib
 import random
 import string
+import tempfile
 import unittest
 import unittest.mock
 from collections import defaultdict
@@ -37,6 +37,7 @@
     "create_random_string",
 ]
 
+
 @contextlib.contextmanager
 def get_tmp_dir(src=None, **kwargs):
     tmp_dir = tempfile.mkdtemp(**kwargs)
@@ -108,8 +110,6 @@ def _import(package, subpackages):
 
 lazy_importer = LazyImporter()
 
-
-
 def requires_lazy_imports(*modules):
     def outer_wrapper(fn):
         @functools.wraps(fn)

From 49f8e4acb39abb0ec82b9f7c52b0933911604103 Mon Sep 17 00:00:00 2001
From: Alex Lin
Date: Tue, 17 Aug 2021 12:20:40 +0100
Subject: [PATCH 14/19] Move shutil import from common_utils.py to datasets_utils.py

---
 test/common_utils.py           | 1 -
 test/datasets_utils.py         | 3 +++
 test/test_transforms_tensor.py | 2 --
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/test/common_utils.py b/test/common_utils.py
index 68b58b601d1..e40ab06e17f 100644
--- a/test/common_utils.py
+++ b/test/common_utils.py
@@ -1,5 +1,4 @@
 import os
-import shutil
 import contextlib
 import unittest
 import pytest
diff --git a/test/datasets_utils.py b/test/datasets_utils.py
index 6218fc15f4f..0d78a3273c9 100644
--- a/test/datasets_utils.py
+++ b/test/datasets_utils.py
@@ -6,6 +6,7 @@
 import os
 import pathlib
 import random
+import shutil
 import string
 import tempfile
 import unittest
@@ -110,6 +111,8 @@ def _import(package, subpackages):
 
 lazy_importer = LazyImporter()
 
+
+
 def requires_lazy_imports(*modules):
     def outer_wrapper(fn):
         @functools.wraps(fn)
diff --git a/test/test_transforms_tensor.py b/test/test_transforms_tensor.py
index 4eec5473b42..69f38626268 100644
--- a/test/test_transforms_tensor.py
+++ b/test/test_transforms_tensor.py
@@ -107,8 +107,6 @@ def _test_op(func, method, device, tmpdir, fn_kwargs=None, meth_kwargs=None, tes
         (F.equalize, T.RandomEqualize, None, {})
     ]
 )
-
-
 @pytest.mark.parametrize('device', cpu_and_gpu())
 class TestColorJitter:

From 03e82384d6709b9120199120eb583db7518422a0 Mon Sep 17 00:00:00 2001
From: Alex Lin
Date: Tue, 17 Aug 2021 12:37:37 +0100
Subject: [PATCH 15/19] Fix function signature

---
 test/test_datasets_samplers.py | 2 +-
 1 file changed, 1 insertion(+), 1
deletion(-) diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py index 340ad17d9ea..fb4e9a29ed9 100644 --- a/test/test_datasets_samplers.py +++ b/test/test_datasets_samplers.py @@ -50,7 +50,7 @@ def test_random_clip_sampler(self, tmpdir): assert_equal(count, torch.tensor([3, 3, 3])) def test_random_clip_sampler_unequal(self, tmpdir): - with get_list_of_videos(tmpdir, tmpnum_videos=3, sizes=[10, 25, 25]) as video_list: + with get_list_of_videos(tmpdir, num_videos=3, sizes=[10, 25, 25]) as video_list: video_clips = VideoClips(video_list, 5, 5) sampler = RandomClipSampler(video_clips, 3) assert len(sampler) == 2 + 3 + 3 From 8796cbeb26eaef05a3861f326fc748f542164c6c Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Tue, 17 Aug 2021 14:48:13 +0100 Subject: [PATCH 16/19] Remove get_list_of_videos under context manager --- test/test_datasets_samplers.py | 122 +++++++++++++++--------------- test/test_datasets_video_utils.py | 60 +++++++-------- 2 files changed, 91 insertions(+), 91 deletions(-) diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py index fb4e9a29ed9..6e36c03c852 100644 --- a/test/test_datasets_samplers.py +++ b/test/test_datasets_samplers.py @@ -39,77 +39,77 @@ def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av") class TestDatasetsSamplers: def test_random_clip_sampler(self, tmpdir): - with get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25]) as video_list: - video_clips = VideoClips(video_list, 5, 5) - sampler = RandomClipSampler(video_clips, 3) - assert len(sampler) == 3 * 3 - indices = torch.tensor(list(iter(sampler))) - videos = torch.div(indices, 5, rounding_mode='floor') - v_idxs, count = torch.unique(videos, return_counts=True) - assert_equal(v_idxs, torch.tensor([0, 1, 2])) - assert_equal(count, torch.tensor([3, 3, 3])) + video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25]) + video_clips = VideoClips(video_list, 5, 5) + sampler = RandomClipSampler(video_clips, 3) + assert len(sampler) == 3 * 3 + indices = torch.tensor(list(iter(sampler))) + videos = torch.div(indices, 5, rounding_mode='floor') + v_idxs, count = torch.unique(videos, return_counts=True) + assert_equal(v_idxs, torch.tensor([0, 1, 2])) + assert_equal(count, torch.tensor([3, 3, 3])) def test_random_clip_sampler_unequal(self, tmpdir): - with get_list_of_videos(tmpdir, num_videos=3, sizes=[10, 25, 25]) as video_list: - video_clips = VideoClips(video_list, 5, 5) - sampler = RandomClipSampler(video_clips, 3) - assert len(sampler) == 2 + 3 + 3 - indices = list(iter(sampler)) - assert 0 in indices - assert 1 in indices - # remove elements of the first video, to simplify testing - indices.remove(0) - indices.remove(1) - indices = torch.tensor(indices) - 2 - videos = torch.div(indices, 5, rounding_mode='floor') - v_idxs, count = torch.unique(videos, return_counts=True) - assert_equal(v_idxs, torch.tensor([0, 1])) - assert_equal(count, torch.tensor([3, 3])) + video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[10, 25, 25]) + video_clips = VideoClips(video_list, 5, 5) + sampler = RandomClipSampler(video_clips, 3) + assert len(sampler) == 2 + 3 + 3 + indices = list(iter(sampler)) + assert 0 in indices + assert 1 in indices + # remove elements of the first video, to simplify testing + indices.remove(0) + indices.remove(1) + indices = torch.tensor(indices) - 2 + videos = torch.div(indices, 5, rounding_mode='floor') + v_idxs, count = 
torch.unique(videos, return_counts=True) + assert_equal(v_idxs, torch.tensor([0, 1])) + assert_equal(count, torch.tensor([3, 3])) def test_uniform_clip_sampler(self, tmpdir): - with get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25]) as video_list: - video_clips = VideoClips(video_list, 5, 5) - sampler = UniformClipSampler(video_clips, 3) - assert len(sampler) == 3 * 3 - indices = torch.tensor(list(iter(sampler))) - videos = torch.div(indices, 5, rounding_mode='floor') - v_idxs, count = torch.unique(videos, return_counts=True) - assert_equal(v_idxs, torch.tensor([0, 1, 2])) - assert_equal(count, torch.tensor([3, 3, 3])) - assert_equal(indices, torch.tensor([0, 2, 4, 5, 7, 9, 10, 12, 14])) + video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25]) + video_clips = VideoClips(video_list, 5, 5) + sampler = UniformClipSampler(video_clips, 3) + assert len(sampler) == 3 * 3 + indices = torch.tensor(list(iter(sampler))) + videos = torch.div(indices, 5, rounding_mode='floor') + v_idxs, count = torch.unique(videos, return_counts=True) + assert_equal(v_idxs, torch.tensor([0, 1, 2])) + assert_equal(count, torch.tensor([3, 3, 3])) + assert_equal(indices, torch.tensor([0, 2, 4, 5, 7, 9, 10, 12, 14])) def test_uniform_clip_sampler_insufficient_clips(self, tmpdir): - with get_list_of_videos(tmpdir, num_videos=3, sizes=[10, 25, 25]) as video_list: - video_clips = VideoClips(video_list, 5, 5) - sampler = UniformClipSampler(video_clips, 3) - assert len(sampler) == 3 * 3 - indices = torch.tensor(list(iter(sampler))) - assert_equal(indices, torch.tensor([0, 0, 1, 2, 4, 6, 7, 9, 11])) + video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[10, 25, 25]) + video_clips = VideoClips(video_list, 5, 5) + sampler = UniformClipSampler(video_clips, 3) + assert len(sampler) == 3 * 3 + indices = torch.tensor(list(iter(sampler))) + assert_equal(indices, torch.tensor([0, 0, 1, 2, 4, 6, 7, 9, 11])) def test_distributed_sampler_and_uniform_clip_sampler(self, tmpdir): - with get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25]) as video_list: - video_clips = VideoClips(video_list, 5, 5) - clip_sampler = UniformClipSampler(video_clips, 3) + video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25]) + video_clips = VideoClips(video_list, 5, 5) + clip_sampler = UniformClipSampler(video_clips, 3) - distributed_sampler_rank0 = DistributedSampler( - clip_sampler, - num_replicas=2, - rank=0, - group_size=3, - ) - indices = torch.tensor(list(iter(distributed_sampler_rank0))) - assert len(distributed_sampler_rank0) == 6 - assert_equal(indices, torch.tensor([0, 2, 4, 10, 12, 14])) + distributed_sampler_rank0 = DistributedSampler( + clip_sampler, + num_replicas=2, + rank=0, + group_size=3, + ) + indices = torch.tensor(list(iter(distributed_sampler_rank0))) + assert len(distributed_sampler_rank0) == 6 + assert_equal(indices, torch.tensor([0, 2, 4, 10, 12, 14])) - distributed_sampler_rank1 = DistributedSampler( - clip_sampler, - num_replicas=2, - rank=1, - group_size=3, - ) - indices = torch.tensor(list(iter(distributed_sampler_rank1))) - assert len(distributed_sampler_rank1) == 6 - assert_equal(indices, torch.tensor([5, 7, 9, 0, 2, 4])) + distributed_sampler_rank1 = DistributedSampler( + clip_sampler, + num_replicas=2, + rank=1, + group_size=3, + ) + indices = torch.tensor(list(iter(distributed_sampler_rank1))) + assert len(distributed_sampler_rank1) == 6 + assert_equal(indices, torch.tensor([5, 7, 9, 0, 2, 4])) if __name__ == '__main__': diff --git a/test/test_datasets_video_utils.py 
b/test/test_datasets_video_utils.py index c7b3d13e7bd..7ef460dd444 100644 --- a/test/test_datasets_video_utils.py +++ b/test/test_datasets_video_utils.py @@ -58,39 +58,39 @@ def test_unfold(self): @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av") def test_video_clips(self, tmpdir): - with get_list_of_videos(tmpdir, num_videos=3) as video_list: - video_clips = VideoClips(video_list, 5, 5, num_workers=2) - assert video_clips.num_clips() == 1 + 2 + 3 - for i, (v_idx, c_idx) in enumerate([(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]): - video_idx, clip_idx = video_clips.get_clip_location(i) - assert video_idx == v_idx - assert clip_idx == c_idx - - video_clips = VideoClips(video_list, 6, 6) - assert video_clips.num_clips() == 0 + 1 + 2 - for i, (v_idx, c_idx) in enumerate([(1, 0), (2, 0), (2, 1)]): - video_idx, clip_idx = video_clips.get_clip_location(i) - assert video_idx == v_idx - assert clip_idx == c_idx - - video_clips = VideoClips(video_list, 6, 1) - assert video_clips.num_clips() == 0 + (10 - 6 + 1) + (15 - 6 + 1) - for i, v_idx, c_idx in [(0, 1, 0), (4, 1, 4), (5, 2, 0), (6, 2, 1)]: - video_idx, clip_idx = video_clips.get_clip_location(i) - assert video_idx == v_idx - assert clip_idx == c_idx + video_list = get_list_of_videos(tmpdir, num_videos=3) + video_clips = VideoClips(video_list, 5, 5, num_workers=2) + assert video_clips.num_clips() == 1 + 2 + 3 + for i, (v_idx, c_idx) in enumerate([(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]): + video_idx, clip_idx = video_clips.get_clip_location(i) + assert video_idx == v_idx + assert clip_idx == c_idx + + video_clips = VideoClips(video_list, 6, 6) + assert video_clips.num_clips() == 0 + 1 + 2 + for i, (v_idx, c_idx) in enumerate([(1, 0), (2, 0), (2, 1)]): + video_idx, clip_idx = video_clips.get_clip_location(i) + assert video_idx == v_idx + assert clip_idx == c_idx + + video_clips = VideoClips(video_list, 6, 1) + assert video_clips.num_clips() == 0 + (10 - 6 + 1) + (15 - 6 + 1) + for i, v_idx, c_idx in [(0, 1, 0), (4, 1, 4), (5, 2, 0), (6, 2, 1)]: + video_idx, clip_idx = video_clips.get_clip_location(i) + assert video_idx == v_idx + assert clip_idx == c_idx @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av") def test_video_clips_custom_fps(self, tmpdir): - with get_list_of_videos(tmpdir, num_videos=3, sizes=[12, 12, 12], fps=[3, 4, 6]) as video_list: - num_frames = 4 - for fps in [1, 3, 4, 10]: - video_clips = VideoClips(video_list, num_frames, num_frames, fps, num_workers=2) - for i in range(video_clips.num_clips()): - video, audio, info, video_idx = video_clips.get_clip(i) - assert video.shape[0] == num_frames - assert info["video_fps"] == fps - # TODO add tests checking that the content is right + video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[12, 12, 12], fps=[3, 4, 6]) + num_frames = 4 + for fps in [1, 3, 4, 10]: + video_clips = VideoClips(video_list, num_frames, num_frames, fps, num_workers=2) + for i in range(video_clips.num_clips()): + video, audio, info, video_idx = video_clips.get_clip(i) + assert video.shape[0] == num_frames + assert info["video_fps"] == fps + # TODO add tests checking that the content is right def test_compute_clips_for_video(self): video_pts = torch.arange(30) From ab2435f85d83065fcb4b2a9c2ee1179664094646 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Tue, 17 Aug 2021 14:57:34 +0100 Subject: [PATCH 17/19] Move get_list_of_videos to common_utils.py --- test/common_utils.py | 18 ++++++++++++++++++ test/test_datasets_samplers.py | 22 
+--------------------- test/test_datasets_video_utils.py | 22 +--------------------- 3 files changed, 20 insertions(+), 42 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index e40ab06e17f..eb0c8f03e77 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -132,6 +132,24 @@ def _create_data_batch(height=3, width=3, channels=3, num_samples=4, device="cpu assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0) +def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): + names = [] + for i in range(num_videos): + if sizes is None: + size = 5 * (i + 1) + else: + size = sizes[i] + if fps is None: + f = 5 + else: + f = fps[i] + data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) + name = os.path.join(tmpdir, "{}.mp4".format(i)) + names.append(name) + io.write_video(name, data, fps=f) + + return names + def _assert_equal_tensor_to_pil(tensor, pil_image, msg=None): np_pil_image = np.array(pil_image) diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py index 6e36c03c852..c76fd1849fc 100644 --- a/test/test_datasets_samplers.py +++ b/test/test_datasets_samplers.py @@ -13,27 +13,7 @@ from torchvision.datasets.video_utils import VideoClips, unfold from torchvision import get_video_backend -from common_utils import assert_equal - - -@contextlib.contextmanager -def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): - names = [] - for i in range(num_videos): - if sizes is None: - size = 5 * (i + 1) - else: - size = sizes[i] - if fps is None: - f = 5 - else: - f = fps[i] - data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) - name = os.path.join(tmpdir, "{}.mp4".format(i)) - names.append(name) - io.write_video(name, data, fps=f) - - yield names +from common_utils import get_list_of_videos, assert_equal @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av") diff --git a/test/test_datasets_video_utils.py b/test/test_datasets_video_utils.py index 7ef460dd444..9671d1d8f4c 100644 --- a/test/test_datasets_video_utils.py +++ b/test/test_datasets_video_utils.py @@ -6,27 +6,7 @@ from torchvision import io from torchvision.datasets.video_utils import VideoClips, unfold -from common_utils import assert_equal - - -@contextlib.contextmanager -def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): - names = [] - for i in range(num_videos): - if sizes is None: - size = 5 * (i + 1) - else: - size = sizes[i] - if fps is None: - f = 5 - else: - f = fps[i] - data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) - name = os.path.join(tmpdir, "{}.mp4".format(i)) - names.append(name) - io.write_video(name, data, fps=f) - - yield names +from common_utils import get_list_of_videos, assert_equal class TestVideo: From 860b5fba44384c03751003ee89f1b405cd5c2caa Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Tue, 17 Aug 2021 15:33:10 +0100 Subject: [PATCH 18/19] Move get_tmp_dir() back to common_utils.py --- test/common_utils.py | 14 +++++++ test/datasets_utils.py | 16 +------- test/test_transforms_tensor.py | 75 ++++++++++++++++------------------ 3 files changed, 50 insertions(+), 55 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index eb0c8f03e77..4c3a54e7688 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -1,4 +1,6 @@ import os +import shutil +import tempfile import contextlib import unittest import pytest @@ -27,6 +29,18 @@ CIRCLECI_GPU_NO_CUDA_MSG = "We're in a CircleCI GPU machine, and this test doesn't need 
cuda." +@contextlib.contextmanager +def get_tmp_dir(src=None, **kwargs): + tmp_dir = tempfile.mkdtemp(**kwargs) + if src is not None: + os.rmdir(tmp_dir) + shutil.copytree(src, tmp_dir) + try: + yield tmp_dir + finally: + shutil.rmtree(tmp_dir) + + def set_rng_seed(seed): torch.manual_seed(seed) random.seed(seed) diff --git a/test/datasets_utils.py b/test/datasets_utils.py index 0d78a3273c9..ca182b8d25f 100644 --- a/test/datasets_utils.py +++ b/test/datasets_utils.py @@ -6,9 +6,7 @@ import os import pathlib import random -import shutil import string -import tempfile import unittest import unittest.mock from collections import defaultdict @@ -21,7 +19,7 @@ import torchvision.datasets import torchvision.io -from common_utils import disable_console_output +from common_utils import get_tmp_dir, disable_console_output __all__ = [ @@ -40,18 +38,6 @@ ] -@contextlib.contextmanager -def get_tmp_dir(src=None, **kwargs): - tmp_dir = tempfile.mkdtemp(**kwargs) - if src is not None: - os.rmdir(tmp_dir) - shutil.copytree(src, tmp_dir) - try: - yield tmp_dir - finally: - shutil.rmtree(tmp_dir) - - class UsageError(Exception): """Should be raised in case an error happens in the setup rather than the test.""" diff --git a/test/test_transforms_tensor.py b/test/test_transforms_tensor.py index 69f38626268..5081626fec4 100644 --- a/test/test_transforms_tensor.py +++ b/test/test_transforms_tensor.py @@ -10,6 +10,7 @@ from typing import Sequence from common_utils import ( + get_tmp_dir, int_dtypes, float_dtypes, _create_data, @@ -58,7 +59,7 @@ def _test_functional_op(f, device, fn_kwargs=None, test_exact_match=True, **matc _assert_approx_equal_tensor_to_pil(transformed_tensor, transformed_pil_img, **match_kwargs) -def _test_class_op(method, device, tmpdir, meth_kwargs=None, test_exact_match=True, **match_kwargs): +def _test_class_op(method, device, meth_kwargs=None, test_exact_match=True, **match_kwargs): # TODO: change the name: it's not a method, it's a class. 
meth_kwargs = meth_kwargs or {} @@ -84,12 +85,13 @@ def _test_class_op(method, device, tmpdir, meth_kwargs=None, test_exact_match=Tr batch_tensors = _create_data_batch(height=23, width=34, channels=3, num_samples=4, device=device) _test_transform_vs_scripted_on_batch(f, scripted_fn, batch_tensors) - scripted_fn.save(os.path.join(tmp_dir, f"t_{method.__name__}.pt")) + with get_tmp_dir() as tmp_dir: + scripted_fn.save(os.path.join(tmp_dir, f"t_{method.__name__}.pt")) -def _test_op(func, method, device, tmpdir, fn_kwargs=None, meth_kwargs=None, test_exact_match=True, **match_kwargs): +def _test_op(func, method, device, fn_kwargs=None, meth_kwargs=None, test_exact_match=True, **match_kwargs): _test_functional_op(func, device, fn_kwargs, test_exact_match=test_exact_match, **match_kwargs) - _test_class_op(method, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=test_exact_match, **match_kwargs) + _test_class_op(method, device, meth_kwargs, test_exact_match=test_exact_match, **match_kwargs) @pytest.mark.parametrize('device', cpu_and_gpu()) @@ -107,54 +109,53 @@ def _test_op(func, method, device, tmpdir, fn_kwargs=None, meth_kwargs=None, tes (F.equalize, T.RandomEqualize, None, {}) ] ) +def test_random(func, method, device, fn_kwargs, match_kwargs): + _test_op(func, method, device, fn_kwargs, fn_kwargs, **match_kwargs) + + @pytest.mark.parametrize('device', cpu_and_gpu()) class TestColorJitter: @pytest.mark.parametrize('brightness', [0.1, 0.5, 1.0, 1.34, (0.3, 0.7), [0.4, 0.5]]) - @pytest.fixture() - def test_color_jitter_brightness(self, brightness, device, tmpdir): + def test_color_jitter_brightness(self, brightness, device): tol = 1.0 + 1e-10 meth_kwargs = {"brightness": brightness} _test_class_op( - T.ColorJitter, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, + T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, tol=tol, agg_method="max" ) @pytest.mark.parametrize('contrast', [0.2, 0.5, 1.0, 1.5, (0.3, 0.7), [0.4, 0.5]]) - @pytest.fixture() - def test_color_jitter_contrast(self, contrast, device, tmpdir): + def test_color_jitter_contrast(self, contrast, device): tol = 1.0 + 1e-10 meth_kwargs = {"contrast": contrast} _test_class_op( - T.ColorJitter, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, + T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, tol=tol, agg_method="max" ) @pytest.mark.parametrize('saturation', [0.5, 0.75, 1.0, 1.25, (0.3, 0.7), [0.3, 0.4]]) - @pytest.fixture() - def test_color_jitter_saturation(self, saturation, device, tmpdir): + def test_color_jitter_saturation(self, saturation, device): tol = 1.0 + 1e-10 meth_kwargs = {"saturation": saturation} _test_class_op( - T.ColorJitter, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, + T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, tol=tol, agg_method="max" ) @pytest.mark.parametrize('hue', [0.2, 0.5, (-0.2, 0.3), [-0.4, 0.5]]) - @pytest.fixture() - def test_color_jitter_hue(self, hue, device, tmpdir): + def test_color_jitter_hue(self, hue, device): meth_kwargs = {"hue": hue} _test_class_op( - T.ColorJitter, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, + T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, tol=16.1, agg_method="max" ) - @pytest.fixture() - def test_color_jitter_all(self, device, tmpdir): + def test_color_jitter_all(self, device): # All 4 parameters together meth_kwargs = {"brightness": 0.2, "contrast": 0.2, "saturation": 
0.2, "hue": 0.2} _test_class_op( - T.ColorJitter, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, + T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, tol=12.1, agg_method="max" ) @@ -162,8 +163,7 @@ def test_color_jitter_all(self, device, tmpdir): @pytest.mark.parametrize('device', cpu_and_gpu()) @pytest.mark.parametrize('m', ["constant", "edge", "reflect", "symmetric"]) @pytest.mark.parametrize('mul', [1, -1]) -@pytest.fixture() -def test_pad(m, mul, device, tmpdir): +def test_pad(m, mul, device): fill = 127 if m == "constant" else 0 # Test functional.pad (PIL and Tensor) with padding as single int @@ -174,28 +174,27 @@ def test_pad(m, mul, device, tmpdir): # Test functional.pad and transforms.Pad with padding as [int, ] fn_kwargs = meth_kwargs = {"padding": [mul * 2, ], "fill": fill, "padding_mode": m} _test_op( - F.pad, T.Pad, device, tmpdir, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) # Test functional.pad and transforms.Pad with padding as list fn_kwargs = meth_kwargs = {"padding": [mul * 4, 4], "fill": fill, "padding_mode": m} _test_op( - F.pad, T.Pad, device, tmpdir, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) # Test functional.pad and transforms.Pad with padding as tuple fn_kwargs = meth_kwargs = {"padding": (mul * 2, 2, 2, mul * 2), "fill": fill, "padding_mode": m} _test_op( - F.pad, T.Pad, device, tmpdir, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) @pytest.mark.parametrize('device', cpu_and_gpu()) -@pytest.fixture() -def test_crop(device, tmpdir): +def test_crop(device): fn_kwargs = {"top": 2, "left": 3, "height": 4, "width": 5} # Test transforms.RandomCrop with size and padding as tuple meth_kwargs = {"size": (4, 5), "padding": (4, 4), "pad_if_needed": True, } _test_op( - F.crop, T.RandomCrop, device, tmpdir, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + F.crop, T.RandomCrop, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) # Test transforms.functional.crop including outside the image area @@ -224,26 +223,24 @@ def test_crop(device, tmpdir): {"padding_mode": "reflect"} ]) @pytest.mark.parametrize('size', [5, [5, ], [6, 6]]) -@pytest.fixture() -def test_crop_pad(size, padding_config, device, tmpdir): +def test_crop_pad(size, padding_config, device): config = dict(padding_config) config["size"] = size - _test_class_op(T.RandomCrop, device, tmpdir, config) + _test_class_op(T.RandomCrop, device, config) @pytest.mark.parametrize('device', cpu_and_gpu()) -@pytest.fixture() def test_center_crop(device, tmpdir): fn_kwargs = {"output_size": (4, 5)} meth_kwargs = {"size": (4, 5), } _test_op( - F.center_crop, T.CenterCrop, device, tmpdir, fn_kwargs=fn_kwargs, + F.center_crop, T.CenterCrop, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) fn_kwargs = {"output_size": (5,)} meth_kwargs = {"size": (5,)} _test_op( - F.center_crop, T.CenterCrop, device, tmpdir, fn_kwargs=fn_kwargs, + F.center_crop, T.CenterCrop, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs ) tensor = torch.randint(0, 256, (3, 10, 10), dtype=torch.uint8, device=device) @@ -482,11 +479,10 @@ def test_random_perspective_save(tmpdir): (T.Grayscale, {"num_output_channels": 3}), (T.RandomGrayscale, {}) ]) -@pytest.fixture() -def test_to_grayscale(device, Klass, meth_kwargs, tmpdir): +def test_to_grayscale(device, 
Klass, meth_kwargs): tol = 1.0 + 1e-10 _test_class_op( - Klass, device, tmpdir, meth_kwargs=meth_kwargs, test_exact_match=False, + Klass, meth_kwargs=meth_kwargs, test_exact_match=False, device=device, tol=tol, agg_method="max" ) @@ -685,10 +681,9 @@ def test_random_apply(device): {"kernel_size": (3, 3), "sigma": (0.1, 2.0)}, {"kernel_size": [23], "sigma": 0.75} ]) -@pytest.fixture() -def test_gaussian_blur(device, meth_kwargs, tmpdir): +def test_gaussian_blur(device, meth_kwargs): tol = 1.0 + 1e-10 _test_class_op( - T.GaussianBlur, device, tmpdir, meth_kwargs=meth_kwargs, - test_exact_match=False, agg_method="max", tol=tol + T.GaussianBlur, meth_kwargs=meth_kwargs, + test_exact_match=False, device=device, agg_method="max", tol=tol ) From 3304723cb05d33e158a106963c752277c72082e3 Mon Sep 17 00:00:00 2001 From: Alex Lin Date: Tue, 17 Aug 2021 16:03:21 +0100 Subject: [PATCH 19/19] Fix pylint and imports --- test/common_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/common_utils.py b/test/common_utils.py index 4c3a54e7688..79fce27110d 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -15,6 +15,7 @@ from numbers import Number from torch._six import string_classes from collections import OrderedDict +from torchvision import io import numpy as np from PIL import Image @@ -146,6 +147,7 @@ def _create_data_batch(height=3, width=3, channels=3, num_samples=4, device="cpu assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0) + def get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None): names = [] for i in range(num_videos):
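
For context on the pattern the series converges on: pytest injects a fresh, automatically cleaned-up temporary directory into any test that declares a tmpdir argument, which is what makes the hand-rolled get_tmp_dir() context manager unnecessary inside test bodies (it survives only in common_utils.py for the few remaining callers such as scripted-module saving). A minimal sketch of the shape these conversions take, assuming only pytest; the helper and file names below are illustrative and not taken from the torchvision tests:

import os


def write_marker(root, content="ok"):
    # Hypothetical helper: writes a small file into `root` and returns its path.
    # It takes the directory as a plain argument instead of creating one itself,
    # mirroring how get_list_of_videos() ends up taking tmpdir in this series.
    path = os.path.join(root, "marker.txt")
    with open(path, "w") as fh:
        fh.write(content)
    return path


def test_write_marker(tmpdir):
    # tmpdir is pytest's built-in fixture: a unique per-test directory
    # (a py.path.local object, usable as a string path). pytest handles
    # creation and cleanup, so no try/finally or shutil.rmtree is needed here.
    path = write_marker(str(tmpdir))
    assert os.path.exists(path)
    with open(path) as fh:
        assert fh.read() == "ok"

Recent pytest releases also expose the same directory as tmp_path (a pathlib.Path); either fixture fits the conversions above.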
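
A further note on the @pytest.fixture() decorators that earlier patches in the series added to several test_* functions and that patch 18 strips again: decorating a test with @pytest.fixture() registers it as a fixture, so pytest no longer collects it as a test and it silently stops running (recent pytest versions emit a collection warning about this). A small illustrative sketch of the failure mode, with hypothetical names:

import os

import pytest


@pytest.fixture()
def test_not_collected(tmpdir):
    # Registered as a fixture named "test_not_collected"; pytest skips it
    # during test collection, so this assertion never executes as a test.
    assert os.path.isdir(str(tmpdir))


def test_collected(tmpdir):
    # A plain test function: pytest collects it and injects the tmpdir fixture.
    assert os.path.isdir(str(tmpdir))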