From ec209208bfa295179d4b65cec7a4f16d3b180b99 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 7 Oct 2022 16:31:13 +0100 Subject: [PATCH 01/14] Adding support of Video to missed Transforms and Kernels --- torchvision/prototype/transforms/_deprecated.py | 4 ++-- torchvision/prototype/transforms/_geometry.py | 8 ++++---- torchvision/prototype/transforms/_meta.py | 6 +++--- .../prototype/transforms/functional/_geometry.py | 12 ++++++------ 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/torchvision/prototype/transforms/_deprecated.py b/torchvision/prototype/transforms/_deprecated.py index a9341415c1a..f1225fd5d84 100644 --- a/torchvision/prototype/transforms/_deprecated.py +++ b/torchvision/prototype/transforms/_deprecated.py @@ -29,7 +29,7 @@ def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, class Grayscale(Transform): - _transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor) + _transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video) def __init__(self, num_output_channels: Literal[1, 3] = 1) -> None: deprecation_msg = ( @@ -60,7 +60,7 @@ def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> featur class RandomGrayscale(_RandomApplyTransform): - _transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor) + _transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video) def __init__(self, p: float = 0.1) -> None: warnings.warn( diff --git a/torchvision/prototype/transforms/_geometry.py b/torchvision/prototype/transforms/_geometry.py index 1f132ec9238..75fb27ebf00 100644 --- a/torchvision/prototype/transforms/_geometry.py +++ b/torchvision/prototype/transforms/_geometry.py @@ -172,15 +172,15 @@ class FiveCrop(Transform): torch.Size([5]) """ - _transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor) + _transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video) def __init__(self, size: Union[int, Sequence[int]]) -> None: super().__init__() self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") def _transform( - self, inpt: features.ImageType, params: Dict[str, Any] - ) -> Tuple[features.ImageType, features.ImageType, features.ImageType, features.ImageType, features.ImageType]: + self, inpt: features.ImageOrVideoType, params: Dict[str, Any] + ) -> Tuple[features.ImageOrVideoType, features.ImageOrVideoType, features.ImageOrVideoType, features.ImageOrVideoType, features.ImageOrVideoType]: return F.five_crop(inpt, self.size) def forward(self, *inputs: Any) -> Any: @@ -194,7 +194,7 @@ class TenCrop(Transform): See :class:`~torchvision.prototype.transforms.FiveCrop` for an example. 
""" - _transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor) + _transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video) def __init__(self, size: Union[int, Sequence[int]], vertical_flip: bool = False) -> None: super().__init__() diff --git a/torchvision/prototype/transforms/_meta.py b/torchvision/prototype/transforms/_meta.py index cb090492a48..d9418087bf5 100644 --- a/torchvision/prototype/transforms/_meta.py +++ b/torchvision/prototype/transforms/_meta.py @@ -22,15 +22,15 @@ def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> feat class ConvertImageDtype(Transform): - _transformed_types = (features.is_simple_tensor, features.Image) + _transformed_types = (features.is_simple_tensor, features.Image, features.Video) def __init__(self, dtype: torch.dtype = torch.float32) -> None: super().__init__() self.dtype = dtype - def _transform(self, inpt: features.TensorImageType, params: Dict[str, Any]) -> features.TensorImageType: + def _transform(self, inpt: features.TensorImageOrVideoType, params: Dict[str, Any]) -> features.TensorImageOrVideoType: output = F.convert_image_dtype(inpt, dtype=self.dtype) - return output if features.is_simple_tensor(inpt) else features.Image.new_like(inpt, output, dtype=self.dtype) # type: ignore[arg-type] + return output if features.is_simple_tensor(inpt) else type(inpt).new_like(inpt, output, dtype=self.dtype) # type: ignore[arg-type] class ConvertColorSpace(Transform): diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py index f205b5aeabe..9b9cc5f17f4 100644 --- a/torchvision/prototype/transforms/functional/_geometry.py +++ b/torchvision/prototype/transforms/functional/_geometry.py @@ -1401,15 +1401,15 @@ def five_crop_image_pil( def five_crop( - inpt: features.ImageTypeJIT, size: List[int] + inpt: features.ImageOrVideoTypeJIT, size: List[int] ) -> Tuple[ - features.ImageTypeJIT, features.ImageTypeJIT, features.ImageTypeJIT, features.ImageTypeJIT, features.ImageTypeJIT + features.ImageOrVideoTypeJIT, features.ImageOrVideoTypeJIT, features.ImageOrVideoTypeJIT, features.ImageOrVideoTypeJIT, features.ImageOrVideoTypeJIT ]: # TODO: consider breaking BC here to return List[features.ImageTypeJIT] to align this op with `ten_crop` if isinstance(inpt, torch.Tensor): output = five_crop_image_tensor(inpt, size) - if not torch.jit.is_scripting() and isinstance(inpt, features.Image): - output = tuple(features.Image.new_like(inpt, item) for item in output) # type: ignore[assignment] + if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)): + output = tuple(type(inpt).new_like(inpt, item) for item in output) # type: ignore[assignment] return output else: # isinstance(inpt, PIL.Image.Image): return five_crop_image_pil(inpt, size) @@ -1442,11 +1442,11 @@ def ten_crop_image_pil(image: PIL.Image.Image, size: List[int], vertical_flip: b return [tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip] -def ten_crop(inpt: features.ImageTypeJIT, size: List[int], vertical_flip: bool = False) -> List[features.ImageTypeJIT]: +def ten_crop(inpt: features.ImageOrVideoTypeJIT, size: List[int], vertical_flip: bool = False) -> List[features.ImageOrVideoTypeJIT]: if isinstance(inpt, torch.Tensor): output = ten_crop_image_tensor(inpt, size, vertical_flip=vertical_flip) if not torch.jit.is_scripting() and isinstance(inpt, features.Image): - output = [features.Image.new_like(inpt, item) 
for item in output] + output = [type(inpt).new_like(inpt, item) for item in output] return output else: # isinstance(inpt, PIL.Image.Image): return ten_crop_image_pil(inpt, size, vertical_flip=vertical_flip) From 8c383fbe78daf83ba535b6253ef5cda2871cff96 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 7 Oct 2022 17:05:12 +0100 Subject: [PATCH 02/14] Fixing Grayscale Transform. --- torchvision/prototype/transforms/_deprecated.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/torchvision/prototype/transforms/_deprecated.py b/torchvision/prototype/transforms/_deprecated.py index 80a7564b578..0bb94f8fdf8 100644 --- a/torchvision/prototype/transforms/_deprecated.py +++ b/torchvision/prototype/transforms/_deprecated.py @@ -52,10 +52,10 @@ def __init__(self, num_output_channels: Literal[1, 3] = 1) -> None: super().__init__() self.num_output_channels = num_output_channels - def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> features.ImageType: + def _transform(self, inpt: features.ImageOrVideoType, params: Dict[str, Any]) -> features.ImageOrVideoType: output = _F.rgb_to_grayscale(inpt, num_output_channels=self.num_output_channels) - if isinstance(inpt, features.Image): - output = features.Image.wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) + if isinstance(inpt, (features.Image, features.Video)): + output = type(inpt).wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) return output @@ -81,8 +81,8 @@ def _get_params(self, sample: Any) -> Dict[str, Any]: num_input_channels, _, _ = query_chw(sample) return dict(num_input_channels=num_input_channels) - def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> features.ImageType: + def _transform(self, inpt: features.ImageOrVideoType, params: Dict[str, Any]) -> features.ImageOrVideoType: output = _F.rgb_to_grayscale(inpt, num_output_channels=params["num_input_channels"]) - if isinstance(inpt, features.Image): - output = features.Image.wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) + if isinstance(inpt, (features.Image, features.Video)): + output = type(inpt).wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) return output From 1b78d0ae0779ae2c49dd761e634aad4dd6cef87e Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 7 Oct 2022 17:10:42 +0100 Subject: [PATCH 03/14] Fixing FiveCrop and TenCrop Transforms. --- torchvision/prototype/transforms/_geometry.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/torchvision/prototype/transforms/_geometry.py b/torchvision/prototype/transforms/_geometry.py index 05f8efb4726..a6915bc143d 100644 --- a/torchvision/prototype/transforms/_geometry.py +++ b/torchvision/prototype/transforms/_geometry.py @@ -155,12 +155,12 @@ class FiveCrop(Transform): """ Example: >>> class BatchMultiCrop(transforms.Transform): - ... def forward(self, sample: Tuple[Tuple[features.Image, ...], features.Label]): - ... images, labels = sample - ... batch_size = len(images) - ... images = features.Image.wrap_like(images[0], torch.stack(images)) + ... def forward(self, sample: Tuple[Tuple[Union[features.Image, features.Video], ...], features.Label]): + ... images_or_videos, labels = sample + ... batch_size = len(images_or_videos) + ... images_or_videos = features.Image.wrap_like(images_or_videos[0], torch.stack(images_or_videos)) ... labels = features.Label.wrap_like(labels, labels.repeat(batch_size)) - ... return images, labels + ... return images_or_videos, labels ... 
>>> image = features.Image(torch.rand(3, 256, 256)) >>> label = features.Label(0) @@ -201,7 +201,7 @@ def __init__(self, size: Union[int, Sequence[int]], vertical_flip: bool = False) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") self.vertical_flip = vertical_flip - def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> List[features.ImageType]: + def _transform(self, inpt: features.ImageOrVideoType, params: Dict[str, Any]) -> List[features.ImageOrVideoType]: return F.ten_crop(inpt, self.size, vertical_flip=self.vertical_flip) def forward(self, *inputs: Any) -> Any: From 6fe3c7f53342f55b7f6380bc0d726dfe039c22d2 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 7 Oct 2022 17:26:31 +0100 Subject: [PATCH 04/14] Fix Linter --- torchvision/prototype/features/_video.py | 1 + torchvision/prototype/transforms/_deprecated.py | 4 ++-- torchvision/prototype/transforms/_geometry.py | 8 +++++++- torchvision/prototype/transforms/_meta.py | 8 ++++---- .../prototype/transforms/functional/_geometry.py | 14 ++++++++++---- 5 files changed, 24 insertions(+), 11 deletions(-) diff --git a/torchvision/prototype/features/_video.py b/torchvision/prototype/features/_video.py index a58027243cf..e32c36d5d9f 100644 --- a/torchvision/prototype/features/_video.py +++ b/torchvision/prototype/features/_video.py @@ -238,6 +238,7 @@ def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = N TensorVideoType = Union[torch.Tensor, Video] TensorVideoTypeJIT = torch.Tensor +# TODO: decide if we should do definitions for both Images and Videos or use unions in the methods ImageOrVideoType = Union[ImageType, VideoType] ImageOrVideoTypeJIT = Union[ImageTypeJIT, VideoTypeJIT] TensorImageOrVideoType = Union[TensorImageType, TensorVideoType] diff --git a/torchvision/prototype/transforms/_deprecated.py b/torchvision/prototype/transforms/_deprecated.py index 0bb94f8fdf8..30df276f814 100644 --- a/torchvision/prototype/transforms/_deprecated.py +++ b/torchvision/prototype/transforms/_deprecated.py @@ -55,7 +55,7 @@ def __init__(self, num_output_channels: Literal[1, 3] = 1) -> None: def _transform(self, inpt: features.ImageOrVideoType, params: Dict[str, Any]) -> features.ImageOrVideoType: output = _F.rgb_to_grayscale(inpt, num_output_channels=self.num_output_channels) if isinstance(inpt, (features.Image, features.Video)): - output = type(inpt).wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) + output = type(inpt).wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) # type: ignore[arg-type] return output @@ -84,5 +84,5 @@ def _get_params(self, sample: Any) -> Dict[str, Any]: def _transform(self, inpt: features.ImageOrVideoType, params: Dict[str, Any]) -> features.ImageOrVideoType: output = _F.rgb_to_grayscale(inpt, num_output_channels=params["num_input_channels"]) if isinstance(inpt, (features.Image, features.Video)): - output = type(inpt).wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) + output = type(inpt).wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) # type: ignore[arg-type] return output diff --git a/torchvision/prototype/transforms/_geometry.py b/torchvision/prototype/transforms/_geometry.py index a6915bc143d..4174d3f7e59 100644 --- a/torchvision/prototype/transforms/_geometry.py +++ b/torchvision/prototype/transforms/_geometry.py @@ -180,7 +180,13 @@ def __init__(self, size: Union[int, Sequence[int]]) -> None: def _transform( self, inpt: features.ImageOrVideoType, params: Dict[str, Any] 
- ) -> Tuple[features.ImageOrVideoType, features.ImageOrVideoType, features.ImageOrVideoType, features.ImageOrVideoType, features.ImageOrVideoType]: + ) -> Tuple[ + features.ImageOrVideoType, + features.ImageOrVideoType, + features.ImageOrVideoType, + features.ImageOrVideoType, + features.ImageOrVideoType, + ]: return F.five_crop(inpt, self.size) def forward(self, *inputs: Any) -> Any: diff --git a/torchvision/prototype/transforms/_meta.py b/torchvision/prototype/transforms/_meta.py index 9c00ee9bc2e..e5c7d05b017 100644 --- a/torchvision/prototype/transforms/_meta.py +++ b/torchvision/prototype/transforms/_meta.py @@ -28,12 +28,12 @@ def __init__(self, dtype: torch.dtype = torch.float32) -> None: super().__init__() self.dtype = dtype - def _transform(self, inpt: features.TensorImageOrVideoType, params: Dict[str, Any]) -> features.TensorImageOrVideoType: + def _transform( + self, inpt: features.TensorImageOrVideoType, params: Dict[str, Any] + ) -> features.TensorImageOrVideoType: output = F.convert_image_dtype(inpt, dtype=self.dtype) return ( - output - if features.is_simple_tensor(inpt) - else type(inpt).wrap_like(inpt, output) # type: ignore[arg-type] + output if features.is_simple_tensor(inpt) else type(inpt).wrap_like(inpt, output) # type: ignore[attr-defined] ) diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py index 6cfe7491c2b..710352c7667 100644 --- a/torchvision/prototype/transforms/functional/_geometry.py +++ b/torchvision/prototype/transforms/functional/_geometry.py @@ -1403,13 +1403,17 @@ def five_crop_image_pil( def five_crop( inpt: features.ImageOrVideoTypeJIT, size: List[int] ) -> Tuple[ - features.ImageOrVideoTypeJIT, features.ImageOrVideoTypeJIT, features.ImageOrVideoTypeJIT, features.ImageOrVideoTypeJIT, features.ImageOrVideoTypeJIT + features.ImageOrVideoTypeJIT, + features.ImageOrVideoTypeJIT, + features.ImageOrVideoTypeJIT, + features.ImageOrVideoTypeJIT, + features.ImageOrVideoTypeJIT, ]: # TODO: consider breaking BC here to return List[features.ImageTypeJIT] to align this op with `ten_crop` if isinstance(inpt, torch.Tensor): output = five_crop_image_tensor(inpt, size) if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)): - output = tuple(type(inpt).wrap_like(inpt, item) for item in output) # type: ignore[assignment] + output = tuple(type(inpt).wrap_like(inpt, item) for item in output) # type: ignore[assignment,arg-type] return output else: # isinstance(inpt, PIL.Image.Image): return five_crop_image_pil(inpt, size) @@ -1442,11 +1446,13 @@ def ten_crop_image_pil(image: PIL.Image.Image, size: List[int], vertical_flip: b return [tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip] -def ten_crop(inpt: features.ImageOrVideoTypeJIT, size: List[int], vertical_flip: bool = False) -> List[features.ImageOrVideoTypeJIT]: +def ten_crop( + inpt: features.ImageOrVideoTypeJIT, size: List[int], vertical_flip: bool = False +) -> List[features.ImageOrVideoTypeJIT]: if isinstance(inpt, torch.Tensor): output = ten_crop_image_tensor(inpt, size, vertical_flip=vertical_flip) if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)): - output = [type(inpt).wrap_like(inpt, item) for item in output] + output = [type(inpt).wrap_like(inpt, item) for item in output] # type: ignore[arg-type] return output else: # isinstance(inpt, PIL.Image.Image): return ten_crop_image_pil(inpt, size, vertical_flip=vertical_flip) From 
99b529e198649206fe8116f1912ead5c44cb7204 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 7 Oct 2022 17:43:56 +0100 Subject: [PATCH 05/14] Fix more kernels. --- torchvision/prototype/features/__init__.py | 12 +++++++++++- .../prototype/transforms/functional/_augment.py | 1 + .../prototype/transforms/functional/_deprecated.py | 8 +++++--- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/torchvision/prototype/features/__init__.py b/torchvision/prototype/features/__init__.py index 6fc2fb6ea94..944ae9bd3c6 100644 --- a/torchvision/prototype/features/__init__.py +++ b/torchvision/prototype/features/__init__.py @@ -13,4 +13,14 @@ ) from ._label import Label, OneHotLabel from ._mask import Mask -from ._video import ImageOrVideoType, ImageOrVideoTypeJIT, TensorImageOrVideoType, TensorImageOrVideoTypeJIT, Video +from ._video import ( + ImageOrVideoType, + ImageOrVideoTypeJIT, + LegacyVideoType, + LegacyVideoTypeJIT, + TensorImageOrVideoType, + TensorImageOrVideoTypeJIT, + Video, + VideoType, + VideoTypeJIT, +) diff --git a/torchvision/prototype/transforms/functional/_augment.py b/torchvision/prototype/transforms/functional/_augment.py index 847343dbf20..609efface3e 100644 --- a/torchvision/prototype/transforms/functional/_augment.py +++ b/torchvision/prototype/transforms/functional/_augment.py @@ -20,6 +20,7 @@ def erase_image_pil( def erase_video( video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False ) -> torch.Tensor: + # TODO: Not actally used by the dispatcher. Should we remove kernels that are just redirects? return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace) diff --git a/torchvision/prototype/transforms/functional/_deprecated.py b/torchvision/prototype/transforms/functional/_deprecated.py index cbdea5130ef..fa206915ea7 100644 --- a/torchvision/prototype/transforms/functional/_deprecated.py +++ b/torchvision/prototype/transforms/functional/_deprecated.py @@ -1,5 +1,5 @@ import warnings -from typing import Any, List +from typing import Any, List, Union import PIL.Image import torch @@ -22,7 +22,9 @@ def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Ima return _F.to_grayscale(inpt, num_output_channels=num_output_channels) -def rgb_to_grayscale(inpt: features.LegacyImageTypeJIT, num_output_channels: int = 1) -> features.LegacyImageTypeJIT: +def rgb_to_grayscale( + inpt: Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT], num_output_channels: int = 1 +) -> Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT]: old_color_space = ( features._image._from_tensor_shape(inpt.shape) # type: ignore[arg-type] if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, features.Image)) @@ -56,7 +58,7 @@ def to_tensor(inpt: Any) -> torch.Tensor: return _F.to_tensor(inpt) -def get_image_size(inpt: features.ImageTypeJIT) -> List[int]: +def get_image_size(inpt: features.ImageOrVideoTypeJIT) -> List[int]: warnings.warn( "The function `get_image_size(...)` is deprecated and will be removed in a future release. " "Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`." 
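
The recurring change in patches 01 through 05 is twofold: `features.Video` joins `_transformed_types`, and the hard-coded `features.Image.new_like`/`wrap_like` calls become `type(inpt).wrap_like(...)` so that a Video input yields Video outputs. Below is a minimal, self-contained sketch of that dispatch idea; `Feature`, `Image`, and `Video` here are illustrative stand-ins with a simplified `wrap_like`, not the actual prototype classes.

```python
import torch


class Feature(torch.Tensor):
    """Illustrative stand-in for the prototype feature base class (not the real one)."""

    @classmethod
    def wrap_like(cls, other: "Feature", tensor: torch.Tensor) -> "Feature":
        # The real implementation also carries metadata (e.g. color_space) over from `other`;
        # here we only re-wrap the plain tensor in the same feature subclass.
        return tensor.as_subclass(cls)


class Image(Feature):
    pass


class Video(Feature):
    pass


def grayscale_like(inpt: Feature) -> Feature:
    output = inpt.mean(dim=-3, keepdim=True)  # toy kernel: average over the channel dim
    # type(inpt) resolves to Image or Video, so the result keeps the input's feature type,
    # which is exactly why the patches drop the hard-coded `features.Image` call sites.
    return type(inpt).wrap_like(inpt, output)


img = torch.rand(3, 8, 8).as_subclass(Image)
vid = torch.rand(4, 3, 8, 8).as_subclass(Video)  # (T, C, H, W)
assert isinstance(grayscale_like(img), Image)
assert isinstance(grayscale_like(vid), Video)
```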
From d896c3dbe30fddee7b584ad488861dac58b98277 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 7 Oct 2022 17:55:41 +0100 Subject: [PATCH 06/14] Add `five_crop_video` and `ten_crop_video` kernels --- .../prototype/transforms/functional/__init__.py | 2 ++ .../prototype/transforms/functional/_geometry.py | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/torchvision/prototype/transforms/functional/__init__.py b/torchvision/prototype/transforms/functional/__init__.py index 1e918cc3492..579442dc7b9 100644 --- a/torchvision/prototype/transforms/functional/__init__.py +++ b/torchvision/prototype/transforms/functional/__init__.py @@ -96,6 +96,7 @@ five_crop, five_crop_image_pil, five_crop_image_tensor, + five_crop_video, hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file horizontal_flip, horizontal_flip_bounding_box, @@ -136,6 +137,7 @@ ten_crop, ten_crop_image_pil, ten_crop_image_tensor, + ten_crop_video, vertical_flip, vertical_flip_bounding_box, vertical_flip_image_pil, diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py index 0842ad930b1..e7a6b385ea9 100644 --- a/torchvision/prototype/transforms/functional/_geometry.py +++ b/torchvision/prototype/transforms/functional/_geometry.py @@ -1405,6 +1405,12 @@ def five_crop_image_pil( return tl, tr, bl, br, center +def five_crop_video( + video: torch.Tensor, size: List[int] +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + return five_crop_image_tensor(video, size) + + def five_crop( inpt: features.ImageOrVideoTypeJIT, size: List[int] ) -> Tuple[ @@ -1414,7 +1420,7 @@ def five_crop( features.ImageOrVideoTypeJIT, features.ImageOrVideoTypeJIT, ]: - # TODO: consider breaking BC here to return List[features.ImageTypeJIT] to align this op with `ten_crop` + # TODO: consider breaking BC here to return List[features.ImageOrVideoTypeJIT] to align this op with `ten_crop` if isinstance(inpt, torch.Tensor): output = five_crop_image_tensor(inpt, size) if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)): @@ -1451,6 +1457,10 @@ def ten_crop_image_pil(image: PIL.Image.Image, size: List[int], vertical_flip: b return [tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip] +def ten_crop_video(video: torch.Tensor, size: List[int], vertical_flip: bool = False) -> List[torch.Tensor]: + return ten_crop_image_tensor(video, size, vertical_flip=vertical_flip) + + def ten_crop( inpt: features.ImageOrVideoTypeJIT, size: List[int], vertical_flip: bool = False ) -> List[features.ImageOrVideoTypeJIT]: From fb4b76e7bfc921d2690266a4577eec68462475f4 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 7 Oct 2022 18:01:40 +0100 Subject: [PATCH 07/14] Added a TODO. --- torchvision/prototype/transforms/functional/_meta.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/torchvision/prototype/transforms/functional/_meta.py b/torchvision/prototype/transforms/functional/_meta.py index e24b68c9fd6..c03d65c951b 100644 --- a/torchvision/prototype/transforms/functional/_meta.py +++ b/torchvision/prototype/transforms/functional/_meta.py @@ -55,6 +55,10 @@ def get_spatial_size_image_pil(image: PIL.Image.Image) -> List[int]: return [height, width] +# TODO: Should we have get_spatial_size_video here? How about masks/bbox etc? What is the criterion for deciding when +# a kernel will be created? 
+ + def get_spatial_size(inpt: features.InputTypeJIT) -> List[int]: if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, features._Feature)): return get_spatial_size_image_tensor(inpt) @@ -246,7 +250,7 @@ def convert_color_space( ): if old_color_space is None: raise RuntimeError( - "In order to convert the color space of simple tensor images, " + "In order to convert the color space of simple tensors, " "the `old_color_space=...` parameter needs to be passed." ) return convert_color_space_image_tensor( From 25cacbdb1343e2c3344546154427ebf6e1f49ad6 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Mon, 10 Oct 2022 09:32:39 +0100 Subject: [PATCH 08/14] Missed Video isinstance --- torchvision/prototype/transforms/functional/_augment.py | 1 - torchvision/prototype/transforms/functional/_deprecated.py | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/torchvision/prototype/transforms/functional/_augment.py b/torchvision/prototype/transforms/functional/_augment.py index 609efface3e..847343dbf20 100644 --- a/torchvision/prototype/transforms/functional/_augment.py +++ b/torchvision/prototype/transforms/functional/_augment.py @@ -20,7 +20,6 @@ def erase_image_pil( def erase_video( video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False ) -> torch.Tensor: - # TODO: Not actally used by the dispatcher. Should we remove kernels that are just redirects? return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace) diff --git a/torchvision/prototype/transforms/functional/_deprecated.py b/torchvision/prototype/transforms/functional/_deprecated.py index fa206915ea7..854920b968a 100644 --- a/torchvision/prototype/transforms/functional/_deprecated.py +++ b/torchvision/prototype/transforms/functional/_deprecated.py @@ -27,7 +27,8 @@ def rgb_to_grayscale( ) -> Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT]: old_color_space = ( features._image._from_tensor_shape(inpt.shape) # type: ignore[arg-type] - if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, features.Image)) + if isinstance(inpt, torch.Tensor) + and (torch.jit.is_scripting() or not isinstance(inpt, (features.Image, features.Video))) else None ) From 7e7701d8a18ca6ce4dfa1f8a28e1553b28e91ca9 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Mon, 10 Oct 2022 09:54:58 +0100 Subject: [PATCH 09/14] nits --- torchvision/prototype/transforms/functional/_geometry.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py index e7a6b385ea9..b3b13ea1ffe 100644 --- a/torchvision/prototype/transforms/functional/_geometry.py +++ b/torchvision/prototype/transforms/functional/_geometry.py @@ -1424,7 +1424,8 @@ def five_crop( if isinstance(inpt, torch.Tensor): output = five_crop_image_tensor(inpt, size) if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)): - output = tuple(type(inpt).wrap_like(inpt, item) for item in output) # type: ignore[assignment,arg-type] + cls = type(inpt) + output = tuple(cls.wrap_like(inpt, item) for item in output) # type: ignore[assignment,arg-type] return output else: # isinstance(inpt, PIL.Image.Image): return five_crop_image_pil(inpt, size) @@ -1467,7 +1468,8 @@ def ten_crop( if isinstance(inpt, torch.Tensor): output = ten_crop_image_tensor(inpt, size, vertical_flip=vertical_flip) if not torch.jit.is_scripting() 
and isinstance(inpt, (features.Image, features.Video)): - output = [type(inpt).wrap_like(inpt, item) for item in output] # type: ignore[arg-type] + cls = type(inpt) + output = [cls.wrap_like(inpt, item) for item in output] # type: ignore[arg-type] return output else: # isinstance(inpt, PIL.Image.Image): return ten_crop_image_pil(inpt, size, vertical_flip=vertical_flip) From 9cbec19aa868363762d73d5246c5bf29f311cdd9 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Mon, 10 Oct 2022 10:29:34 +0100 Subject: [PATCH 10/14] Fix bug on AugMix --- torchvision/prototype/transforms/_auto_augment.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/torchvision/prototype/transforms/_auto_augment.py b/torchvision/prototype/transforms/_auto_augment.py index 6ef9edba354..aaaa0783467 100644 --- a/torchvision/prototype/transforms/_auto_augment.py +++ b/torchvision/prototype/transforms/_auto_augment.py @@ -483,7 +483,8 @@ def forward(self, *inputs: Any) -> Any: augmentation_space = self._AUGMENTATION_SPACE if self.all_ops else self._PARTIAL_AUGMENTATION_SPACE orig_dims = list(image_or_video.shape) - batch = image_or_video.view([1] * max(4 - image_or_video.ndim, 0) + orig_dims) + expected_dim = 5 if isinstance(orig_image_or_video, features.Video) else 4 + batch = image_or_video.view([1] * max(expected_dim - image_or_video.ndim, 0) + orig_dims) batch_dims = [batch.size(0)] + [1] * (batch.ndim - 1) # Sample the beta weights for combining the original and augmented image or video. To get Beta, we use a From 11bc8c2b73366affc3fc28858c2b605cad280b13 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Mon, 10 Oct 2022 11:31:03 +0100 Subject: [PATCH 11/14] Nits and TODOs. --- torchvision/prototype/transforms/_augment.py | 1 + torchvision/prototype/transforms/_auto_augment.py | 2 +- torchvision/prototype/transforms/_color.py | 2 +- torchvision/prototype/transforms/_deprecated.py | 4 ++-- torchvision/prototype/transforms/_misc.py | 1 + torchvision/prototype/transforms/functional/_augment.py | 2 +- torchvision/prototype/transforms/functional/_geometry.py | 6 ++---- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/torchvision/prototype/transforms/_augment.py b/torchvision/prototype/transforms/_augment.py index bcab0a3f454..7b2dca8a601 100644 --- a/torchvision/prototype/transforms/_augment.py +++ b/torchvision/prototype/transforms/_augment.py @@ -99,6 +99,7 @@ def _transform(self, inpt: features.ImageOrVideoType, params: Dict[str, Any]) -> return inpt +# TODO: Add support for Video: https://github.com/pytorch/vision/issues/6731 class _BaseMixupCutmix(_RandomApplyTransform): def __init__(self, alpha: float, p: float = 0.5) -> None: super().__init__(p=p) diff --git a/torchvision/prototype/transforms/_auto_augment.py b/torchvision/prototype/transforms/_auto_augment.py index aaaa0783467..d078cb2d1cb 100644 --- a/torchvision/prototype/transforms/_auto_augment.py +++ b/torchvision/prototype/transforms/_auto_augment.py @@ -521,7 +521,7 @@ def forward(self, *inputs: Any) -> Any: mix = mix.view(orig_dims).to(dtype=image_or_video.dtype) if isinstance(orig_image_or_video, (features.Image, features.Video)): - mix = type(orig_image_or_video).wrap_like(orig_image_or_video, mix) # type: ignore[arg-type] + mix = orig_image_or_video.wrap_like(orig_image_or_video, mix) # type: ignore[arg-type] elif isinstance(orig_image_or_video, PIL.Image.Image): mix = F.to_image_pil(mix) diff --git a/torchvision/prototype/transforms/_color.py b/torchvision/prototype/transforms/_color.py index 
67a6cc3cc3f..340e721dab9 100644 --- a/torchvision/prototype/transforms/_color.py +++ b/torchvision/prototype/transforms/_color.py @@ -119,7 +119,7 @@ def _permute_channels( output = inpt[..., permutation, :, :] if isinstance(inpt, (features.Image, features.Video)): - output = type(inpt).wrap_like(inpt, output, color_space=features.ColorSpace.OTHER) # type: ignore[arg-type] + output = inpt.wrap_like(inpt, output, color_space=features.ColorSpace.OTHER) # type: ignore[arg-type] elif isinstance(inpt, PIL.Image.Image): output = F.to_image_pil(output) diff --git a/torchvision/prototype/transforms/_deprecated.py b/torchvision/prototype/transforms/_deprecated.py index 30df276f814..f8aec22b96c 100644 --- a/torchvision/prototype/transforms/_deprecated.py +++ b/torchvision/prototype/transforms/_deprecated.py @@ -55,7 +55,7 @@ def __init__(self, num_output_channels: Literal[1, 3] = 1) -> None: def _transform(self, inpt: features.ImageOrVideoType, params: Dict[str, Any]) -> features.ImageOrVideoType: output = _F.rgb_to_grayscale(inpt, num_output_channels=self.num_output_channels) if isinstance(inpt, (features.Image, features.Video)): - output = type(inpt).wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) # type: ignore[arg-type] + output = inpt.wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) # type: ignore[arg-type] return output @@ -84,5 +84,5 @@ def _get_params(self, sample: Any) -> Dict[str, Any]: def _transform(self, inpt: features.ImageOrVideoType, params: Dict[str, Any]) -> features.ImageOrVideoType: output = _F.rgb_to_grayscale(inpt, num_output_channels=params["num_input_channels"]) if isinstance(inpt, (features.Image, features.Video)): - output = type(inpt).wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) # type: ignore[arg-type] + output = inpt.wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) # type: ignore[arg-type] return output diff --git a/torchvision/prototype/transforms/_misc.py b/torchvision/prototype/transforms/_misc.py index dd1e1cdf8a1..d3c8a57dc80 100644 --- a/torchvision/prototype/transforms/_misc.py +++ b/torchvision/prototype/transforms/_misc.py @@ -140,6 +140,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: return F.gaussian_blur(inpt, self.kernel_size, **params) +# TODO: Enhance as described at https://github.com/pytorch/vision/issues/6697 class ToDtype(Lambda): def __init__(self, dtype: torch.dtype, *types: Type) -> None: self.dtype = dtype diff --git a/torchvision/prototype/transforms/functional/_augment.py b/torchvision/prototype/transforms/functional/_augment.py index 847343dbf20..57c3602cc14 100644 --- a/torchvision/prototype/transforms/functional/_augment.py +++ b/torchvision/prototype/transforms/functional/_augment.py @@ -35,7 +35,7 @@ def erase( if isinstance(inpt, torch.Tensor): output = erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace) if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)): - output = type(inpt).wrap_like(inpt, output) # type: ignore[arg-type] + output = inpt.wrap_like(inpt, output) # type: ignore[arg-type] return output else: # isinstance(inpt, PIL.Image.Image): return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace) diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py index b3b13ea1ffe..33b96d5bf49 100644 --- a/torchvision/prototype/transforms/functional/_geometry.py +++ b/torchvision/prototype/transforms/functional/_geometry.py @@ -1424,8 
+1424,7 @@ def five_crop( if isinstance(inpt, torch.Tensor): output = five_crop_image_tensor(inpt, size) if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)): - cls = type(inpt) - output = tuple(cls.wrap_like(inpt, item) for item in output) # type: ignore[assignment,arg-type] + output = (inpt.wrap_like(inpt, item) for item in output) # type: ignore[arg-type] return output else: # isinstance(inpt, PIL.Image.Image): return five_crop_image_pil(inpt, size) @@ -1468,8 +1467,7 @@ def ten_crop( if isinstance(inpt, torch.Tensor): output = ten_crop_image_tensor(inpt, size, vertical_flip=vertical_flip) if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)): - cls = type(inpt) - output = [cls.wrap_like(inpt, item) for item in output] # type: ignore[arg-type] + output = [inpt.wrap_like(inpt, item) for item in output] # type: ignore[arg-type] return output else: # isinstance(inpt, PIL.Image.Image): return ten_crop_image_pil(inpt, size, vertical_flip=vertical_flip) From ac58aecbe096d30557d28c4e475f8c9a65204b50 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Mon, 10 Oct 2022 11:42:54 +0100 Subject: [PATCH 12/14] Reapply Philip's recommendation --- torchvision/prototype/transforms/_geometry.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/torchvision/prototype/transforms/_geometry.py b/torchvision/prototype/transforms/_geometry.py index 4174d3f7e59..371ea7f69c5 100644 --- a/torchvision/prototype/transforms/_geometry.py +++ b/torchvision/prototype/transforms/_geometry.py @@ -158,7 +158,8 @@ class FiveCrop(Transform): ... def forward(self, sample: Tuple[Tuple[Union[features.Image, features.Video], ...], features.Label]): ... images_or_videos, labels = sample ... batch_size = len(images_or_videos) - ... images_or_videos = features.Image.wrap_like(images_or_videos[0], torch.stack(images_or_videos)) + ... image_or_video = images_or_videos[0] + ... images_or_videos = image_or_video.wrap_like(image_or_video, torch.stack(images_or_videos)) ... labels = features.Label.wrap_like(labels, labels.repeat(batch_size)) ... return images_or_videos, labels ... 
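
With the docstring example above settled, the net effect of the series on the five/ten-crop path is that cropping a `features.Video` returns crops that are still `features.Video`. The snippet below is a hedged usage sketch against the prototype API as it appears in these diffs; the import paths and the `(T, C, H, W)` layout are assumptions, since the prototype namespace is unstable.

```python
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F

# A short clip laid out as (T, C, H, W); metadata defaults are assumed.
video = features.Video(torch.rand(4, 3, 64, 64))

# After these patches, the crops come back as a 5-tuple and each element is
# re-wrapped as a features.Video rather than degraded to a plain tensor.
tl, tr, bl, br, center = F.five_crop(video, [32, 32])
assert all(isinstance(crop, features.Video) for crop in (tl, tr, bl, br, center))
assert center.shape[-2:] == (32, 32)
```

The analogous check for `ten_crop` would iterate over a ten-element list rather than unpack a five-tuple, matching the `List[...]` return type in the dispatcher above.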
From da98c37d53a2a51e5939c5833367b64b558cde6c Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Mon, 10 Oct 2022 11:57:15 +0100 Subject: [PATCH 13/14] Fix mypy and JIT --- torchvision/prototype/transforms/functional/_geometry.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py index 1ddaa8ed4b0..0651681e1b6 100644 --- a/torchvision/prototype/transforms/functional/_geometry.py +++ b/torchvision/prototype/transforms/functional/_geometry.py @@ -1420,7 +1420,8 @@ def five_crop( if isinstance(inpt, torch.Tensor): output = five_crop_image_tensor(inpt, size) if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)): - output = (inpt.wrap_like(inpt, item) for item in output) # type: ignore[arg-type] + tmp = (inpt.wrap_like(inpt, item) for item in output) # type: ignore[arg-type] + output = tmp # type: ignore[assignment] return output else: # isinstance(inpt, PIL.Image.Image): return five_crop_image_pil(inpt, size) From 5f33ef174e1c4c4620a3eec18aafda75c07f3ed8 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Mon, 10 Oct 2022 12:20:54 +0100 Subject: [PATCH 14/14] Fixing test --- torchvision/prototype/transforms/functional/_geometry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py index 0651681e1b6..47d04489116 100644 --- a/torchvision/prototype/transforms/functional/_geometry.py +++ b/torchvision/prototype/transforms/functional/_geometry.py @@ -1420,7 +1420,7 @@ def five_crop( if isinstance(inpt, torch.Tensor): output = five_crop_image_tensor(inpt, size) if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)): - tmp = (inpt.wrap_like(inpt, item) for item in output) # type: ignore[arg-type] + tmp = tuple(inpt.wrap_like(inpt, item) for item in output) # type: ignore[arg-type] output = tmp # type: ignore[assignment] return output else: # isinstance(inpt, PIL.Image.Image):