From ee93c07c58b7f56ff16867c15947d0761cb13f22 Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Tue, 2 Aug 2022 14:16:41 -0600
Subject: [PATCH 1/3] Add missing None type hint to init functions

---
 torchvision/datasets/_optical_flow.py         | 12 ++--
 torchvision/datasets/_stereo_matching.py      |  8 +--
 torchvision/datasets/folder.py                |  2 +-
 torchvision/datasets/kitti.py                 |  2 +-
 torchvision/datasets/lfw.py                   |  6 +-
 torchvision/datasets/oxford_iiit_pet.py       |  2 +-
 torchvision/datasets/pcam.py                  |  2 +-
 torchvision/datasets/voc.py                   |  2 +-
 torchvision/models/_api.py                    |  2 +-
 torchvision/models/detection/anchor_utils.py  |  2 +-
 torchvision/models/detection/faster_rcnn.py   |  8 +--
 torchvision/models/detection/fcos.py          |  4 +-
 torchvision/models/detection/keypoint_rcnn.py |  6 +-
 torchvision/models/detection/mask_rcnn.py     |  6 +-
 torchvision/models/detection/retinanet.py     |  8 +--
 torchvision/models/detection/roi_heads.py     |  2 +-
 torchvision/models/detection/ssd.py           | 12 ++--
 torchvision/models/detection/ssdlite.py       |  8 +--
 torchvision/models/detection/transform.py     |  2 +-
 torchvision/models/feature_extraction.py      |  6 +-
 torchvision/models/mobilenetv2.py             |  2 +-
 torchvision/models/mobilenetv3.py             |  4 +-
 torchvision/models/optical_flow/raft.py       | 16 ++---
 torchvision/models/swin_transformer.py        |  8 +--
 torchvision/models/vision_transformer.py      |  2 +-
 torchvision/ops/deform_conv.py                |  2 +-
 torchvision/ops/feature_pyramid_network.py    |  4 +-
 torchvision/ops/misc.py                       |  6 +-
 torchvision/ops/poolers.py                    |  4 +-
 torchvision/ops/ps_roi_align.py               |  2 +-
 torchvision/ops/ps_roi_pool.py                |  2 +-
 torchvision/ops/roi_align.py                  |  2 +-
 torchvision/ops/roi_pool.py                   |  2 +-
 torchvision/prototype/datasets/benchmark.py   |  6 +-
 .../models/depth/stereo/raft_stereo.py        | 14 ++---
 .../prototype/transforms/_auto_augment.py     |  2 +-
 torchvision/prototype/transforms/_geometry.py |  2 +-
 torchvision/prototype/transforms/_misc.py     |  4 +-
 .../prototype/transforms/_type_conversion.py  |  2 +-
 torchvision/transforms/_transforms_video.py   | 10 ++--
 torchvision/transforms/transforms.py          | 58 +++++++++----------
 41 files changed, 128 insertions(+), 128 deletions(-)
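The change below is mechanical but not cosmetic. PEP 484 specifies that `__init__` ought to be annotated with `-> None`, because type checkers such as mypy skip the body of any function that carries no annotations at all; the single return hint is what switches checking on. A minimal sketch of the effect (an illustrative class, not taken from this diff):

    class Dataset:
        # With no annotations anywhere in the signature, mypy would leave
        # this body unchecked. The `-> None` below is enough to make the
        # function count as "annotated", so its body and the call sites
        # of Dataset(...) are type-checked.
        def __init__(self, root, transforms=None) -> None:
            self.root = root
            self.transforms = transforms
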
""" - def __init__(self, root, split="train", transforms=None): + def __init__(self, root, split="train", transforms=None) -> None: super().__init__(root=root, transforms=transforms) verify_str_arg(split, "split", valid_values=("train", "val")) @@ -316,7 +316,7 @@ class FlyingThings3D(FlowDataset): return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`. """ - def __init__(self, root, split="train", pass_name="clean", camera="left", transforms=None): + def __init__(self, root, split="train", pass_name="clean", camera="left", transforms=None) -> None: super().__init__(root=root, transforms=transforms) verify_str_arg(split, "split", valid_values=("train", "test")) @@ -401,7 +401,7 @@ class HD1K(FlowDataset): _has_builtin_flow_mask = True - def __init__(self, root, split="train", transforms=None): + def __init__(self, root, split="train", transforms=None) -> None: super().__init__(root=root, transforms=transforms) verify_str_arg(split, "split", valid_values=("train", "test")) diff --git a/torchvision/datasets/_stereo_matching.py b/torchvision/datasets/_stereo_matching.py index de213fc0368..237a4be6f10 100644 --- a/torchvision/datasets/_stereo_matching.py +++ b/torchvision/datasets/_stereo_matching.py @@ -20,7 +20,7 @@ class StereoMatchingDataset(ABC, VisionDataset): _has_built_in_disparity_mask = False - def __init__(self, root: str, transforms: Optional[Callable] = None): + def __init__(self, root: str, transforms: Optional[Callable] = None) -> None: """ Args: root(str): Root directory of the dataset. @@ -152,7 +152,7 @@ class CarlaStereo(StereoMatchingDataset): transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version. """ - def __init__(self, root: str, transforms: Optional[Callable] = None): + def __init__(self, root: str, transforms: Optional[Callable] = None) -> None: super().__init__(root, transforms) root = Path(root) / "carla-highres" @@ -229,7 +229,7 @@ class Kitti2012Stereo(StereoMatchingDataset): _has_built_in_disparity_mask = True - def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None): + def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None: super().__init__(root, transforms) verify_str_arg(split, "split", valid_values=("train", "test")) @@ -317,7 +317,7 @@ class Kitti2015Stereo(StereoMatchingDataset): _has_built_in_disparity_mask = True - def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None): + def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None: super().__init__(root, transforms) verify_str_arg(split, "split", valid_values=("train", "test")) diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py index 40d5e26d242..2b1748102ed 100644 --- a/torchvision/datasets/folder.py +++ b/torchvision/datasets/folder.py @@ -305,7 +305,7 @@ def __init__( target_transform: Optional[Callable] = None, loader: Callable[[str], Any] = default_loader, is_valid_file: Optional[Callable[[str], bool]] = None, - ): + ) -> None: super().__init__( root, loader, diff --git a/torchvision/datasets/kitti.py b/torchvision/datasets/kitti.py index c166a25c7d8..4f00f39999c 100644 --- a/torchvision/datasets/kitti.py +++ b/torchvision/datasets/kitti.py @@ -57,7 +57,7 @@ def __init__( target_transform: Optional[Callable] = None, transforms: Optional[Callable] = None, download: bool = False, - ): + ) -> None: super().__init__( root, transform=transform, diff 
--git a/torchvision/datasets/lfw.py b/torchvision/datasets/lfw.py index a25765d5725..b1f1fda812c 100644 --- a/torchvision/datasets/lfw.py +++ b/torchvision/datasets/lfw.py @@ -38,7 +38,7 @@ def __init__( transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, - ): + ) -> None: super().__init__(os.path.join(root, self.base_folder), transform=transform, target_transform=target_transform) self.image_set = verify_str_arg(image_set.lower(), "image_set", self.file_dict.keys()) @@ -119,7 +119,7 @@ def __init__( transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, - ): + ) -> None: super().__init__(root, split, image_set, "people", transform, target_transform, download) self.class_to_idx = self._get_classes() @@ -201,7 +201,7 @@ def __init__( transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, - ): + ) -> None: super().__init__(root, split, image_set, "pairs", transform, target_transform, download) self.pair_names, self.data, self.targets = self._get_pairs(self.images_dir) diff --git a/torchvision/datasets/oxford_iiit_pet.py b/torchvision/datasets/oxford_iiit_pet.py index 667ee13717d..ea71415c587 100644 --- a/torchvision/datasets/oxford_iiit_pet.py +++ b/torchvision/datasets/oxford_iiit_pet.py @@ -45,7 +45,7 @@ def __init__( transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, - ): + ) -> None: self._split = verify_str_arg(split, "split", ("trainval", "test")) if isinstance(target_types, str): target_types = [target_types] diff --git a/torchvision/datasets/pcam.py b/torchvision/datasets/pcam.py index 63faf721a0f..c118520212f 100644 --- a/torchvision/datasets/pcam.py +++ b/torchvision/datasets/pcam.py @@ -73,7 +73,7 @@ def __init__( transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, - ): + ) -> None: try: import h5py diff --git a/torchvision/datasets/voc.py b/torchvision/datasets/voc.py index 32888cd5c8c..d4a7f62f3c3 100644 --- a/torchvision/datasets/voc.py +++ b/torchvision/datasets/voc.py @@ -75,7 +75,7 @@ def __init__( transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, transforms: Optional[Callable] = None, - ): + ) -> None: super().__init__(root, transforms, transform, target_transform) if year == "2007-test": if image_set == "test": diff --git a/torchvision/models/_api.py b/torchvision/models/_api.py index 4df988dcc9a..bb89bf990f6 100644 --- a/torchvision/models/_api.py +++ b/torchvision/models/_api.py @@ -48,7 +48,7 @@ class WeightsEnum(StrEnum): value (Weights): The data class entry with the weight information. 
""" - def __init__(self, value: Weights): + def __init__(self, value: Weights) -> None: self._value_ = value @classmethod diff --git a/torchvision/models/detection/anchor_utils.py b/torchvision/models/detection/anchor_utils.py index 34fb8d23069..7f490f21d25 100644 --- a/torchvision/models/detection/anchor_utils.py +++ b/torchvision/models/detection/anchor_utils.py @@ -159,7 +159,7 @@ def __init__( scales: Optional[List[float]] = None, steps: Optional[List[int]] = None, clip: bool = True, - ): + ) -> None: super().__init__() if steps is not None and len(aspect_ratios) != len(steps): raise ValueError("aspect_ratios and steps should have the same length") diff --git a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py index 3160e8e89b3..ee83647949b 100644 --- a/torchvision/models/detection/faster_rcnn.py +++ b/torchvision/models/detection/faster_rcnn.py @@ -196,7 +196,7 @@ def __init__( box_positive_fraction=0.25, bbox_reg_weights=None, **kwargs, - ): + ) -> None: if not hasattr(backbone, "out_channels"): raise ValueError( @@ -289,7 +289,7 @@ class TwoMLPHead(nn.Module): representation_size (int): size of the intermediate representation """ - def __init__(self, in_channels, representation_size): + def __init__(self, in_channels, representation_size) -> None: super().__init__() self.fc6 = nn.Linear(in_channels, representation_size) @@ -311,7 +311,7 @@ def __init__( conv_layers: List[int], fc_layers: List[int], norm_layer: Optional[Callable[..., nn.Module]] = None, - ): + ) -> None: """ Args: input_size (Tuple[int, int, int]): the input size in CHW format. @@ -351,7 +351,7 @@ class FastRCNNPredictor(nn.Module): num_classes (int): number of output classes (including background) """ - def __init__(self, in_channels, num_classes): + def __init__(self, in_channels, num_classes) -> None: super().__init__() self.cls_score = nn.Linear(in_channels, num_classes) self.bbox_pred = nn.Linear(in_channels, num_classes * 4) diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 73c9a6e042d..115646337fe 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -218,7 +218,7 @@ def __init__( num_anchors: int, num_convs: int = 4, norm_layer: Optional[Callable[..., nn.Module]] = None, - ): + ) -> None: super().__init__() if norm_layer is None: @@ -376,7 +376,7 @@ def __init__( detections_per_img: int = 100, topk_candidates: int = 1000, **kwargs, - ): + ) -> None: super().__init__() _log_api_usage_once(self) diff --git a/torchvision/models/detection/keypoint_rcnn.py b/torchvision/models/detection/keypoint_rcnn.py index 21fb53c2a49..5ab4a3c2671 100644 --- a/torchvision/models/detection/keypoint_rcnn.py +++ b/torchvision/models/detection/keypoint_rcnn.py @@ -199,7 +199,7 @@ def __init__( keypoint_predictor=None, num_keypoints=None, **kwargs, - ): + ) -> None: if not isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None))): raise TypeError( @@ -269,7 +269,7 @@ def __init__( class KeypointRCNNHeads(nn.Sequential): - def __init__(self, in_channels, layers): + def __init__(self, in_channels, layers) -> None: d = [] next_feature = in_channels for out_channels in layers: @@ -284,7 +284,7 @@ def __init__(self, in_channels, layers): class KeypointRCNNPredictor(nn.Module): - def __init__(self, in_channels, num_keypoints): + def __init__(self, in_channels, num_keypoints) -> None: super().__init__() input_features = in_channels deconv_kernel = 4 diff --git a/torchvision/models/detection/mask_rcnn.py 
b/torchvision/models/detection/mask_rcnn.py index e2d105b5e41..e623b31c466 100644 --- a/torchvision/models/detection/mask_rcnn.py +++ b/torchvision/models/detection/mask_rcnn.py @@ -198,7 +198,7 @@ def __init__( mask_head=None, mask_predictor=None, **kwargs, - ): + ) -> None: if not isinstance(mask_roi_pool, (MultiScaleRoIAlign, type(None))): raise TypeError( @@ -268,7 +268,7 @@ def __init__( class MaskRCNNHeads(nn.Sequential): _version = 2 - def __init__(self, in_channels, layers, dilation, norm_layer: Optional[Callable[..., nn.Module]] = None): + def __init__(self, in_channels, layers, dilation, norm_layer: Optional[Callable[..., nn.Module]] = None) -> None: """ Args: in_channels (int): number of input channels @@ -332,7 +332,7 @@ def _load_from_state_dict( class MaskRCNNPredictor(nn.Sequential): - def __init__(self, in_channels, dim_reduced, num_classes): + def __init__(self, in_channels, dim_reduced, num_classes) -> None: super().__init__( OrderedDict( [ diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index 792c2c36ce4..d8276c488b6 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -65,7 +65,7 @@ class RetinaNetHead(nn.Module): norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None """ - def __init__(self, in_channels, num_anchors, num_classes, norm_layer: Optional[Callable[..., nn.Module]] = None): + def __init__(self, in_channels, num_anchors, num_classes, norm_layer: Optional[Callable[..., nn.Module]] = None) -> None: super().__init__() self.classification_head = RetinaNetClassificationHead( in_channels, num_anchors, num_classes, norm_layer=norm_layer @@ -104,7 +104,7 @@ def __init__( num_classes, prior_probability=0.01, norm_layer: Optional[Callable[..., nn.Module]] = None, - ): + ) -> None: super().__init__() conv = [] @@ -223,7 +223,7 @@ class RetinaNetRegressionHead(nn.Module): "box_coder": det_utils.BoxCoder, } - def __init__(self, in_channels, num_anchors, norm_layer: Optional[Callable[..., nn.Module]] = None): + def __init__(self, in_channels, num_anchors, norm_layer: Optional[Callable[..., nn.Module]] = None) -> None: super().__init__() conv = [] @@ -430,7 +430,7 @@ def __init__( bg_iou_thresh=0.4, topk_candidates=1000, **kwargs, - ): + ) -> None: super().__init__() _log_api_usage_once(self) diff --git a/torchvision/models/detection/roi_heads.py b/torchvision/models/detection/roi_heads.py index 18a6782a06b..7245215ac96 100644 --- a/torchvision/models/detection/roi_heads.py +++ b/torchvision/models/detection/roi_heads.py @@ -518,7 +518,7 @@ def __init__( keypoint_roi_pool=None, keypoint_head=None, keypoint_predictor=None, - ): + ) -> None: super().__init__() self.box_similarity = box_ops.box_iou diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index c30e508f488..5903a8cd8a4 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -54,7 +54,7 @@ def _xavier_init(conv: nn.Module): class SSDHead(nn.Module): - def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int): + def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int) -> None: super().__init__() self.classification_head = SSDClassificationHead(in_channels, num_anchors, num_classes) self.regression_head = SSDRegressionHead(in_channels, num_anchors) @@ -67,7 +67,7 @@ def forward(self, x: List[Tensor]) -> Dict[str, Tensor]: class 
SSDScoringHead(nn.Module): - def __init__(self, module_list: nn.ModuleList, num_columns: int): + def __init__(self, module_list: nn.ModuleList, num_columns: int) -> None: super().__init__() self.module_list = module_list self.num_columns = num_columns @@ -104,7 +104,7 @@ def forward(self, x: List[Tensor]) -> Tensor: class SSDClassificationHead(SSDScoringHead): - def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int): + def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int) -> None: cls_logits = nn.ModuleList() for channels, anchors in zip(in_channels, num_anchors): cls_logits.append(nn.Conv2d(channels, num_classes * anchors, kernel_size=3, padding=1)) @@ -113,7 +113,7 @@ def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: class SSDRegressionHead(SSDScoringHead): - def __init__(self, in_channels: List[int], num_anchors: List[int]): + def __init__(self, in_channels: List[int], num_anchors: List[int]) -> None: bbox_reg = nn.ModuleList() for channels, anchors in zip(in_channels, num_anchors): bbox_reg.append(nn.Conv2d(channels, 4 * anchors, kernel_size=3, padding=1)) @@ -197,7 +197,7 @@ def __init__( topk_candidates: int = 400, positive_fraction: float = 0.25, **kwargs: Any, - ): + ) -> None: super().__init__() _log_api_usage_once(self) @@ -462,7 +462,7 @@ def postprocess_detections( class SSDFeatureExtractorVGG(nn.Module): - def __init__(self, backbone: nn.Module, highres: bool): + def __init__(self, backbone: nn.Module, highres: bool) -> None: super().__init__() _, _, maxpool3_pos, maxpool4_pos, _ = (i for i, layer in enumerate(backbone) if isinstance(layer, nn.MaxPool2d)) diff --git a/torchvision/models/detection/ssdlite.py b/torchvision/models/detection/ssdlite.py index 63ac0d2bc73..fb68e27212b 100644 --- a/torchvision/models/detection/ssdlite.py +++ b/torchvision/models/detection/ssdlite.py @@ -81,7 +81,7 @@ def _normal_init(conv: nn.Module): class SSDLiteHead(nn.Module): def __init__( self, in_channels: List[int], num_anchors: List[int], num_classes: int, norm_layer: Callable[..., nn.Module] - ): + ) -> None: super().__init__() self.classification_head = SSDLiteClassificationHead(in_channels, num_anchors, num_classes, norm_layer) self.regression_head = SSDLiteRegressionHead(in_channels, num_anchors, norm_layer) @@ -96,7 +96,7 @@ def forward(self, x: List[Tensor]) -> Dict[str, Tensor]: class SSDLiteClassificationHead(SSDScoringHead): def __init__( self, in_channels: List[int], num_anchors: List[int], num_classes: int, norm_layer: Callable[..., nn.Module] - ): + ) -> None: cls_logits = nn.ModuleList() for channels, anchors in zip(in_channels, num_anchors): cls_logits.append(_prediction_block(channels, num_classes * anchors, 3, norm_layer)) @@ -105,7 +105,7 @@ def __init__( class SSDLiteRegressionHead(SSDScoringHead): - def __init__(self, in_channels: List[int], num_anchors: List[int], norm_layer: Callable[..., nn.Module]): + def __init__(self, in_channels: List[int], num_anchors: List[int], norm_layer: Callable[..., nn.Module]) -> None: bbox_reg = nn.ModuleList() for channels, anchors in zip(in_channels, num_anchors): bbox_reg.append(_prediction_block(channels, 4 * anchors, 3, norm_layer)) @@ -121,7 +121,7 @@ def __init__( norm_layer: Callable[..., nn.Module], width_mult: float = 1.0, min_depth: int = 16, - ): + ) -> None: super().__init__() _log_api_usage_once(self) diff --git a/torchvision/models/detection/transform.py b/torchvision/models/detection/transform.py index dd2d728abf9..646f6fd8fac 100644 
--- a/torchvision/models/detection/transform.py +++ b/torchvision/models/detection/transform.py @@ -92,7 +92,7 @@ def __init__( size_divisible: int = 32, fixed_size: Optional[Tuple[int, int]] = None, **kwargs: Any, - ): + ) -> None: super().__init__() if not isinstance(min_size, (list, tuple)): min_size = (min_size,) diff --git a/torchvision/models/feature_extraction.py b/torchvision/models/feature_extraction.py index d247d9a3e26..bf86220415a 100644 --- a/torchvision/models/feature_extraction.py +++ b/torchvision/models/feature_extraction.py @@ -23,7 +23,7 @@ class LeafModuleAwareTracer(fx.Tracer): having single nodes referencing calls to the leaf modules' forward methods. """ - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: self.leaf_modules = {} if "leaf_modules" in kwargs: leaf_modules = kwargs.pop("leaf_modules") @@ -55,7 +55,7 @@ class NodePathTracer(LeafModuleAwareTracer): _{int} is added. The counter starts from 1. """ - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) # Track the qualified name of the Node being traced self.current_module_qualname = "" @@ -271,7 +271,7 @@ class DualGraphModule(fx.GraphModule): def __init__( self, root: torch.nn.Module, train_graph: fx.Graph, eval_graph: fx.Graph, class_name: str = "GraphModule" - ): + ) -> None: """ Args: root (nn.Module): module from which the copied module hierarchy is diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py index 6d8796b7a16..8d85e8f0d25 100644 --- a/torchvision/models/mobilenetv2.py +++ b/torchvision/models/mobilenetv2.py @@ -18,7 +18,7 @@ # necessary for backwards compatibility class _DeprecatedConvBNAct(Conv2dNormActivation): - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: warnings.warn( "The ConvBNReLU/ConvBNActivation classes are deprecated since 0.12 and will be removed in 0.14. 
" "Use torchvision.ops.misc.Conv2dNormActivation instead.", diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py index 81fc3c5d4c0..ecb603879c1 100644 --- a/torchvision/models/mobilenetv3.py +++ b/torchvision/models/mobilenetv3.py @@ -25,7 +25,7 @@ class SqueezeExcitation(SElayer): """DEPRECATED""" - def __init__(self, input_channels: int, squeeze_factor: int = 4): + def __init__(self, input_channels: int, squeeze_factor: int = 4) -> None: squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8) super().__init__(input_channels, squeeze_channels, scale_activation=nn.Hardsigmoid) self.relu = self.activation @@ -50,7 +50,7 @@ def __init__( stride: int, dilation: int, width_mult: float, - ): + ) -> None: self.input_channels = self.adjust_channels(input_channels, width_mult) self.kernel = kernel self.expanded_channels = self.adjust_channels(expanded_channels, width_mult) diff --git a/torchvision/models/optical_flow/raft.py b/torchvision/models/optical_flow/raft.py index 04076c96032..b38dcce3f90 100644 --- a/torchvision/models/optical_flow/raft.py +++ b/torchvision/models/optical_flow/raft.py @@ -27,7 +27,7 @@ class ResidualBlock(nn.Module): """Slightly modified Residual block with extra relu and biases.""" - def __init__(self, in_channels, out_channels, *, norm_layer, stride=1): + def __init__(self, in_channels, out_channels, *, norm_layer, stride=1) -> None: super().__init__() # Note regarding bias=True: @@ -71,7 +71,7 @@ def forward(self, x): class BottleneckBlock(nn.Module): """Slightly modified BottleNeck block (extra relu and biases)""" - def __init__(self, in_channels, out_channels, *, norm_layer, stride=1): + def __init__(self, in_channels, out_channels, *, norm_layer, stride=1) -> None: super().__init__() # See note in ResidualBlock for the reason behind bias=True @@ -207,7 +207,7 @@ def forward(self, flow, corr_features): class ConvGRU(nn.Module): """Convolutional Gru unit.""" - def __init__(self, *, input_size, hidden_size, kernel_size, padding): + def __init__(self, *, input_size, hidden_size, kernel_size, padding) -> None: super().__init__() self.convz = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding) self.convr = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding) @@ -268,7 +268,7 @@ class FlowHead(nn.Module): Takes the hidden state of the recurrent unit as input, and outputs the predicted "delta flow". """ - def __init__(self, *, in_channels, hidden_size): + def __init__(self, *, in_channels, hidden_size) -> None: super().__init__() self.conv1 = nn.Conv2d(in_channels, hidden_size, 3, padding=1) self.conv2 = nn.Conv2d(hidden_size, 2, 3, padding=1) @@ -284,7 +284,7 @@ class UpdateBlock(nn.Module): It must expose a ``hidden_state_size`` attribute which is the hidden state size of its recurrent block. """ - def __init__(self, *, motion_encoder, recurrent_block, flow_head): + def __init__(self, *, motion_encoder, recurrent_block, flow_head) -> None: super().__init__() self.motion_encoder = motion_encoder self.recurrent_block = recurrent_block @@ -308,7 +308,7 @@ class MaskPredictor(nn.Module): This is not used in the raft-small model. 
""" - def __init__(self, *, in_channels, hidden_size, multiplier=0.25): + def __init__(self, *, in_channels, hidden_size, multiplier=0.25) -> None: super().__init__() self.convrelu = Conv2dNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3) # 8 * 8 * 9 because the predicted flow is downsampled by 8, from the downsampling of the initial FeatureEncoder @@ -337,7 +337,7 @@ class CorrBlock(nn.Module): Note: typo in the paper, it should be infinity norm, not 1-norm. """ - def __init__(self, *, num_levels: int = 4, radius: int = 4): + def __init__(self, *, num_levels: int = 4, radius: int = 4) -> None: super().__init__() self.num_levels = num_levels self.radius = radius @@ -412,7 +412,7 @@ def _compute_corr_volume(self, fmap1, fmap2): class RAFT(nn.Module): - def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block, mask_predictor=None): + def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block, mask_predictor=None) -> None: """RAFT model from `RAFT: Recurrent All Pairs Field Transforms for Optical Flow `_. diff --git a/torchvision/models/swin_transformer.py b/torchvision/models/swin_transformer.py index c5bc43a14fd..67a133a8cd9 100644 --- a/torchvision/models/swin_transformer.py +++ b/torchvision/models/swin_transformer.py @@ -41,7 +41,7 @@ class PatchMerging(nn.Module): norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. """ - def __init__(self, dim: int, norm_layer: Callable[..., nn.Module] = nn.LayerNorm): + def __init__(self, dim: int, norm_layer: Callable[..., nn.Module] = nn.LayerNorm) -> None: super().__init__() _log_api_usage_once(self) self.dim = dim @@ -187,7 +187,7 @@ def __init__( proj_bias: bool = True, attention_dropout: float = 0.0, dropout: float = 0.0, - ): + ) -> None: super().__init__() if len(window_size) != 2 or len(shift_size) != 2: raise ValueError("window_size and shift_size must be of length 2") @@ -276,7 +276,7 @@ def __init__( stochastic_depth_prob: float = 0.0, norm_layer: Callable[..., nn.Module] = nn.LayerNorm, attn_layer: Callable[..., nn.Module] = ShiftedWindowAttention, - ): + ) -> None: super().__init__() _log_api_usage_once(self) @@ -338,7 +338,7 @@ def __init__( num_classes: int = 1000, norm_layer: Optional[Callable[..., nn.Module]] = None, block: Optional[Callable[..., nn.Module]] = None, - ): + ) -> None: super().__init__() _log_api_usage_once(self) self.num_classes = num_classes diff --git a/torchvision/models/vision_transformer.py b/torchvision/models/vision_transformer.py index a0a42ab07b7..1347eddc3ad 100644 --- a/torchvision/models/vision_transformer.py +++ b/torchvision/models/vision_transformer.py @@ -42,7 +42,7 @@ class MLPBlock(MLP): _version = 2 - def __init__(self, in_dim: int, mlp_dim: int, dropout: float): + def __init__(self, in_dim: int, mlp_dim: int, dropout: float) -> None: super().__init__(in_dim, [mlp_dim, in_dim], activation_layer=nn.GELU, inplace=None, dropout=dropout) for m in self.modules(): diff --git a/torchvision/ops/deform_conv.py b/torchvision/ops/deform_conv.py index bb4400e5c29..a767c656055 100644 --- a/torchvision/ops/deform_conv.py +++ b/torchvision/ops/deform_conv.py @@ -122,7 +122,7 @@ def __init__( dilation: int = 1, groups: int = 1, bias: bool = True, - ): + ) -> None: super().__init__() _log_api_usage_once(self) diff --git a/torchvision/ops/feature_pyramid_network.py b/torchvision/ops/feature_pyramid_network.py index ffec3505ec0..d5018ec36c8 100644 --- a/torchvision/ops/feature_pyramid_network.py +++ 
b/torchvision/ops/feature_pyramid_network.py @@ -80,7 +80,7 @@ def __init__( out_channels: int, extra_blocks: Optional[ExtraFPNBlock] = None, norm_layer: Optional[Callable[..., nn.Module]] = None, - ): + ) -> None: super().__init__() _log_api_usage_once(self) self.inner_blocks = nn.ModuleList() @@ -225,7 +225,7 @@ class LastLevelP6P7(ExtraFPNBlock): This module is used in RetinaNet to generate extra layers, P6 and P7. """ - def __init__(self, in_channels: int, out_channels: int): + def __init__(self, in_channels: int, out_channels: int) -> None: super().__init__() self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) diff --git a/torchvision/ops/misc.py b/torchvision/ops/misc.py index d4bda7decc5..8f17983a6e6 100644 --- a/torchvision/ops/misc.py +++ b/torchvision/ops/misc.py @@ -23,7 +23,7 @@ def __init__( self, num_features: int, eps: float = 1e-5, - ): + ) -> None: super().__init__() _log_api_usage_once(self) self.eps = eps @@ -282,7 +282,7 @@ def __init__( inplace: Optional[bool] = True, bias: bool = True, dropout: float = 0.0, - ): + ) -> None: # The addition of `norm_layer` is inspired from the implementation of TorchMultimodal: # https://github.com/facebookresearch/multimodal/blob/5dec8a/torchmultimodal/modules/layers/mlp.py params = {} if inplace is None else {"inplace": inplace} @@ -311,7 +311,7 @@ class Permute(torch.nn.Module): dims (List[int]): The desired ordering of dimensions """ - def __init__(self, dims: List[int]): + def __init__(self, dims: List[int]) -> None: super().__init__() self.dims = dims diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py index cfcb9e94056..659a5801708 100644 --- a/torchvision/ops/poolers.py +++ b/torchvision/ops/poolers.py @@ -64,7 +64,7 @@ def __init__( canonical_scale: int = 224, canonical_level: int = 4, eps: float = 1e-6, - ): + ) -> None: self.k_min = k_min self.k_max = k_max self.s0 = canonical_scale @@ -274,7 +274,7 @@ def __init__( *, canonical_scale: int = 224, canonical_level: int = 4, - ): + ) -> None: super().__init__() _log_api_usage_once(self) if isinstance(output_size, int): diff --git a/torchvision/ops/ps_roi_align.py b/torchvision/ops/ps_roi_align.py index 0228a2a5554..df680eb68c4 100644 --- a/torchvision/ops/ps_roi_align.py +++ b/torchvision/ops/ps_roi_align.py @@ -67,7 +67,7 @@ def __init__( output_size: int, spatial_scale: float, sampling_ratio: int, - ): + ) -> None: super().__init__() _log_api_usage_once(self) self.output_size = output_size diff --git a/torchvision/ops/ps_roi_pool.py b/torchvision/ops/ps_roi_pool.py index 1a3eed35915..f0c293aacd5 100644 --- a/torchvision/ops/ps_roi_pool.py +++ b/torchvision/ops/ps_roi_pool.py @@ -54,7 +54,7 @@ class PSRoIPool(nn.Module): See :func:`ps_roi_pool`. 
""" - def __init__(self, output_size: int, spatial_scale: float): + def __init__(self, output_size: int, spatial_scale: float) -> None: super().__init__() _log_api_usage_once(self) self.output_size = output_size diff --git a/torchvision/ops/roi_align.py b/torchvision/ops/roi_align.py index afe9e42af16..b72d0f0adf0 100644 --- a/torchvision/ops/roi_align.py +++ b/torchvision/ops/roi_align.py @@ -74,7 +74,7 @@ def __init__( spatial_scale: float, sampling_ratio: int, aligned: bool = False, - ): + ) -> None: super().__init__() _log_api_usage_once(self) self.output_size = output_size diff --git a/torchvision/ops/roi_pool.py b/torchvision/ops/roi_pool.py index 50dc2f64421..149bb4da025 100644 --- a/torchvision/ops/roi_pool.py +++ b/torchvision/ops/roi_pool.py @@ -56,7 +56,7 @@ class RoIPool(nn.Module): See :func:`roi_pool`. """ - def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float): + def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float) -> None: super().__init__() _log_api_usage_once(self) self.output_size = output_size diff --git a/torchvision/prototype/datasets/benchmark.py b/torchvision/prototype/datasets/benchmark.py index 104ef95c9ae..842a89a50e1 100644 --- a/torchvision/prototype/datasets/benchmark.py +++ b/torchvision/prototype/datasets/benchmark.py @@ -101,7 +101,7 @@ def __init__( legacy_config_map=None, legacy_special_options_map=None, prepare_legacy_root=None, - ): + ) -> None: self.name = name self.variant = variant @@ -315,7 +315,7 @@ def iterations_per_time(cls, fn): return cls._format(iterations_per_second, unit="it/s") class Timer: - def __init__(self): + def __init__(self) -> None: self._start = None self._stop = None @@ -400,7 +400,7 @@ def prepare_legacy_root(benchmark, root): class JointTransform: - def __init__(self, *transforms): + def __init__(self, *transforms) -> None: self.transforms = transforms def __call__(self, *inputs): diff --git a/torchvision/prototype/models/depth/stereo/raft_stereo.py b/torchvision/prototype/models/depth/stereo/raft_stereo.py index 522ad24c3a2..71c6632918f 100644 --- a/torchvision/prototype/models/depth/stereo/raft_stereo.py +++ b/torchvision/prototype/models/depth/stereo/raft_stereo.py @@ -60,7 +60,7 @@ def __init__( output_dim: int = 256, shared_base: bool = False, block: Callable[..., nn.Module] = ResidualBlock, - ): + ) -> None: super().__init__() self.base_encoder = base_encoder self.base_downsampling_ratio = base_encoder.downsampling_ratio @@ -106,7 +106,7 @@ def __init__( out_with_blocks: List[bool], output_dim: int = 256, block: Callable[..., nn.Module] = ResidualBlock, - ): + ) -> None: super().__init__() self.num_level = len(out_with_blocks) self.base_encoder = base_encoder @@ -171,7 +171,7 @@ class MultiLevelUpdateBlock(nn.Module): It must expose a ``hidden_dims`` attribute which is the hidden dimension size of its gru blocks """ - def __init__(self, *, motion_encoder: MotionEncoder, hidden_dims: List[int]): + def __init__(self, *, motion_encoder: MotionEncoder, hidden_dims: List[int]) -> None: super().__init__() self.motion_encoder = motion_encoder @@ -244,7 +244,7 @@ class MaskPredictor(raft.MaskPredictor): """Mask predictor to be used when upsampling the predicted depth.""" # We add out_channels compared to raft.MaskPredictor - def __init__(self, *, in_channels: int, hidden_size: int, out_channels: int, multiplier: float = 0.25): + def __init__(self, *, in_channels: int, hidden_size: int, out_channels: int, multiplier: float = 0.25) -> None: super(raft.MaskPredictor, self).__init__() 
self.convrelu = Conv2dNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3) self.conv = nn.Conv2d(hidden_size, out_channels, kernel_size=1, padding=0) @@ -258,7 +258,7 @@ class CorrPyramid1d(nn.Module): this correlation pyramid will later be used as index to create correlation features using CorrBlock1d. """ - def __init__(self, num_levels: int = 4): + def __init__(self, num_levels: int = 4) -> None: super().__init__() self.num_levels = num_levels @@ -300,7 +300,7 @@ class CorrBlock1d(nn.Module): within radius """ - def __init__(self, *, num_levels: int = 4, radius: int = 4): + def __init__(self, *, num_levels: int = 4, radius: int = 4) -> None: super().__init__() self.radius = radius self.out_channels = num_levels * (2 * radius + 1) @@ -348,7 +348,7 @@ def __init__( depth_head: nn.Module, mask_predictor: Optional[nn.Module] = None, slow_fast: bool = False, - ): + ) -> None: """RAFT-Stereo model from `RAFT-Stereo: Multilevel Recurrent Field Transforms for Stereo Matching `_. diff --git a/torchvision/prototype/transforms/_auto_augment.py b/torchvision/prototype/transforms/_auto_augment.py index 4bf0236a1d1..9b4afb58e93 100644 --- a/torchvision/prototype/transforms/_auto_augment.py +++ b/torchvision/prototype/transforms/_auto_augment.py @@ -412,7 +412,7 @@ def __init__( num_magnitude_bins: int = 31, interpolation: InterpolationMode = InterpolationMode.NEAREST, fill: Union[int, float, Sequence[int], Sequence[float]] = 0, - ): + ) -> None: super().__init__(interpolation=interpolation, fill=fill) self.num_magnitude_bins = num_magnitude_bins diff --git a/torchvision/prototype/transforms/_geometry.py b/torchvision/prototype/transforms/_geometry.py index 2c4da82cfd8..20565f41be7 100644 --- a/torchvision/prototype/transforms/_geometry.py +++ b/torchvision/prototype/transforms/_geometry.py @@ -52,7 +52,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class CenterCrop(Transform): - def __init__(self, output_size: List[int]): + def __init__(self, output_size: List[int]) -> None: super().__init__() self.output_size = output_size diff --git a/torchvision/prototype/transforms/_misc.py b/torchvision/prototype/transforms/_misc.py index 8d0a01eca09..dac191df991 100644 --- a/torchvision/prototype/transforms/_misc.py +++ b/torchvision/prototype/transforms/_misc.py @@ -12,7 +12,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class Lambda(Transform): - def __init__(self, fn: Callable[[Any], Any], *types: Type): + def __init__(self, fn: Callable[[Any], Any], *types: Type) -> None: super().__init__() self.fn = fn self.types = types @@ -33,7 +33,7 @@ def extra_repr(self) -> str: class Normalize(Transform): - def __init__(self, mean: List[float], std: List[float]): + def __init__(self, mean: List[float], std: List[float]) -> None: super().__init__() self.mean = mean self.std = std diff --git a/torchvision/prototype/transforms/_type_conversion.py b/torchvision/prototype/transforms/_type_conversion.py index 67609bc8fd0..508ec50319e 100644 --- a/torchvision/prototype/transforms/_type_conversion.py +++ b/torchvision/prototype/transforms/_type_conversion.py @@ -18,7 +18,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class LabelToOneHot(Transform): - def __init__(self, num_categories: int = -1): + def __init__(self, num_categories: int = -1) -> None: super().__init__() self.num_categories = num_categories diff --git a/torchvision/transforms/_transforms_video.py b/torchvision/transforms/_transforms_video.py index 1ed6de7612d..8d6f04624ff 
100644 --- a/torchvision/transforms/_transforms_video.py +++ b/torchvision/transforms/_transforms_video.py @@ -26,7 +26,7 @@ class RandomCropVideo(RandomCrop): - def __init__(self, size): + def __init__(self, size) -> None: if isinstance(size, numbers.Number): self.size = (int(size), int(size)) else: @@ -82,7 +82,7 @@ def __repr__(self) -> str: class CenterCropVideo: - def __init__(self, crop_size): + def __init__(self, crop_size) -> None: if isinstance(crop_size, numbers.Number): self.crop_size = (int(crop_size), int(crop_size)) else: @@ -111,7 +111,7 @@ class NormalizeVideo: inplace (boolean): whether do in-place normalization """ - def __init__(self, mean, std, inplace=False): + def __init__(self, mean, std, inplace=False) -> None: self.mean = mean self.std = std self.inplace = inplace @@ -133,7 +133,7 @@ class ToTensorVideo: permute the dimensions of clip tensor """ - def __init__(self): + def __init__(self) -> None: pass def __call__(self, clip): @@ -156,7 +156,7 @@ class RandomHorizontalFlipVideo: p (float): probability of the clip being flipped. Default value is 0.5 """ - def __init__(self, p=0.5): + def __init__(self, p=0.5) -> None: self.p = p def __call__(self, clip): diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index 095460675cc..4b3ee60636f 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -85,7 +85,7 @@ class Compose: """ - def __init__(self, transforms): + def __init__(self, transforms) -> None: if not torch.jit.is_scripting() and not torch.jit.is_tracing(): _log_api_usage_once(self) self.transforms = transforms @@ -211,7 +211,7 @@ class ToPILImage: .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes """ - def __init__(self, mode=None): + def __init__(self, mode=None) -> None: _log_api_usage_once(self) self.mode = mode @@ -252,7 +252,7 @@ class Normalize(torch.nn.Module): """ - def __init__(self, mean, std, inplace=False): + def __init__(self, mean, std, inplace=False) -> None: super().__init__() _log_api_usage_once(self) self.mean = mean @@ -314,7 +314,7 @@ class Resize(torch.nn.Module): This can help making the output for PIL images and tensors closer. """ - def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None, antialias=None): + def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None, antialias=None) -> None: super().__init__() _log_api_usage_once(self) if not isinstance(size, (int, Sequence)): @@ -362,7 +362,7 @@ class CenterCrop(torch.nn.Module): made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). """ - def __init__(self, size): + def __init__(self, size) -> None: super().__init__() _log_api_usage_once(self) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") @@ -419,7 +419,7 @@ class Pad(torch.nn.Module): will result in [2, 1, 1, 2, 3, 4, 4, 3] """ - def __init__(self, padding, fill=0, padding_mode="constant"): + def __init__(self, padding, fill=0, padding_mode="constant") -> None: super().__init__() _log_api_usage_once(self) if not isinstance(padding, (numbers.Number, tuple, list)): @@ -461,7 +461,7 @@ class Lambda: lambd (function): Lambda/function to be used for transform. 
""" - def __init__(self, lambd): + def __init__(self, lambd) -> None: _log_api_usage_once(self) if not callable(lambd): raise TypeError(f"Argument lambd should be callable, got {repr(type(lambd).__name__)}") @@ -481,7 +481,7 @@ class RandomTransforms: transforms (sequence): list of transformations """ - def __init__(self, transforms): + def __init__(self, transforms) -> None: _log_api_usage_once(self) if not isinstance(transforms, Sequence): raise TypeError("Argument transforms should be a sequence") @@ -519,7 +519,7 @@ class RandomApply(torch.nn.Module): p (float): probability """ - def __init__(self, transforms, p=0.5): + def __init__(self, transforms, p=0.5) -> None: super().__init__() _log_api_usage_once(self) self.transforms = transforms @@ -556,7 +556,7 @@ def __call__(self, img): class RandomChoice(RandomTransforms): """Apply single transformation randomly picked from a list. This transform does not support torchscript.""" - def __init__(self, transforms, p=None): + def __init__(self, transforms, p=None) -> None: super().__init__(transforms) if p is not None and not isinstance(p, Sequence): raise TypeError("Argument p should be a sequence") @@ -638,7 +638,7 @@ def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int j = torch.randint(0, w - tw + 1, size=(1,)).item() return i, j, th, tw - def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"): + def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant") -> None: super().__init__() _log_api_usage_once(self) @@ -688,7 +688,7 @@ class RandomHorizontalFlip(torch.nn.Module): p (float): probability of the image being flipped. Default value is 0.5 """ - def __init__(self, p=0.5): + def __init__(self, p=0.5) -> None: super().__init__() _log_api_usage_once(self) self.p = p @@ -719,7 +719,7 @@ class RandomVerticalFlip(torch.nn.Module): p (float): probability of the image being flipped. Default value is 0.5 """ - def __init__(self, p=0.5): + def __init__(self, p=0.5) -> None: super().__init__() _log_api_usage_once(self) self.p = p @@ -758,7 +758,7 @@ class RandomPerspective(torch.nn.Module): image. Default is ``0``. If given a number, the value is used for all bands respectively. 
""" - def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0): + def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0) -> None: super().__init__() _log_api_usage_once(self) self.p = p @@ -1000,7 +1000,7 @@ class FiveCrop(torch.nn.Module): >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops """ - def __init__(self, size): + def __init__(self, size) -> None: super().__init__() _log_api_usage_once(self) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") @@ -1049,7 +1049,7 @@ class TenCrop(torch.nn.Module): >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops """ - def __init__(self, size, vertical_flip=False): + def __init__(self, size, vertical_flip=False) -> None: super().__init__() _log_api_usage_once(self) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") @@ -1088,7 +1088,7 @@ class LinearTransformation(torch.nn.Module): mean_vector (Tensor): tensor [D], D = C x H x W """ - def __init__(self, transformation_matrix, mean_vector): + def __init__(self, transformation_matrix, mean_vector) -> None: super().__init__() _log_api_usage_once(self) if transformation_matrix.size(0) != transformation_matrix.size(1): @@ -1172,7 +1172,7 @@ class ColorJitter(torch.nn.Module): or use an interpolation that generates negative values before using this function. """ - def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0) -> None: super().__init__() _log_api_usage_once(self) self.brightness = self._check_input(brightness, "brightness") @@ -1300,7 +1300,7 @@ class RandomRotation(torch.nn.Module): def __init__( self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None - ): + ) -> None: super().__init__() _log_api_usage_once(self) if resample is not None: @@ -1430,7 +1430,7 @@ def __init__( fillcolor=None, resample=None, center=None, - ): + ) -> None: super().__init__() _log_api_usage_once(self) if resample is not None: @@ -1578,7 +1578,7 @@ class Grayscale(torch.nn.Module): """ - def __init__(self, num_output_channels=1): + def __init__(self, num_output_channels=1) -> None: super().__init__() _log_api_usage_once(self) self.num_output_channels = num_output_channels @@ -1613,7 +1613,7 @@ class RandomGrayscale(torch.nn.Module): """ - def __init__(self, p=0.1): + def __init__(self, p=0.1) -> None: super().__init__() _log_api_usage_once(self) self.p = p @@ -1879,7 +1879,7 @@ class RandomInvert(torch.nn.Module): p (float): probability of the image being color inverted. Default value is 0.5 """ - def __init__(self, p=0.5): + def __init__(self, p=0.5) -> None: super().__init__() _log_api_usage_once(self) self.p = p @@ -1911,7 +1911,7 @@ class RandomPosterize(torch.nn.Module): p (float): probability of the image being posterized. Default value is 0.5 """ - def __init__(self, bits, p=0.5): + def __init__(self, bits, p=0.5) -> None: super().__init__() _log_api_usage_once(self) self.bits = bits @@ -1944,7 +1944,7 @@ class RandomSolarize(torch.nn.Module): p (float): probability of the image being solarized. 
@@ -1944,7 +1944,7 @@ class RandomSolarize(torch.nn.Module):
         p (float): probability of the image being solarized. Default value is 0.5
     """

-    def __init__(self, threshold, p=0.5):
+    def __init__(self, threshold, p=0.5) -> None:
         super().__init__()
         _log_api_usage_once(self)
         self.threshold = threshold
@@ -1977,7 +1977,7 @@ class RandomAdjustSharpness(torch.nn.Module):
         p (float): probability of the image being sharpened. Default value is 0.5
     """

-    def __init__(self, sharpness_factor, p=0.5):
+    def __init__(self, sharpness_factor, p=0.5) -> None:
         super().__init__()
         _log_api_usage_once(self)
         self.sharpness_factor = sharpness_factor
@@ -2009,7 +2009,7 @@ class RandomAutocontrast(torch.nn.Module):
         p (float): probability of the image being autocontrasted. Default value is 0.5
     """

-    def __init__(self, p=0.5):
+    def __init__(self, p=0.5) -> None:
         super().__init__()
         _log_api_usage_once(self)
         self.p = p
@@ -2040,7 +2040,7 @@ class RandomEqualize(torch.nn.Module):
         p (float): probability of the image being equalized. Default value is 0.5
     """

-    def __init__(self, p=0.5):
+    def __init__(self, p=0.5) -> None:
         super().__init__()
         _log_api_usage_once(self)
         self.p = p
@@ -2085,7 +2085,7 @@ class ElasticTransform(torch.nn.Module):

     """

-    def __init__(self, alpha=50.0, sigma=5.0, interpolation=InterpolationMode.BILINEAR, fill=0):
+    def __init__(self, alpha=50.0, sigma=5.0, interpolation=InterpolationMode.BILINEAR, fill=0) -> None:
         super().__init__()
         _log_api_usage_once(self)
         if not isinstance(alpha, (float, Sequence)):

From f4ea9962870fb6cf024d173c10fe35408ccc001f Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Tue, 2 Aug 2022 14:26:15 -0600
Subject: [PATCH 2/3] Fix ufmt formatting

---
 torchvision/models/detection/retinanet.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py
index d8276c488b6..9ad14097372 100644
--- a/torchvision/models/detection/retinanet.py
+++ b/torchvision/models/detection/retinanet.py
@@ -65,7 +65,13 @@ class RetinaNetHead(nn.Module):
         norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None
     """

-    def __init__(self, in_channels, num_anchors, num_classes, norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
+    def __init__(
+        self,
+        in_channels,
+        num_anchors,
+        num_classes,
+        norm_layer: Optional[Callable[..., nn.Module]] = None,
+    ) -> None:
         super().__init__()
         self.classification_head = RetinaNetClassificationHead(
             in_channels, num_anchors, num_classes, norm_layer=norm_layer
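Patch 2 is a pure formatter fix: the one-line signature produced by patch 1 in retinanet.py overran the project's line-length limit, and ufmt (black plus usort) rewrites such signatures one parameter per line with a trailing comma. A rough illustration of the rule (the exact column limit is an assumption about torchvision's black configuration, not something stated in this series):

    # A signature that no longer fits on one line ...
    # def __init__(self, in_channels, num_anchors, num_classes, norm_layer=None) -> None:
    # ... is rewritten by the formatter into its "exploded" form, with the
    # trailing comma keeping it one-parameter-per-line on future runs:
    def __init__(
        self,
        in_channels,
        num_anchors,
        num_classes,
        norm_layer=None,
    ) -> None:
        ...
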
From f5af5abced9e747f6b728fe93a2ab45192c250cc Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Tue, 2 Aug 2022 14:42:32 -0600
Subject: [PATCH 3/3] Fix lint errors

---
 torchvision/datasets/_optical_flow.py       | 4 ++--
 torchvision/models/feature_extraction.py    | 2 +-
 torchvision/models/optical_flow/raft.py     | 4 ++--
 torchvision/transforms/_transforms_video.py | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/torchvision/datasets/_optical_flow.py b/torchvision/datasets/_optical_flow.py
index 5ece286af9c..19204339673 100644
--- a/torchvision/datasets/_optical_flow.py
+++ b/torchvision/datasets/_optical_flow.py
@@ -28,7 +28,7 @@ class FlowDataset(ABC, VisionDataset):
     # and it's up to whatever consumes the dataset to decide what valid_flow_mask should be.
     _has_builtin_flow_mask = False

-    def __init__(self, root, transforms=None) -> None:
+    def __init__(self, root, transforms=None):
         super().__init__(root=root)
         self.transforms = transforms

@@ -70,7 +70,7 @@ def __getitem__(self, index):
         else:
             return img1, img2, flow

-    def __len__(self):
+    def __len__(self) -> int:
         return len(self._image_list)

     def __rmul__(self, v):
diff --git a/torchvision/models/feature_extraction.py b/torchvision/models/feature_extraction.py
index bf86220415a..ff9e96ac20a 100644
--- a/torchvision/models/feature_extraction.py
+++ b/torchvision/models/feature_extraction.py
@@ -55,7 +55,7 @@ class NodePathTracer(LeafModuleAwareTracer):
     _{int} is added. The counter starts from 1.
     """

-    def __init__(self, *args, **kwargs) -> None:
+    def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         # Track the qualified name of the Node being traced
         self.current_module_qualname = ""
diff --git a/torchvision/models/optical_flow/raft.py b/torchvision/models/optical_flow/raft.py
index b38dcce3f90..3cc6890193f 100644
--- a/torchvision/models/optical_flow/raft.py
+++ b/torchvision/models/optical_flow/raft.py
@@ -27,7 +27,7 @@ class ResidualBlock(nn.Module):
     """Slightly modified Residual block with extra relu and biases."""

-    def __init__(self, in_channels, out_channels, *, norm_layer, stride=1) -> None:
+    def __init__(self, in_channels, out_channels, *, norm_layer, stride=1):
         super().__init__()

         # Note regarding bias=True:
@@ -71,7 +71,7 @@ class BottleneckBlock(nn.Module):
     """Slightly modified BottleNeck block (extra relu and biases)"""

-    def __init__(self, in_channels, out_channels, *, norm_layer, stride=1) -> None:
+    def __init__(self, in_channels, out_channels, *, norm_layer, stride=1):
         super().__init__()

         # See note in ResidualBlock for the reason behind bias=True
diff --git a/torchvision/transforms/_transforms_video.py b/torchvision/transforms/_transforms_video.py
index 8d6f04624ff..cad68bc114a 100644
--- a/torchvision/transforms/_transforms_video.py
+++ b/torchvision/transforms/_transforms_video.py
@@ -26,7 +26,7 @@


 class RandomCropVideo(RandomCrop):
-    def __init__(self, size) -> None:
+    def __init__(self, size):
         if isinstance(size, numbers.Number):
             self.size = (int(size), int(size))
         else:
@@ -82,7 +82,7 @@ def __repr__(self) -> str:


 class CenterCropVideo:
-    def __init__(self, crop_size) -> None:
+    def __init__(self, crop_size):
         if isinstance(crop_size, numbers.Number):
             self.crop_size = (int(crop_size), int(crop_size))
         else:
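A plausible reading of patch 3, whose commit message only says "Fix lint errors": once an `__init__` gains a return annotation, mypy starts type-checking its body, so in modules that are otherwise unannotated the new hint can surface pre-existing errors. Reverting the hint in those files restores the previous behavior, while `__len__` keeps an annotation because its `-> int` return type is required by the protocol anyway. A small sketch of the distinction (a hypothetical class, not from torchvision):

    class Clips:
        # Deliberately left unannotated: by default mypy does not check the
        # body of a function with no annotations, which is the behavior the
        # reverts above restore for the not-yet-typed modules.
        def __init__(self, paths):
            self._image_list = paths

        # `__len__` must return an int, so the annotation is kept; mypy now
        # checks this one-line body, which cannot produce new errors.
        def __len__(self) -> int:
            return len(self._image_list)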