Skip to content

Commit 2227b5d

Browse files
committed
run pre-commit
1 parent 950b793 commit 2227b5d

File tree

188 files changed

+551
-736
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

188 files changed

+551
-736
lines changed

hubconf.py

Lines changed: 26 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33

44
from torchvision.models import get_weight
55
from torchvision.models.alexnet import alexnet
6-
from torchvision.models.convnext import convnext_tiny, convnext_small, convnext_base, convnext_large
7-
from torchvision.models.densenet import densenet121, densenet169, densenet201, densenet161
6+
from torchvision.models.convnext import convnext_base, convnext_large, convnext_small, convnext_tiny
7+
from torchvision.models.densenet import densenet121, densenet161, densenet169, densenet201
88
from torchvision.models.efficientnet import (
99
efficientnet_b0,
1010
efficientnet_b1,
@@ -14,9 +14,9 @@
1414
efficientnet_b5,
1515
efficientnet_b6,
1616
efficientnet_b7,
17-
efficientnet_v2_s,
18-
efficientnet_v2_m,
1917
efficientnet_v2_l,
18+
efficientnet_v2_m,
19+
efficientnet_v2_s,
2020
)
2121
from torchvision.models.googlenet import googlenet
2222
from torchvision.models.inception import inception_v3
@@ -25,39 +25,39 @@
2525
from torchvision.models.mobilenetv3 import mobilenet_v3_large, mobilenet_v3_small
2626
from torchvision.models.optical_flow import raft_large, raft_small
2727
from torchvision.models.regnet import (
28-
regnet_y_400mf,
29-
regnet_y_800mf,
30-
regnet_y_1_6gf,
31-
regnet_y_3_2gf,
32-
regnet_y_8gf,
33-
regnet_y_16gf,
34-
regnet_y_32gf,
35-
regnet_y_128gf,
36-
regnet_x_400mf,
37-
regnet_x_800mf,
28+
regnet_x_16gf,
3829
regnet_x_1_6gf,
30+
regnet_x_32gf,
3931
regnet_x_3_2gf,
32+
regnet_x_400mf,
33+
regnet_x_800mf,
4034
regnet_x_8gf,
41-
regnet_x_16gf,
42-
regnet_x_32gf,
35+
regnet_y_128gf,
36+
regnet_y_16gf,
37+
regnet_y_1_6gf,
38+
regnet_y_32gf,
39+
regnet_y_3_2gf,
40+
regnet_y_400mf,
41+
regnet_y_800mf,
42+
regnet_y_8gf,
4343
)
4444
from torchvision.models.resnet import (
45+
resnet101,
46+
resnet152,
4547
resnet18,
4648
resnet34,
4749
resnet50,
48-
resnet101,
49-
resnet152,
50-
resnext50_32x4d,
5150
resnext101_32x8d,
52-
wide_resnet50_2,
51+
resnext50_32x4d,
5352
wide_resnet101_2,
53+
wide_resnet50_2,
5454
)
5555
from torchvision.models.segmentation import (
56-
fcn_resnet50,
57-
fcn_resnet101,
58-
deeplabv3_resnet50,
59-
deeplabv3_resnet101,
6056
deeplabv3_mobilenet_v3_large,
57+
deeplabv3_resnet101,
58+
deeplabv3_resnet50,
59+
fcn_resnet101,
60+
fcn_resnet50,
6161
lraspp_mobilenet_v3_large,
6262
)
6363
from torchvision.models.shufflenetv2 import (
@@ -67,11 +67,5 @@
6767
shufflenet_v2_x2_0,
6868
)
6969
from torchvision.models.squeezenet import squeezenet1_0, squeezenet1_1
70-
from torchvision.models.vgg import vgg11, vgg13, vgg16, vgg19, vgg11_bn, vgg13_bn, vgg16_bn, vgg19_bn
71-
from torchvision.models.vision_transformer import (
72-
vit_b_16,
73-
vit_b_32,
74-
vit_l_16,
75-
vit_l_32,
76-
vit_h_14,
77-
)
70+
from torchvision.models.vgg import vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19, vgg19_bn
71+
from torchvision.models.vision_transformer import vit_b_16, vit_b_32, vit_h_14, vit_l_16, vit_l_32

references/classification/train_quantization.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
import torchvision
1010
import utils
1111
from torch import nn
12-
from train import train_one_epoch, evaluate, load_data
12+
from train import evaluate, load_data, train_one_epoch
1313

1414

1515
def main(args):

references/detection/group_by_aspect_ratio.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
import copy
33
import math
44
from collections import defaultdict
5-
from itertools import repeat, chain
5+
from itertools import chain, repeat
66

77
import numpy as np
88
import torch

references/detection/train.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,8 @@
2929
import torchvision.models.detection.mask_rcnn
3030
import utils
3131
from coco_utils import get_coco, get_coco_kp
32-
from engine import train_one_epoch, evaluate
33-
from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
32+
from engine import evaluate, train_one_epoch
33+
from group_by_aspect_ratio import create_aspect_ratio_groups, GroupedBatchSampler
3434

3535

3636
def get_dataset(name, image_set, transform, data_path):

references/detection/transforms.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,9 @@
1-
from typing import List, Tuple, Dict, Optional, Union
1+
from typing import Dict, List, Optional, Tuple, Union
22

33
import torch
44
import torchvision
55
from torch import nn, Tensor
6-
from torchvision.transforms import functional as F
7-
from torchvision.transforms import transforms as T, InterpolationMode
6+
from torchvision.transforms import functional as F, InterpolationMode, transforms as T
87

98

109
def _flip_coco_person_keypoints(kps, width):

references/optical_flow/train.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,8 @@
66
import torch
77
import torchvision.models.optical_flow
88
import utils
9-
from presets import OpticalFlowPresetTrain, OpticalFlowPresetEval
10-
from torchvision.datasets import KittiFlow, FlyingChairs, FlyingThings3D, Sintel, HD1K
9+
from presets import OpticalFlowPresetEval, OpticalFlowPresetTrain
10+
from torchvision.datasets import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
1111

1212

1313
def get_train_dataset(stage, dataset_root):

references/optical_flow/utils.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,7 @@
11
import datetime
22
import os
33
import time
4-
from collections import defaultdict
5-
from collections import deque
4+
from collections import defaultdict, deque
65

76
import torch
87
import torch.distributed as dist
@@ -158,7 +157,7 @@ def log_every(self, iterable, print_freq=5, header=None):
158157
def compute_metrics(flow_pred, flow_gt, valid_flow_mask=None):
159158

160159
epe = ((flow_pred - flow_gt) ** 2).sum(dim=1).sqrt()
161-
flow_norm = (flow_gt ** 2).sum(dim=1).sqrt()
160+
flow_norm = (flow_gt**2).sum(dim=1).sqrt()
162161

163162
if valid_flow_mask is not None:
164163
epe = epe[valid_flow_mask]
@@ -183,7 +182,7 @@ def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400)
183182
raise ValueError(f"Gamma should be < 1, got {gamma}.")
184183

185184
# exclude invalid pixels and extremely large displacements
186-
flow_norm = torch.sum(flow_gt ** 2, dim=1).sqrt()
185+
flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()
187186
valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)
188187

189188
valid_flow_mask = valid_flow_mask[:, None, :, :]

references/segmentation/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ def update(self, a, b):
7575
with torch.inference_mode():
7676
k = (a >= 0) & (a < n)
7777
inds = n * a[k].to(torch.int64) + b[k]
78-
self.mat += torch.bincount(inds, minlength=n ** 2).reshape(n, n)
78+
self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)
7979

8080
def reset(self):
8181
self.mat.zero_()

references/video_classification/presets.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import torch
22
from torchvision.transforms import transforms
3-
from transforms import ConvertBHWCtoBCHW, ConvertBCHWtoCBHW
3+
from transforms import ConvertBCHWtoCBHW, ConvertBHWCtoBCHW
44

55

66
class VideoClassificationPresetTrain:

references/video_classification/train.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
import utils
1212
from torch import nn
1313
from torch.utils.data.dataloader import default_collate
14-
from torchvision.datasets.samplers import DistributedSampler, UniformClipSampler, RandomClipSampler
14+
from torchvision.datasets.samplers import DistributedSampler, RandomClipSampler, UniformClipSampler
1515

1616

1717
def train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader, device, epoch, print_freq, scaler=None):

0 commit comments

Comments (0)