Skip to content

Commit 6811faf

Browse files
committed
Merge branch 'main' of github.com:pytorch/vision into ufmt
2 parents c27c869 + 05d7651 commit 6811faf

File tree

8 files changed

+21
-15
lines changed

8 files changed

+21
-15
lines changed

gallery/plot_visualization_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ def show(imgs):
116116
# lraspp mobilenet models
117117
# (:func:`~torchvision.models.segmentation.lraspp_mobilenet_v3_large`).
118118
#
119-
# Let's start by looking at the ouput of the model. Remember that in general,
119+
# Let's start by looking at the output of the model. Remember that in general,
120120
# images must be normalized before they're passed to a semantic segmentation
121121
# model.
122122

torchvision/models/detection/rpn.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -230,7 +230,7 @@ def filter_proposals(self, proposals, objectness, image_shapes, num_anchors_per_
230230
# type: (Tensor, Tensor, List[Tuple[int, int]], List[int]) -> Tuple[List[Tensor], List[Tensor]]
231231
num_images = proposals.shape[0]
232232
device = proposals.device
233-
# do not backprop throught objectness
233+
# do not backprop through objectness
234234
objectness = objectness.detach()
235235
objectness = objectness.reshape(num_images, -1)
236236

torchvision/models/feature_extraction.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ class NodePathTracer(LeafModuleAwareTracer):
3737
"""
3838
NodePathTracer is an FX tracer that, for each operation, also records the
3939
name of the Node from which the operation originated. A node name here is
40-
a `.` seperated path walking the hierarchy from top level module down to
40+
a `.` separated path walking the hierarchy from top level module down to
4141
leaf operation or leaf module. The name of the top level module is not
4242
included as part of the node name. For example, if we trace a module whose
4343
forward method applies a ReLU module, the name for that node will simply

torchvision/ops/ps_roi_align.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,10 @@ def ps_roi_align(
3030
in the batch.
3131
output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
3232
is performed, as (height, width).
33-
spatial_scale (float): a scaling factor that maps the input coordinates to
34-
the box coordinates. Default: 1.0
33+
spatial_scale (float): a scaling factor that maps the box coordinates to
34+
the input coordinates. For example, if your boxes are defined on the scale
35+
of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
36+
the original image), you'll want to set this to 0.5. Default: 1.0
3537
sampling_ratio (int): number of sampling points in the interpolation grid
3638
used to compute the output value of each pooled output bin. If > 0,
3739
then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If

torchvision/ops/ps_roi_pool.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,10 @@ def ps_roi_pool(
2929
in the batch.
3030
output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
3131
is performed, as (height, width).
32-
spatial_scale (float): a scaling factor that maps the input coordinates to
33-
the box coordinates. Default: 1.0
32+
spatial_scale (float): a scaling factor that maps the box coordinates to
33+
the input coordinates. For example, if your boxes are defined on the scale
34+
of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
35+
the original image), you'll want to set this to 0.5. Default: 1.0
3436
3537
Returns:
3638
Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs.

torchvision/ops/roi_align.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,8 +33,10 @@ def roi_align(
3333
in the batch.
3434
output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
3535
is performed, as (height, width).
36-
spatial_scale (float): a scaling factor that maps the input coordinates to
37-
the box coordinates. Default: 1.0
36+
spatial_scale (float): a scaling factor that maps the box coordinates to
37+
the input coordinates. For example, if your boxes are defined on the scale
38+
of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
39+
the original image), you'll want to set this to 0.5. Default: 1.0
3840
sampling_ratio (int): number of sampling points in the interpolation grid
3941
used to compute the output value of each pooled output bin. If > 0,
4042
then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If

torchvision/ops/roi_pool.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,10 @@ def roi_pool(
3030
in the batch.
3131
output_size (int or Tuple[int, int]): the size of the output after the cropping
3232
is performed, as (height, width)
33-
spatial_scale (float): a scaling factor that maps the input coordinates to
34-
the box coordinates. Default: 1.0
33+
spatial_scale (float): a scaling factor that maps the box coordinates to
34+
the input coordinates. For example, if your boxes are defined on the scale
35+
of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
36+
the original image), you'll want to set this to 0.5. Default: 1.0
3537
3638
Returns:
3739
Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.

torchvision/prototype/datasets/_builtin/caltech.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -120,8 +120,7 @@ def _make_datapipe(
120120

121121
images_dp = TarArchiveReader(images_dp)
122122
images_dp = Filter(images_dp, self._is_not_background_image)
123-
# FIXME: add this after https://github.com/pytorch/pytorch/issues/65808 is resolved
124-
# images_dp = Shuffler(images_dp, buffer_size=INFINITE_BUFFER_SIZE)
123+
images_dp = Shuffler(images_dp, buffer_size=INFINITE_BUFFER_SIZE)
125124

126125
anns_dp = TarArchiveReader(anns_dp)
127126
anns_dp = Filter(anns_dp, self._is_ann)
@@ -189,8 +188,7 @@ def _make_datapipe(
189188
dp = resource_dps[0]
190189
dp = TarArchiveReader(dp)
191190
dp = Filter(dp, self._is_not_rogue_file)
192-
# FIXME: add this after https://github.com/pytorch/pytorch/issues/65808 is resolved
193-
# dp = Shuffler(dp, buffer_size=INFINITE_BUFFER_SIZE)
191+
dp = Shuffler(dp, buffer_size=INFINITE_BUFFER_SIZE)
194192
return Mapper(dp, self._collate_and_decode_sample, fn_kwargs=dict(decoder=decoder))
195193

196194
def generate_categories_file(self, root: Union[str, pathlib.Path]) -> None:

0 commit comments

Comments (0)