From 04519a3b7ddcb668ee8fbbfdeff963c9c734e87c Mon Sep 17 00:00:00 2001
From: Minliang Lin <62332477+MinliangLin@users.noreply.github.com>
Date: Sun, 13 Aug 2023 16:05:00 +0800
Subject: [PATCH 1/2] fix: typo in plot_transforms_v2_e2e.py

---
 gallery/plot_transforms_v2_e2e.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gallery/plot_transforms_v2_e2e.py b/gallery/plot_transforms_v2_e2e.py
index 8a80c78e1f7..11c0060cb4f 100644
--- a/gallery/plot_transforms_v2_e2e.py
+++ b/gallery/plot_transforms_v2_e2e.py
@@ -5,7 +5,7 @@
 
 Object detection is not supported out of the box by ``torchvision.transforms`` v1, since it only supports images.
 ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This example
-showcases an end-to-end object detection training using the stable ``torchvisio.datasets`` and ``torchvision.models`` as
+showcases an end-to-end object detection training using the stable ``torchvision.datasets`` and ``torchvision.models`` as
 well as the new ``torchvision.transforms.v2`` v2 API.
 """
 

From c03e60dc9f373f1ca36daeb47bc7d8aa65981842 Mon Sep 17 00:00:00 2001
From: MinliangLin
Date: Sun, 13 Aug 2023 08:19:53 +0000
Subject: [PATCH 2/2] fix: format the docstring to less than 120 characters

---
 gallery/plot_transforms_v2_e2e.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/gallery/plot_transforms_v2_e2e.py b/gallery/plot_transforms_v2_e2e.py
index 11c0060cb4f..ccffea766c8 100644
--- a/gallery/plot_transforms_v2_e2e.py
+++ b/gallery/plot_transforms_v2_e2e.py
@@ -5,8 +5,8 @@
 
 Object detection is not supported out of the box by ``torchvision.transforms`` v1, since it only supports images.
 ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This example
-showcases an end-to-end object detection training using the stable ``torchvision.datasets`` and ``torchvision.models`` as
-well as the new ``torchvision.transforms.v2`` v2 API.
+showcases an end-to-end object detection training using the stable ``torchvision.datasets`` and ``torchvision.models``
+as well as the new ``torchvision.transforms.v2`` v2 API.
 """
 
 import pathlib