 from torchvision.transforms import functional_tensor as _FT, functional_pil as _FP
 from torchvision.transforms.functional import pil_modes_mapping, _get_inverse_affine_matrix

-from ._meta_conversion import convert_bounding_box_format
+from ._meta import convert_bounding_box_format, get_dimensions_image_tensor, get_dimensions_image_pil


 horizontal_flip_image_tensor = _FT.hflip
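For context on the switch away from `_FT.get_dimensions` / `_FP.get_dimensions`: the new `_meta` helpers are used as drop-in replacements throughout this diff, so they presumably return the same `[channels, height, width]` list. A minimal sketch of plausible implementations (the bodies below are assumptions, not the actual `_meta` code):

```python
import torch
import PIL.Image

def get_dimensions_image_tensor(image: torch.Tensor) -> list:
    # Assume a (..., C, H, W) layout: the last three dims are C, H, W.
    channels, height, width = image.shape[-3:]
    return [channels, height, width]

def get_dimensions_image_pil(image: PIL.Image.Image) -> list:
    # PIL reports size as (width, height); bands give the channel count.
    width, height = image.size
    return [len(image.getbands()), height, width]
```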
@@ -39,7 +39,7 @@ def resize_image_tensor(
     antialias: Optional[bool] = None,
 ) -> torch.Tensor:
     new_height, new_width = size
-    num_channels, old_height, old_width = _FT.get_dimensions(image)
+    num_channels, old_height, old_width = get_dimensions_image_tensor(image)
     batch_shape = image.shape[:-3]
     return _FT.resize(
         image.reshape((-1, num_channels, old_height, old_width)),
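The hunk above keeps the existing batching trick: arbitrary leading batch dimensions are flattened so the 4D `_FT.resize` kernel can run, then restored afterwards. A standalone sketch of that reshape round-trip (shapes are illustrative):

```python
import torch

image = torch.rand(5, 2, 3, 64, 64)     # (B1, B2, C, H, W)
batch_shape = image.shape[:-3]          # torch.Size([5, 2])
flat = image.reshape((-1, 3, 64, 64))   # (10, 3, 64, 64), kernel-friendly
restored = flat.reshape(batch_shape + flat.shape[-3:])
assert restored.shape == image.shape
```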
@@ -141,7 +141,7 @@ def affine_image_tensor(

     center_f = [0.0, 0.0]
     if center is not None:
-        _, height, width = _FT.get_dimensions(img)
+        _, height, width = get_dimensions_image_tensor(img)
         # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
         center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, (width, height))]

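The `center_f` expression translates pixel coordinates so that (0, 0) lands on the image center, which is the convention the inverse affine matrix expects. A quick worked example with assumed values:

```python
width, height = 64, 48
center = [32.0, 24.0]  # exact pixel center of a 64x48 image
center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, (width, height))]
print(center_f)        # [0.0, 0.0] -> the affine matrix sees the origin
```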
@@ -167,7 +167,7 @@ def affine_image_pil(
     # it is visually better to estimate the center without 0.5 offset
     # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
     if center is None:
-        _, height, width = _FP.get_dimensions(img)
+        _, height, width = get_dimensions_image_pil(img)
         center = [width * 0.5, height * 0.5]
     matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)

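As the comment in this hunk notes, the PIL path deliberately estimates the default center without the 0.5 offset so that a 90-degree rotation lines up with `torch.rot90`. With assumed dimensions, the difference looks like this:

```python
width, height = 4, 4
center = [width * 0.5, height * 0.5]   # [2.0, 2.0]
# The pixel-grid midpoint would be [(width - 1) * 0.5, (height - 1) * 0.5] = [1.5, 1.5];
# using size * 0.5 instead keeps 90-degree rotations aligned with torch.rot90.
```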
@@ -184,7 +184,7 @@ def rotate_image_tensor(
 ) -> torch.Tensor:
     center_f = [0.0, 0.0]
     if center is not None:
-        _, height, width = _FT.get_dimensions(img)
+        _, height, width = get_dimensions_image_tensor(img)
         # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
         center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, (width, height))]

@@ -260,13 +260,13 @@ def _center_crop_compute_crop_anchor(

 def center_crop_image_tensor(img: torch.Tensor, output_size: List[int]) -> torch.Tensor:
     crop_height, crop_width = _center_crop_parse_output_size(output_size)
-    _, image_height, image_width = _FT.get_dimensions(img)
+    _, image_height, image_width = get_dimensions_image_tensor(img)

     if crop_height > image_height or crop_width > image_width:
         padding_ltrb = _center_crop_compute_padding(crop_height, crop_width, image_height, image_width)
         img = pad_image_tensor(img, padding_ltrb, fill=0)

-    _, image_height, image_width = _FT.get_dimensions(img)
+    _, image_height, image_width = get_dimensions_image_tensor(img)
     if crop_width == image_width and crop_height == image_height:
         return img

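The hunk only shows the call sites, but the flow is: pad when the requested crop exceeds the image, re-read the (now larger) dimensions, then crop. A presumed sketch of the anchor helper, assuming the standard rounded center offsets (the body is an assumption, since the diff does not show it):

```python
def _center_crop_compute_crop_anchor(crop_height, crop_width, image_height, image_width):
    # Top-left corner of a centered crop_height x crop_width window.
    crop_top = int(round((image_height - crop_height) / 2.0))
    crop_left = int(round((image_width - crop_width) / 2.0))
    return crop_top, crop_left

print(_center_crop_compute_crop_anchor(10, 10, 32, 48))  # (11, 19)
```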
@@ -276,13 +276,13 @@ def center_crop_image_tensor(img: torch.Tensor, output_size: List[int]) -> torch

 def center_crop_image_pil(img: PIL.Image.Image, output_size: List[int]) -> PIL.Image.Image:
     crop_height, crop_width = _center_crop_parse_output_size(output_size)
-    _, image_height, image_width = _FP.get_dimensions(img)
+    _, image_height, image_width = get_dimensions_image_pil(img)

     if crop_height > image_height or crop_width > image_width:
         padding_ltrb = _center_crop_compute_padding(crop_height, crop_width, image_height, image_width)
         img = pad_image_pil(img, padding_ltrb, fill=0)

-    _, image_height, image_width = _FP.get_dimensions(img)
+    _, image_height, image_width = get_dimensions_image_pil(img)
     if crop_width == image_width and crop_height == image_height:
         return img

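Taken together, both kernels share the same pad-if-needed-then-crop semantics; the second dimensions query is what picks up the enlarged size after padding. A hypothetical usage of the PIL kernel, assuming it is importable as defined in this PR:

```python
import PIL.Image

img = PIL.Image.new("RGB", (20, 20))
out = center_crop_image_pil(img, output_size=[32, 32])  # pads to fit, then crops
assert out.size == (32, 32)                             # PIL size is (width, height)
```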