@@ -38,13 +38,13 @@ def horizontal_flip_bounding_box(
 
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     bounding_box[:, [0, 2]] = spatial_size[1] - bounding_box[:, [2, 0]]
 
     return convert_format_bounding_box(
         bounding_box, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-    ).view(shape)
+    ).reshape(shape)
 
 
 def horizontal_flip_video(video: torch.Tensor) -> torch.Tensor:
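Note: the view → reshape change made throughout this diff matters because Tensor.view requires the source strides to be compatible with the requested shape, while reshape falls back to a copy when they are not. convert_format_bounding_box can hand back a non-contiguous tensor, in which case .view raises. A minimal sketch of the failure mode (the tensor values are illustrative only):

import torch

t = torch.arange(8).reshape(2, 4).T  # transposing makes the tensor non-contiguous
try:
    t.view(-1, 4)  # view cannot reinterpret these strides and raises
except RuntimeError as err:
    print("view failed:", err)
print(t.reshape(-1, 4))  # reshape copies when needed and succeeds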
@@ -75,13 +75,13 @@ def vertical_flip_bounding_box(
 
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     bounding_box[:, [1, 3]] = spatial_size[0] - bounding_box[:, [3, 1]]
 
     return convert_format_bounding_box(
         bounding_box, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-    ).view(shape)
+    ).reshape(shape)
 
 
 def vertical_flip_video(video: torch.Tensor) -> torch.Tensor:
@@ -123,7 +123,7 @@ def resize_image_tensor(
     extra_dims = image.shape[:-3]
 
     if image.numel() > 0:
-        image = image.view(-1, num_channels, old_height, old_width)
+        image = image.reshape(-1, num_channels, old_height, old_width)
 
         image = _FT.resize(
             image,
@@ -132,7 +132,7 @@ def resize_image_tensor(
             antialias=antialias,
         )
 
-    return image.view(extra_dims + (num_channels, new_height, new_width))
+    return image.reshape(extra_dims + (num_channels, new_height, new_width))
 
 
 @torch.jit.unused
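The pattern touched here (and in the affine, rotate, and pad hunks below) folds arbitrary leading batch dimensions into one, runs the kernel on a plain (N, C, H, W) tensor, then restores the leading dims. A rough self-contained sketch of that pattern; flatten_apply and the interpolate call are illustrative stand-ins, not the library API:

import torch
import torch.nn.functional as F

def flatten_apply(image, op):
    # Fold all leading dims into one batch dim, apply a (N, C, H, W) op,
    # then unfold the leading dims again.
    extra_dims = image.shape[:-3]
    c, h, w = image.shape[-3:]
    out = op(image.reshape(-1, c, h, w))
    return out.reshape(extra_dims + out.shape[-3:])

video = torch.rand(2, 5, 3, 32, 32)  # e.g. (batch, time, C, H, W)
halved = flatten_apply(video, lambda x: F.interpolate(x, scale_factor=0.5))
print(halved.shape)  # torch.Size([2, 5, 3, 16, 16])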
@@ -168,7 +168,7 @@ def resize_bounding_box(
     new_height, new_width = _compute_resized_output_size(spatial_size, size=size, max_size=max_size)
     ratios = torch.tensor((new_width / old_width, new_height / old_height), device=bounding_box.device)
     return (
-        bounding_box.view(-1, 2, 2).mul(ratios).to(bounding_box.dtype).view(bounding_box.shape),
+        bounding_box.reshape(-1, 2, 2).mul(ratios).to(bounding_box.dtype).reshape(bounding_box.shape),
         (new_height, new_width),
     )
 
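For reference, the reshape(-1, 2, 2) here pairs each box's coordinates as (x, y) points so that a single (width_ratio, height_ratio) tensor broadcasts over both corners at once. A worked example with made-up sizes (a 100x100 image resized to 200x50):

import torch

box = torch.tensor([[10.0, 20.0, 30.0, 40.0]])  # XYXY
ratios = torch.tensor((200 / 100, 50 / 100))    # (new_w/old_w, new_h/old_h)
scaled = box.reshape(-1, 2, 2).mul(ratios).reshape(box.shape)
print(scaled)  # tensor([[20., 10., 60., 20.]])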
@@ -270,7 +270,7 @@ def affine_image_tensor(
 
     num_channels, height, width = image.shape[-3:]
     extra_dims = image.shape[:-3]
-    image = image.view(-1, num_channels, height, width)
+    image = image.reshape(-1, num_channels, height, width)
 
     angle, translate, shear, center = _affine_parse_args(angle, translate, scale, shear, interpolation, center)
 
@@ -283,7 +283,7 @@ def affine_image_tensor(
     matrix = _get_inverse_affine_matrix(center_f, angle, translate_f, scale, shear)
 
     output = _FT.affine(image, matrix, interpolation=interpolation.value, fill=fill)
-    return output.view(extra_dims + (num_channels, height, width))
+    return output.reshape(extra_dims + (num_channels, height, width))
 
 
 @torch.jit.unused
@@ -338,20 +338,20 @@ def _affine_bounding_box_xyxy(
             dtype=dtype,
             device=device,
         )
-        .view(2, 3)
+        .reshape(2, 3)
         .T
     )
     # 1) Let's transform bboxes into a tensor of 4 points (top-left, top-right, bottom-left, bottom-right corners).
     # Tensor of points has shape (N * 4, 3), where N is the number of bboxes
     # Single point structure is similar to
     # [(xmin, ymin, 1), (xmax, ymin, 1), (xmax, ymax, 1), (xmin, ymax, 1)]
-    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].view(-1, 2)
+    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
     points = torch.cat([points, torch.ones(points.shape[0], 1, device=points.device)], dim=-1)
     # 2) Now let's transform the points using affine matrix
     transformed_points = torch.matmul(points, transposed_affine_matrix)
     # 3) Reshape transformed points to [N boxes, 4 points, x/y coords]
     # and compute bounding box from 4 transformed points:
-    transformed_points = transformed_points.view(-1, 4, 2)
+    transformed_points = transformed_points.reshape(-1, 4, 2)
     out_bbox_mins, _ = torch.min(transformed_points, dim=1)
     out_bbox_maxs, _ = torch.max(transformed_points, dim=1)
     out_bboxes = torch.cat([out_bbox_mins, out_bbox_maxs], dim=1)
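A toy run of steps 1-3 above, with a hand-picked pure-translation affine matrix (shift x by +5, y by +10) so the expected output is easy to verify by eye:

import torch

box = torch.tensor([[0.0, 0.0, 2.0, 4.0]])                   # XYXY
affine = torch.tensor([[1.0, 0.0, 5.0], [0.0, 1.0, 10.0]])   # 2x3 matrix
points = box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
points = torch.cat([points, torch.ones(points.shape[0], 1)], dim=-1)  # homogeneous
transformed = torch.matmul(points, affine.T).reshape(-1, 4, 2)
mins = transformed.min(dim=1).values
maxs = transformed.max(dim=1).values
print(torch.cat([mins, maxs], dim=1))  # tensor([[ 5., 10.,  7., 14.]])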
@@ -396,15 +396,15 @@ def affine_bounding_box(
     original_shape = bounding_box.shape
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     out_bboxes, _ = _affine_bounding_box_xyxy(bounding_box, spatial_size, angle, translate, scale, shear, center)
 
     # out_bboxes should be of shape [N boxes, 4]
 
     return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-    ).view(original_shape)
+    ).reshape(original_shape)
 
 
 def affine_mask(
@@ -539,7 +539,7 @@ def rotate_image_tensor(
 
     if image.numel() > 0:
         image = _FT.rotate(
-            image.view(-1, num_channels, height, width),
+            image.reshape(-1, num_channels, height, width),
             matrix,
             interpolation=interpolation.value,
             expand=expand,
@@ -549,7 +549,7 @@ def rotate_image_tensor(
     else:
         new_width, new_height = _FT._compute_affine_output_size(matrix, width, height) if expand else (width, height)
 
-    return image.view(extra_dims + (num_channels, new_height, new_width))
+    return image.reshape(extra_dims + (num_channels, new_height, new_width))
 
 
 @torch.jit.unused
@@ -585,7 +585,7 @@ def rotate_bounding_box(
     original_shape = bounding_box.shape
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     out_bboxes, spatial_size = _affine_bounding_box_xyxy(
         bounding_box,
@@ -601,7 +601,7 @@ def rotate_bounding_box(
     return (
         convert_format_bounding_box(
             out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-        ).view(original_shape),
+        ).reshape(original_shape),
         spatial_size,
     )
 
@@ -691,15 +691,15 @@ def _pad_with_scalar_fill(
 
     if image.numel() > 0:
         image = _FT.pad(
-            img=image.view(-1, num_channels, height, width), padding=padding, fill=fill, padding_mode=padding_mode
+            img=image.reshape(-1, num_channels, height, width), padding=padding, fill=fill, padding_mode=padding_mode
         )
         new_height, new_width = image.shape[-2:]
     else:
         left, right, top, bottom = _FT._parse_pad_padding(padding)
         new_height = height + top + bottom
         new_width = width + left + right
 
-    return image.view(extra_dims + (num_channels, new_height, new_width))
+    return image.reshape(extra_dims + (num_channels, new_height, new_width))
 
 
 # TODO: This should be removed once pytorch pad supports non-scalar padding values
@@ -714,7 +714,7 @@ def _pad_with_vector_fill(
 
     output = _pad_with_scalar_fill(image, padding, fill=0, padding_mode="constant")
     left, right, top, bottom = _parse_pad_padding(padding)
-    fill = torch.tensor(fill, dtype=image.dtype, device=image.device).view(-1, 1, 1)
+    fill = torch.tensor(fill, dtype=image.dtype, device=image.device).reshape(-1, 1, 1)
 
     if top > 0:
         output[..., :top, :] = fill
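The reshape(-1, 1, 1) in this hunk is what lets a per-channel fill broadcast against the (C, strip_height, W) border slices; a bare (C,) tensor would fail to broadcast. A small illustration with made-up values:

import torch

output = torch.zeros(3, 4, 6)  # (C, H, W), pretend this is the padded image
fill = torch.tensor([255.0, 128.0, 0.0]).reshape(-1, 1, 1)  # (C,) -> (C, 1, 1)
top = 1
output[..., :top, :] = fill  # each channel's top strip gets its own value
print(output[:, 0, 0])  # tensor([255., 128.,   0.])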
@@ -863,15 +863,15 @@ def perspective_image_tensor(
     shape = image.shape
 
     if image.ndim > 4:
-        image = image.view((-1,) + shape[-3:])
+        image = image.reshape((-1,) + shape[-3:])
         needs_unsquash = True
     else:
         needs_unsquash = False
 
     output = _FT.perspective(image, perspective_coeffs, interpolation=interpolation.value, fill=fill)
 
     if needs_unsquash:
-        output = output.view(shape)
+        output = output.reshape(shape)
 
     return output
 
@@ -898,7 +898,7 @@ def perspective_bounding_box(
     original_shape = bounding_box.shape
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     dtype = bounding_box.dtype if torch.is_floating_point(bounding_box) else torch.float32
     device = bounding_box.device
@@ -947,7 +947,7 @@ def perspective_bounding_box(
     # Tensor of points has shape (N * 4, 3), where N is the number of bboxes
     # Single point structure is similar to
     # [(xmin, ymin, 1), (xmax, ymin, 1), (xmax, ymax, 1), (xmin, ymax, 1)]
-    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].view(-1, 2)
+    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
     points = torch.cat([points, torch.ones(points.shape[0], 1, device=points.device)], dim=-1)
     # 2) Now let's transform the points using perspective matrices
     #   x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
@@ -959,7 +959,7 @@ def perspective_bounding_box(
 
     # 3) Reshape transformed points to [N boxes, 4 points, x/y coords]
     # and compute bounding box from 4 transformed points:
-    transformed_points = transformed_points.view(-1, 4, 2)
+    transformed_points = transformed_points.reshape(-1, 4, 2)
     out_bbox_mins, _ = torch.min(transformed_points, dim=1)
     out_bbox_maxs, _ = torch.max(transformed_points, dim=1)
     out_bboxes = torch.cat([out_bbox_mins, out_bbox_maxs], dim=1).to(bounding_box.dtype)
@@ -968,7 +968,7 @@ def perspective_bounding_box(
 
     return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-    ).view(original_shape)
+    ).reshape(original_shape)
 
 
 def perspective_mask(
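The denominator in the commented formula above is what makes the mapping projective rather than affine; a scalar evaluation with illustrative coefficients (identity apart from a nonzero coeffs[6]):

coeffs = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.01, 0.0]
x, y = 50.0, 20.0
denom = coeffs[6] * x + coeffs[7] * y + 1
x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / denom
y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / denom
print(x_out, y_out)  # 33.33... 13.33... -- points farther out in x shrink more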
@@ -1027,15 +1027,15 @@ def elastic_image_tensor(
     shape = image.shape
 
     if image.ndim > 4:
-        image = image.view((-1,) + shape[-3:])
+        image = image.reshape((-1,) + shape[-3:])
         needs_unsquash = True
     else:
         needs_unsquash = False
 
     output = _FT.elastic_transform(image, displacement, interpolation=interpolation.value, fill=fill)
 
     if needs_unsquash:
-        output = output.view(shape)
+        output = output.reshape(shape)
 
     return output
 
@@ -1063,7 +1063,7 @@ def elastic_bounding_box(
     original_shape = bounding_box.shape
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     # Question (vfdev-5): should we rely on good displacement shape and fetch image size from it
     # Or add spatial_size arg and check displacement shape
@@ -1075,21 +1075,21 @@ def elastic_bounding_box(
     inv_grid = id_grid - displacement
 
     # Get points from bboxes
-    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].view(-1, 2)
+    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
     index_x = torch.floor(points[:, 0] + 0.5).to(dtype=torch.long)
     index_y = torch.floor(points[:, 1] + 0.5).to(dtype=torch.long)
     # Transform points:
     t_size = torch.tensor(spatial_size[::-1], device=displacement.device, dtype=displacement.dtype)
     transformed_points = (inv_grid[0, index_y, index_x, :] + 1) * 0.5 * t_size - 0.5
 
-    transformed_points = transformed_points.view(-1, 4, 2)
+    transformed_points = transformed_points.reshape(-1, 4, 2)
     out_bbox_mins, _ = torch.min(transformed_points, dim=1)
     out_bbox_maxs, _ = torch.max(transformed_points, dim=1)
     out_bboxes = torch.cat([out_bbox_mins, out_bbox_maxs], dim=1).to(bounding_box.dtype)
 
     return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-    ).view(original_shape)
+    ).reshape(original_shape)
 
 
 def elastic_mask(
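As a sanity check of the corner lookup in this hunk: with zero displacement, inv_grid is just the identity grid, and the normalization round-trip returns each corner unchanged. A sketch assuming the pixel-centre grid convention that the elastic kernel's identity grid uses:

import torch

h, w = 10, 10
ys = torch.linspace((-h + 1) / h, (h - 1) / h, h)  # pixel-centre coordinates
xs = torch.linspace((-w + 1) / w, (w - 1) / w, w)
base_y, base_x = torch.meshgrid(ys, xs, indexing="ij")
inv_grid = torch.stack([base_x, base_y], dim=-1).unsqueeze(0)  # (1, H, W, 2)

point = torch.tensor([[3.0, 7.0]])  # one (x, y) box corner
ix = torch.floor(point[:, 0] + 0.5).long()
iy = torch.floor(point[:, 1] + 0.5).long()
t_size = torch.tensor([float(w), float(h)])
out = (inv_grid[0, iy, ix, :] + 1) * 0.5 * t_size - 0.5
print(out)  # tensor([[3., 7.]])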