@@ -842,7 +842,7 @@ def _reference_horizontal_flip_bounding_box(self, bounding_box):
842842 "fn" , [F .horizontal_flip , transform_cls_to_functional (transforms .RandomHorizontalFlip , p = 1 )]
843843 )
844844 def test_bounding_box_correctness (self , format , fn ):
845- bounding_box = self ._make_input (datapoints .BoundingBox )
845+ bounding_box = self ._make_input (datapoints .BoundingBox , format = format )
846846
847847 actual = fn (bounding_box )
848848 expected = self ._reference_horizontal_flip_bounding_box (bounding_box )
@@ -1025,12 +1025,10 @@ def test_kernel_bounding_box(self, param, value, format, dtype, device):
10251025
10261026 @pytest .mark .parametrize ("mask_type" , ["segmentation" , "detection" ])
10271027 def test_kernel_mask (self , mask_type ):
1028- check_kernel (
1029- F .affine_mask , self ._make_input (datapoints .Mask , mask_type = mask_type ), ** self ._MINIMAL_AFFINE_KWARGS
1030- )
1028+ self ._check_kernel (F .affine_mask , self ._make_input (datapoints .Mask , mask_type = mask_type ))
10311029
10321030 def test_kernel_video (self ):
1033- check_kernel (F .affine_video , self ._make_input (datapoints .Video ), ** self . _MINIMAL_AFFINE_KWARGS )
1031+ self . _check_kernel (F .affine_video , self ._make_input (datapoints .Video ))
10341032
10351033 @pytest .mark .parametrize (
10361034 ("input_type" , "kernel" ),
@@ -1301,3 +1299,143 @@ def test_transform_negative_shear_error(self):
13011299 def test_transform_unknown_fill_error (self ):
13021300 with pytest .raises (TypeError , match = "Got inappropriate fill arg" ):
13031301 transforms .RandomAffine (degrees = 0 , fill = "fill" )
1302+
1303+
class TestVerticalFlip:
    """Kernel, dispatcher, transform, and reference-correctness tests for vertical flipping."""

    def _make_input(self, input_type, *, dtype=None, device="cpu", spatial_size=(17, 11), **kwargs):
        # Build a flip-testable input of the requested type. Default dtypes depend
        # on the type: uint8 for images/masks/videos, float32 for bounding boxes.
        if input_type in {torch.Tensor, PIL.Image.Image, datapoints.Image}:
            made = make_image(size=spatial_size, dtype=dtype or torch.uint8, device=device, **kwargs)
            if input_type is torch.Tensor:
                # Strip the datapoint subclass to get a plain tensor.
                made = made.as_subclass(torch.Tensor)
            elif input_type is PIL.Image.Image:
                made = F.to_image_pil(made)
        elif input_type is datapoints.BoundingBox:
            kwargs.setdefault("format", datapoints.BoundingBoxFormat.XYXY)
            made = make_bounding_box(
                dtype=dtype or torch.float32,
                device=device,
                spatial_size=spatial_size,
                **kwargs,
            )
        elif input_type is datapoints.Mask:
            made = make_segmentation_mask(size=spatial_size, dtype=dtype or torch.uint8, device=device, **kwargs)
        elif input_type is datapoints.Video:
            made = make_video(size=spatial_size, dtype=dtype or torch.uint8, device=device, **kwargs)

        return made

    @pytest.mark.parametrize("dtype", [torch.float32, torch.uint8])
    @pytest.mark.parametrize("device", cpu_and_cuda())
    def test_kernel_image_tensor(self, dtype, device):
        check_kernel(F.vertical_flip_image_tensor, self._make_input(torch.Tensor, dtype=dtype, device=device))

    @pytest.mark.parametrize("format", list(datapoints.BoundingBoxFormat))
    @pytest.mark.parametrize("dtype", [torch.float32, torch.int64])
    @pytest.mark.parametrize("device", cpu_and_cuda())
    def test_kernel_bounding_box(self, format, dtype, device):
        bounding_box = self._make_input(datapoints.BoundingBox, dtype=dtype, device=device, format=format)
        check_kernel(
            F.vertical_flip_bounding_box,
            bounding_box,
            format=format,
            spatial_size=bounding_box.spatial_size,
        )

    @pytest.mark.parametrize(
        "dtype_and_make_mask", [(torch.uint8, make_segmentation_mask), (torch.bool, make_detection_mask)]
    )
    def test_kernel_mask(self, dtype_and_make_mask):
        dtype, make_mask = dtype_and_make_mask
        check_kernel(F.vertical_flip_mask, make_mask(dtype=dtype))

    def test_kernel_video(self):
        check_kernel(F.vertical_flip_video, self._make_input(datapoints.Video))

    @pytest.mark.parametrize(
        ("input_type", "kernel"),
        [
            (torch.Tensor, F.vertical_flip_image_tensor),
            (PIL.Image.Image, F.vertical_flip_image_pil),
            (datapoints.Image, F.vertical_flip_image_tensor),
            (datapoints.BoundingBox, F.vertical_flip_bounding_box),
            (datapoints.Mask, F.vertical_flip_mask),
            (datapoints.Video, F.vertical_flip_video),
        ],
    )
    def test_dispatcher(self, kernel, input_type):
        check_dispatcher(F.vertical_flip, kernel, self._make_input(input_type))

    @pytest.mark.parametrize(
        ("input_type", "kernel"),
        [
            (torch.Tensor, F.vertical_flip_image_tensor),
            (PIL.Image.Image, F.vertical_flip_image_pil),
            (datapoints.Image, F.vertical_flip_image_tensor),
            (datapoints.BoundingBox, F.vertical_flip_bounding_box),
            (datapoints.Mask, F.vertical_flip_mask),
            (datapoints.Video, F.vertical_flip_video),
        ],
    )
    def test_dispatcher_signature(self, kernel, input_type):
        check_dispatcher_signatures_match(F.vertical_flip, kernel=kernel, input_type=input_type)

    @pytest.mark.parametrize(
        "input_type",
        [torch.Tensor, PIL.Image.Image, datapoints.Image, datapoints.BoundingBox, datapoints.Mask, datapoints.Video],
    )
    @pytest.mark.parametrize("device", cpu_and_cuda())
    def test_transform(self, input_type, device):
        sample = self._make_input(input_type, device=device)

        check_transform(transforms.RandomVerticalFlip, sample, p=1)

    @pytest.mark.parametrize("fn", [F.vertical_flip, transform_cls_to_functional(transforms.RandomVerticalFlip, p=1)])
    def test_image_correctness(self, fn):
        image = self._make_input(torch.Tensor, dtype=torch.uint8, device="cpu")

        # PIL's flip acts as the reference implementation.
        actual = fn(image)
        expected = F.to_image_tensor(F.vertical_flip(F.to_image_pil(image)))

        torch.testing.assert_close(actual, expected)

    def _reference_vertical_flip_bounding_box(self, bounding_box):
        # A vertical flip maps y -> H - y, i.e. the affine transform
        # [[1, 0, 0], [0, -1, H]] where H is the image height.
        height = bounding_box.spatial_size[0]
        affine_matrix = np.array(
            [
                [1, 0, 0],
                [0, -1, height],
            ],
            dtype="float64" if bounding_box.dtype == torch.float64 else "float32",
        )

        expected = reference_affine_bounding_box_helper(
            bounding_box,
            format=bounding_box.format,
            spatial_size=bounding_box.spatial_size,
            affine_matrix=affine_matrix,
        )

        return datapoints.BoundingBox.wrap_like(bounding_box, expected)

    @pytest.mark.parametrize("format", list(datapoints.BoundingBoxFormat))
    @pytest.mark.parametrize("fn", [F.vertical_flip, transform_cls_to_functional(transforms.RandomVerticalFlip, p=1)])
    def test_bounding_box_correctness(self, format, fn):
        bounding_box = self._make_input(datapoints.BoundingBox, format=format)

        actual = fn(bounding_box)
        expected = self._reference_vertical_flip_bounding_box(bounding_box)

        torch.testing.assert_close(actual, expected)

    @pytest.mark.parametrize(
        "input_type",
        [torch.Tensor, PIL.Image.Image, datapoints.Image, datapoints.BoundingBox, datapoints.Mask, datapoints.Video],
    )
    @pytest.mark.parametrize("device", cpu_and_cuda())
    def test_transform_noop(self, input_type, device):
        # With p=0 the transform must be the identity.
        sample = self._make_input(input_type, device=device)

        output = transforms.RandomVerticalFlip(p=0)(sample)

        assert_equal(output, sample)
0 commit comments