@@ -1424,7 +1424,8 @@ def five_crop(
     if isinstance(inpt, torch.Tensor):
         output = five_crop_image_tensor(inpt, size)
         if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
-            output = tuple(type(inpt).wrap_like(inpt, item) for item in output)  # type: ignore[assignment,arg-type]
+            cls = type(inpt)
+            output = tuple(cls.wrap_like(inpt, item) for item in output)  # type: ignore[assignment,arg-type]
         return output
     else:  # isinstance(inpt, PIL.Image.Image):
         return five_crop_image_pil(inpt, size)
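The hunk above only hoists the repeated type(inpt) lookup into a local cls before each crop is re-wrapped; behavior is unchanged. A minimal, self-contained sketch of the pattern follows, where FakeImage, its wrap_like-style classmethod, and rewrap_crops are hypothetical stand-ins rather than torchvision code (the real wrap_like also carries metadata from the reference feature):

import torch
from typing import Tuple


class FakeImage(torch.Tensor):
    """Hypothetical stand-in for a tensor subclass such as features.Image."""

    @classmethod
    def wrap_like(cls, other: "FakeImage", tensor: torch.Tensor) -> "FakeImage":
        # Re-view a plain tensor as the same subclass as `other`.
        # (The real API would also copy metadata from `other`.)
        return tensor.as_subclass(cls)


def rewrap_crops(inpt: FakeImage, crops: Tuple[torch.Tensor, ...]) -> Tuple[FakeImage, ...]:
    cls = type(inpt)  # hoisted once, as in the diff, instead of calling type(inpt) per item
    return tuple(cls.wrap_like(inpt, item) for item in crops)


img = torch.rand(3, 32, 32).as_subclass(FakeImage)
plain_crop = img[..., :16, :16].as_subclass(torch.Tensor)  # simulate a plain-tensor crop
print(type(rewrap_crops(img, (plain_crop,))[0]).__name__)  # FakeImage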
@@ -1467,7 +1468,8 @@ def ten_crop(
     if isinstance(inpt, torch.Tensor):
         output = ten_crop_image_tensor(inpt, size, vertical_flip=vertical_flip)
         if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
-            output = [type(inpt).wrap_like(inpt, item) for item in output]  # type: ignore[arg-type]
+            cls = type(inpt)
+            output = [cls.wrap_like(inpt, item) for item in output]  # type: ignore[arg-type]
         return output
     else:  # isinstance(inpt, PIL.Image.Image):
         return ten_crop_image_pil(inpt, size, vertical_flip=vertical_flip)
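For context, a hedged usage sketch of the two dispatchers touched by the diff; the torchvision.prototype import paths and the features.Image constructor are assumptions about the prototype namespace this file lived in and may differ across releases:

# Assumed prototype namespaces; not guaranteed stable across torchvision versions.
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F

img = features.Image(torch.rand(3, 32, 32))

# After the change, each crop is re-wrapped via cls.wrap_like, so the outputs
# keep the features.Image type rather than degrading to plain Tensor.
crops = F.five_crop(img, [16, 16])
print([type(c).__name__ for c in crops])

# ten_crop also returns the flipped variants (vertical when vertical_flip=True).
crops10 = F.ten_crop(img, [16, 16], vertical_flip=True)
print(len(crops10))  # 10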