diff --git a/torchvision/transforms/v2/_type_conversion.py b/torchvision/transforms/v2/_type_conversion.py
index dc4f79c9b19..e92c98e6cb3 100644
--- a/torchvision/transforms/v2/_type_conversion.py
+++ b/torchvision/transforms/v2/_type_conversion.py
@@ -44,23 +44,24 @@ def _transform(
 
 
 class ToPILImage(Transform):
-    """[BETA] Convert a tensor or an ndarray to PIL Image - this does not scale values.
+    """[BETA] Convert a tensor or an ndarray to PIL Image
 
     .. v2betastatus:: ToPILImage transform
 
     This transform does not support torchscript.
 
     Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
-    H x W x C to a PIL Image while preserving the value range.
+    H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
 
     Args:
         mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
             If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
+
             - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
             - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
             - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
             - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
-            ``short``).
+              ``short``).
 
     .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
     """
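
For reference (not part of the diff), here is a minimal sketch of the mode inference the updated docstring describes, assuming a torchvision build that ships the v2 transforms:

# Minimal sketch of the channel-count / dtype based mode inference.
import torch
from torchvision.transforms import v2

to_pil = v2.ToPILImage()  # mode=None: infer mode from the input

rgb = to_pil(torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8))
rgba = to_pil(torch.randint(0, 256, (4, 32, 32), dtype=torch.uint8))
gray = to_pil(torch.randint(0, 256, (1, 32, 32), dtype=torch.uint8))
print(rgb.mode, rgba.mode, gray.mode)  # RGB RGBA L

# Float inputs with mode=None are rescaled from [0, 1] to [0, 255] and cast
# to uint8, i.e. the value range is adjusted rather than preserved.
float_rgb = to_pil(torch.rand(3, 32, 32))
print(float_rgb.getextrema())  # per-band (min, max), now in the 0..255 range

The rescaling of float inputs is why the old "preserving the value range" wording was misleading and is replaced in this patch.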