# _image.py
from __future__ import annotations

from typing import Any, Optional, Union

import PIL.Image
import torch

from ._tv_tensor import TVTensor


class Image(TVTensor):
    """[BETA] :class:`torch.Tensor` subclass for images.

    .. note::
        In the :ref:`transforms <transforms>`, ``Image`` instances are largely
        interchangeable with pure :class:`torch.Tensor`. See
        :ref:`this note <passthrough_heuristic>` for more details.

    Args:
        data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor`,
            as well as PIL images.
        dtype (torch.dtype, optional): Desired data type. If omitted, it will be inferred from ``data``.
        device (torch.device, optional): Desired device. If omitted and ``data`` is a :class:`torch.Tensor`,
            the device is taken from it. Otherwise, the image is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations. If omitted and ``data`` is a
            :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
    """
    def __new__(
        cls,
        data: Any,
        *,
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[torch.device, str, int]] = None,
        requires_grad: Optional[bool] = None,
    ) -> Image:
        if isinstance(data, PIL.Image.Image):
            # Imported lazily to avoid a circular import at module load time.
            from torchvision.transforms.v2 import functional as F

            data = F.pil_to_tensor(data)

        tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 2:
            raise ValueError(f"Expected the data to have at least 2 dimensions, but got {tensor.ndim}")
        elif tensor.ndim == 2:
            # Promote single-channel 2D data from (H, W) to (1, H, W).
            tensor = tensor.unsqueeze(0)

        return tensor.as_subclass(cls)

    def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
        return self._make_repr()
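

# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the module itself). It
# assumes the surrounding torchvision package is importable, since this file
# relies on a relative import, and simply exercises the behavior documented
# in the class above.
if __name__ == "__main__":
    # A 2D (H, W) tensor is promoted to (1, H, W) by ``__new__``.
    gray = Image(torch.rand(16, 16))
    assert isinstance(gray, Image)
    assert gray.shape == (1, 16, 16)

    # A PIL image is converted with ``F.pil_to_tensor`` to a (C, H, W) tensor;
    # ``dtype`` and ``device`` are then applied by ``_to_tensor``.
    rgb = Image(PIL.Image.new("RGB", (8, 8)), dtype=torch.float32)
    assert rgb.shape == (3, 8, 8)
    assert rgb.dtype == torch.float32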