from __future__ import annotations

from typing import Any, Optional, Union

import PIL.Image
import torch

from ._tv_tensor import TVTensor


class Image(TVTensor):
    """[BETA] :class:`torch.Tensor` subclass for images.

    .. note::
        In the :ref:`transforms <transforms>`, ``Image`` instances are largely
        interchangeable with pure :class:`torch.Tensor`. See
        :ref:`this note <passthrough_heuristic>` for more details.

    Args:
        data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor`
            as well as PIL images.
        dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from ``data``.
        device (torch.device, optional): Desired device. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
    """

    def __new__(
        cls,
        data: Any,
        *,
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[torch.device, str, int]] = None,
        requires_grad: Optional[bool] = None,
    ) -> Image:
        if isinstance(data, PIL.Image.Image):
            # PIL images are converted to tensors via the v2 functional API (deferred import).
            from torchvision.transforms.v2 import functional as F

            data = F.pil_to_tensor(data)

        tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 2:
            raise ValueError(f"Expected an image with at least two dimensions, but got {tensor.ndim}")
        elif tensor.ndim == 2:
            # A 2D (H, W) input gets a leading channel dimension to produce CHW layout.
            tensor = tensor.unsqueeze(0)
        return tensor.as_subclass(cls)

    def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
        return self._make_repr()
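

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: it exercises the
    # constructor behavior documented in the class docstring above. The shapes and
    # sample values are assumptions chosen for demonstration only.

    # A PIL image is converted with pil_to_tensor, yielding a uint8 CHW tensor.
    pil_image = PIL.Image.new("RGB", (4, 4))
    image = Image(pil_image)
    assert tuple(image.shape) == (3, 4, 4) and image.dtype == torch.uint8

    # A 2D (H, W) tensor gets a leading channel dimension added.
    image = Image(torch.rand(4, 4))
    assert tuple(image.shape) == (1, 4, 4)

    # dtype, device, and requires_grad are forwarded to tensor construction.
    image = Image(torch.rand(3, 4, 4), dtype=torch.float64, requires_grad=True)
    assert image.dtype == torch.float64 and image.requires_grad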