# _misc.py

import warnings
from typing import Any, Callable, cast, Dict, List, Optional, Sequence, Type, Union

import PIL.Image
import torch
from torch.utils._pytree import tree_flatten, tree_unflatten

from torchvision import transforms as _transforms, tv_tensors
from torchvision.transforms.v2 import functional as F, Transform

from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_bounding_boxes, has_any, is_pure_tensor


# TODO: do we want/need to expose this?
class Identity(Transform):
    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        return inpt


class Lambda(Transform):
    """[BETA] Apply a user-defined function as a transform.

    .. v2betastatus:: Lambda transform

    This transform does not support torchscript.

    Args:
        lambd (function): Lambda/function to be used for transform.
    """

    _transformed_types = (object,)

    def __init__(self, lambd: Callable[[Any], Any], *types: Type):
        super().__init__()
        self.lambd = lambd
        self.types = types or self._transformed_types

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        if isinstance(inpt, self.types):
            return self.lambd(inpt)
        else:
            return inpt

    def extra_repr(self) -> str:
        extras = []
        name = getattr(self.lambd, "__name__", None)
        if name:
            extras.append(name)
        extras.append(f"types={[type.__name__ for type in self.types]}")
        return ", ".join(extras)
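

# Usage sketch (illustrative only, not part of the original module): wrap an
# arbitrary callable and restrict it to plain tensors, so that any other input
# type passes through unchanged.
#
#     double = Lambda(lambda x: x * 2, torch.Tensor)
#     out = double(torch.rand(3, 8, 8))  # tensor values are doubled
#     out = double("not a tensor")       # returned as-is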


class LinearTransformation(Transform):
    """[BETA] Transform a tensor image or video with a square transformation matrix and a mean_vector computed offline.

    .. v2betastatus:: LinearTransformation transform

    This transform does not support PIL Image.

    Given transformation_matrix and mean_vector, will flatten the torch.*Tensor,
    subtract mean_vector from it, compute the dot product with the
    transformation matrix, and then reshape the tensor to its original shape.

    Applications:
        whitening transformation: Suppose X is a matrix of zero-centered data
        (one flattened sample per row). Then compute the data covariance matrix
        [D x D] with torch.mm(X.t(), X), perform SVD on this matrix and pass it
        as transformation_matrix.

    Args:
        transformation_matrix (Tensor): tensor [D x D], D = C x H x W
        mean_vector (Tensor): tensor [D], D = C x H x W
    """

    _v1_transform_cls = _transforms.LinearTransformation

    _transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)

    def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):
        super().__init__()
        if transformation_matrix.size(0) != transformation_matrix.size(1):
            raise ValueError(
                "transformation_matrix should be square. Got "
                f"{tuple(transformation_matrix.size())} rectangular matrix."
            )

        if mean_vector.size(0) != transformation_matrix.size(0):
            raise ValueError(
                f"mean_vector should have the same length {mean_vector.size(0)}"
                f" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]"
            )

        if transformation_matrix.device != mean_vector.device:
            raise ValueError(
                f"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}"
            )

        if transformation_matrix.dtype != mean_vector.dtype:
            raise ValueError(
                f"Input tensors should have the same dtype. Got {transformation_matrix.dtype} and {mean_vector.dtype}"
            )

        self.transformation_matrix = transformation_matrix
        self.mean_vector = mean_vector

    def _check_inputs(self, sample: Any) -> Any:
        if has_any(sample, PIL.Image.Image):
            raise TypeError(f"{type(self).__name__}() does not support PIL images.")

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        shape = inpt.shape
        n = shape[-3] * shape[-2] * shape[-1]
        if n != self.transformation_matrix.shape[0]:
            raise ValueError(
                "Input tensor and transformation matrix have incompatible shape. "
                + f"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != "
                + f"{self.transformation_matrix.shape[0]}"
            )

        if inpt.device.type != self.mean_vector.device.type:
            raise ValueError(
                "Input tensor should be on the same device as transformation matrix and mean vector. "
                f"Got {inpt.device} vs {self.mean_vector.device}"
            )

        flat_inpt = inpt.reshape(-1, n) - self.mean_vector

        transformation_matrix = self.transformation_matrix.to(flat_inpt.dtype)
        output = torch.mm(flat_inpt, transformation_matrix)
        output = output.reshape(shape)

        if isinstance(inpt, (tv_tensors.Image, tv_tensors.Video)):
            output = tv_tensors.wrap(output, like=inpt)
        return output
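

# Whitening sketch (illustrative only, not part of the original module; the
# 3x8x8 input shape, the sample count, and the 1e-5 epsilon are arbitrary
# assumptions): build a ZCA-style transformation_matrix offline from flattened,
# zero-centered data X of shape [N, D] with D = C * H * W, then pass it
# together with the per-feature mean.
#
#     X = torch.rand(1000, 3 * 8 * 8)
#     mean_vector = X.mean(dim=0)
#     X_centered = X - mean_vector
#     cov = X_centered.t() @ X_centered / X.shape[0]
#     U, S, _ = torch.linalg.svd(cov)
#     transformation_matrix = U @ torch.diag(1.0 / torch.sqrt(S + 1e-5)) @ U.t()
#     whiten = LinearTransformation(transformation_matrix, mean_vector)
#     out = whiten(torch.rand(3, 8, 8))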


class Normalize(Transform):
    """[BETA] Normalize a tensor image or video with mean and standard deviation.

    .. v2betastatus:: Normalize transform

    This transform does not support PIL Image.

    Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``
    channels, this transform will normalize each channel of the input
    ``torch.*Tensor`` i.e.,
    ``output[channel] = (input[channel] - mean[channel]) / std[channel]``

    .. note::
        This transform acts out of place, i.e., it does not mutate the input tensor.

    Args:
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Whether to make this operation in-place. Default: ``False``.
    """

    _v1_transform_cls = _transforms.Normalize

    def __init__(self, mean: Sequence[float], std: Sequence[float], inplace: bool = False):
        super().__init__()
        self.mean = list(mean)
        self.std = list(std)
        self.inplace = inplace

    def _check_inputs(self, sample: Any) -> Any:
        if has_any(sample, PIL.Image.Image):
            raise TypeError(f"{type(self).__name__}() does not support PIL images.")

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        return self._call_kernel(F.normalize, inpt, mean=self.mean, std=self.std, inplace=self.inplace)
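

# Usage sketch (illustrative only): normalize a float image with per-channel
# statistics; the values below are the commonly used ImageNet mean and std.
#
#     normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#     out = normalize(torch.rand(3, 224, 224))  # float image in [0, 1]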


class GaussianBlur(Transform):
    """[BETA] Blurs image with randomly chosen Gaussian blur.

    .. v2betastatus:: GaussianBlur transform

    If the input is a Tensor, it is expected
    to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        kernel_size (int or sequence): Size of the Gaussian kernel.
        sigma (float or tuple of float (min, max)): Standard deviation to be used for
            creating kernel to perform blurring. If float, sigma is fixed. If it is tuple
            of float (min, max), sigma is chosen uniformly at random to lie in the
            given range.
    """

    _v1_transform_cls = _transforms.GaussianBlur

    def __init__(
        self, kernel_size: Union[int, Sequence[int]], sigma: Union[int, float, Sequence[float]] = (0.1, 2.0)
    ) -> None:
        super().__init__()
        self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
        for ks in self.kernel_size:
            if ks <= 0 or ks % 2 == 0:
                raise ValueError("Kernel size value should be an odd and positive number.")

        self.sigma = _setup_number_or_seq(sigma, "sigma")
        if not 0.0 < self.sigma[0] <= self.sigma[1]:
            raise ValueError(f"sigma values should be positive and of the form (min, max). Got {self.sigma}")

    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
        sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()
        return dict(sigma=[sigma, sigma])

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        return self._call_kernel(F.gaussian_blur, inpt, self.kernel_size, **params)
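

# Usage sketch (illustrative only): a 5x5 kernel with a sigma drawn uniformly
# from (0.1, 2.0) on every call.
#
#     blur = GaussianBlur(kernel_size=5, sigma=(0.1, 2.0))
#     out = blur(torch.rand(3, 64, 64))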


class ToDtype(Transform):
    """[BETA] Converts the input to a specific dtype, optionally scaling the values for images or videos.

    .. v2betastatus:: ToDtype transform

    .. note::
        ``ToDtype(dtype, scale=True)`` is the recommended replacement for ``ConvertImageDtype(dtype)``.

    Args:
        dtype (``torch.dtype`` or dict of ``TVTensor`` -> ``torch.dtype``): The dtype to convert to.
            If a ``torch.dtype`` is passed, e.g. ``torch.float32``, only images and videos will be converted
            to that dtype: this is for compatibility with :class:`~torchvision.transforms.v2.ConvertImageDtype`.
            A dict can be passed to specify per-tv_tensor conversions, e.g.
            ``dtype={tv_tensors.Image: torch.float32, tv_tensors.Mask: torch.int64, "others": None}``. The "others"
            key can be used as a catch-all for any other tv_tensor type, and ``None`` means no conversion.
        scale (bool, optional): Whether to scale the values for images or videos. See :ref:`range_and_dtype`.
            Default: ``False``.
    """

    _transformed_types = (torch.Tensor,)

    def __init__(
        self, dtype: Union[torch.dtype, Dict[Union[Type, str], Optional[torch.dtype]]], scale: bool = False
    ) -> None:
        super().__init__()

        if not isinstance(dtype, (dict, torch.dtype)):
            raise ValueError(f"dtype must be a dict or a torch.dtype, got {type(dtype)} instead")

        if (
            isinstance(dtype, dict)
            and torch.Tensor in dtype
            and any(cls in dtype for cls in [tv_tensors.Image, tv_tensors.Video])
        ):
            warnings.warn(
                "Got `dtype` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
                "Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
                "in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
            )
        self.dtype = dtype
        self.scale = scale

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        if isinstance(self.dtype, torch.dtype):
            # For consistency / BC with ConvertImageDtype, we only care about images or videos when dtype
            # is a simple torch.dtype
            if not is_pure_tensor(inpt) and not isinstance(inpt, (tv_tensors.Image, tv_tensors.Video)):
                return inpt

            dtype: Optional[torch.dtype] = self.dtype
        elif type(inpt) in self.dtype:
            dtype = self.dtype[type(inpt)]
        elif "others" in self.dtype:
            dtype = self.dtype["others"]
        else:
            raise ValueError(
                f"No dtype was specified for type {type(inpt)}. "
                "If you only need to convert the dtype of images or videos, you can just pass e.g. dtype=torch.float32. "
                "If you're passing a dict as dtype, "
                'you can use "others" as a catch-all key '
                'e.g. dtype={tv_tensors.Mask: torch.int64, "others": None} to pass-through the rest of the inputs.'
            )

        supports_scaling = is_pure_tensor(inpt) or isinstance(inpt, (tv_tensors.Image, tv_tensors.Video))
        if dtype is None:
            if self.scale and supports_scaling:
                warnings.warn(
                    "scale was set to True but no dtype was specified for images or videos: no scaling will be done."
                )
            return inpt

        return self._call_kernel(F.to_dtype, inpt, dtype=dtype, scale=self.scale)
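

# Usage sketch (illustrative only): convert images to float32 with value
# scaling, keep masks as int64, and pass every other tv_tensor type through.
#
#     to_dtype = ToDtype(
#         dtype={tv_tensors.Image: torch.float32, tv_tensors.Mask: torch.int64, "others": None},
#         scale=True,
#     )
#     img = tv_tensors.Image(torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8))
#     out = to_dtype(img)  # float32 image with values scaled to [0, 1]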


class ConvertImageDtype(Transform):
    """[BETA] [DEPRECATED] Use ``v2.ToDtype(dtype, scale=True)`` instead.

    Convert input image to the given ``dtype`` and scale the values accordingly.

    .. v2betastatus:: ConvertImageDtype transform

    .. warning::
        Consider using ``ToDtype(dtype, scale=True)`` instead. See :class:`~torchvision.transforms.v2.ToDtype`.

    This function does not support PIL Image.

    Args:
        dtype (torch.dtype): Desired data type of the output.

    .. note::
        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """

    _v1_transform_cls = _transforms.ConvertImageDtype

    def __init__(self, dtype: torch.dtype = torch.float32) -> None:
        super().__init__()
        self.dtype = dtype

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        return self._call_kernel(F.to_dtype, inpt, dtype=self.dtype, scale=True)
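

# Usage sketch (illustrative only): for images and videos this behaves like
# ToDtype(dtype, scale=True).
#
#     convert = ConvertImageDtype(torch.float32)
#     out = convert(torch.randint(0, 256, (3, 16, 16), dtype=torch.uint8))
#     # out is float32 with values scaled to [0.0, 1.0]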


class SanitizeBoundingBoxes(Transform):
    """[BETA] Remove degenerate/invalid bounding boxes and their corresponding labels and masks.

    .. v2betastatus:: SanitizeBoundingBoxes transform

    This transform removes bounding boxes and their associated labels/masks that:

    - are below a given ``min_size``: by default this also removes degenerate boxes that have e.g. X2 <= X1.
    - have any coordinate outside of their corresponding image. You may want to
      call :class:`~torchvision.transforms.v2.ClampBoundingBoxes` first to avoid undesired removals.

    It is recommended to call it at the end of a pipeline, before passing the
    input to the models. It is critical to call this transform if
    :class:`~torchvision.transforms.v2.RandomIoUCrop` was called.
    If you want to be extra careful, you may call it after all transforms that
    may modify bounding boxes, but once at the end should be enough in most
    cases.

    Args:
        min_size (float, optional): The size below which bounding boxes are removed. Default is 1.
        labels_getter (callable or str or None, optional): indicates how to identify the labels in the input.
            By default, this will try to find a "labels" key in the input (case-insensitive), if
            the input is a dict or it is a tuple whose second element is a dict.
            This heuristic should work well with a lot of datasets, including the built-in torchvision datasets.
            It can also be a callable that takes the same input
            as the transform, and returns the labels.
    """

    def __init__(
        self,
        min_size: float = 1.0,
        labels_getter: Union[Callable[[Any], Optional[torch.Tensor]], str, None] = "default",
    ) -> None:
        super().__init__()

        if min_size < 1:
            raise ValueError(f"min_size must be >= 1, got {min_size}.")
        self.min_size = min_size

        self.labels_getter = labels_getter
        self._labels_getter = _parse_labels_getter(labels_getter)

    def forward(self, *inputs: Any) -> Any:
        inputs = inputs if len(inputs) > 1 else inputs[0]

        labels = self._labels_getter(inputs)
        if labels is not None and not isinstance(labels, torch.Tensor):
            raise ValueError(
                f"The labels in the input to forward() must be a tensor or None, got {type(labels)} instead."
            )

        flat_inputs, spec = tree_flatten(inputs)
        boxes = get_bounding_boxes(flat_inputs)

        if labels is not None and boxes.shape[0] != labels.shape[0]:
            raise ValueError(
                f"Number of boxes (shape={boxes.shape}) and number of labels (shape={labels.shape}) do not match."
            )

        boxes = cast(
            tv_tensors.BoundingBoxes,
            F.convert_bounding_box_format(
                boxes,
                new_format=tv_tensors.BoundingBoxFormat.XYXY,
            ),
        )
        ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
        valid = (ws >= self.min_size) & (hs >= self.min_size) & (boxes >= 0).all(dim=-1)
        # TODO: Do we really need to check for out of bounds here? All
        # transforms should be clamping anyway, so this should never happen?
        image_h, image_w = boxes.canvas_size
        valid &= (boxes[:, 0] <= image_w) & (boxes[:, 2] <= image_w)
        valid &= (boxes[:, 1] <= image_h) & (boxes[:, 3] <= image_h)

        params = dict(valid=valid.as_subclass(torch.Tensor), labels=labels)
        flat_outputs = [
            # Even though it may look like we're transforming all inputs, we don't:
            # _transform() will only care about BoundingBoxes and the labels
            self._transform(inpt, params)
            for inpt in flat_inputs
        ]

        return tree_unflatten(flat_outputs, spec)

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        is_label = inpt is not None and inpt is params["labels"]
        is_bounding_boxes_or_mask = isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask))

        if not (is_label or is_bounding_boxes_or_mask):
            return inpt

        output = inpt[params["valid"]]

        if is_label:
            return output

        return tv_tensors.wrap(output, like=inpt)
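

# Usage sketch (illustrative only; the boxes, canvas size, and labels are made
# up): degenerate and out-of-canvas boxes are dropped together with their labels.
#
#     boxes = tv_tensors.BoundingBoxes(
#         [[0, 0, 10, 10], [5, 5, 5, 5], [0, 0, 200, 200]],
#         format=tv_tensors.BoundingBoxFormat.XYXY,
#         canvas_size=(100, 100),
#     )
#     sample = {"boxes": boxes, "labels": torch.tensor([1, 2, 3])}
#     out = SanitizeBoundingBoxes()(sample)
#     # only the first box and its label survive; apply ClampBoundingBoxes first
#     # if out-of-canvas boxes should be clipped instead of removed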