# transforms_v2_legacy_utils.py

  1. """
  2. As the name implies, these are legacy utilities that are hopefully removed soon. The future of
  3. transforms v2 testing is in test/test_transforms_v2_refactored.py. All new test should be
  4. implemented there and must not use any of the utilities here.
  5. The following legacy modules depend on this module
  6. - transforms_v2_kernel_infos.py
  7. - transforms_v2_dispatcher_infos.py
  8. - test_transforms_v2_functional.py
  9. - test_transforms_v2_consistency.py
  10. - test_transforms.py
  11. When all the logic is ported from the files above to test_transforms_v2_refactored.py, delete
  12. all the legacy modules including this one and drop the _refactored prefix from the name.
  13. """

import collections.abc
import dataclasses
import enum
import itertools
import pathlib
from collections import defaultdict
from typing import Callable, Sequence, Tuple, Union

import PIL.Image
import pytest

import torch
from torchvision import tv_tensors
from torchvision.transforms._functional_tensor import _max_value as get_max_value
from torchvision.transforms.v2.functional import to_dtype_image, to_image, to_pil_image


def combinations_grid(**kwargs):
    """Creates a grid of input combinations.

    Each element in the returned sequence is a dictionary containing one possible combination as values.

    Example:
        >>> combinations_grid(foo=("bar", "baz"), spam=("eggs", "ham"))
        [
            {'foo': 'bar', 'spam': 'eggs'},
            {'foo': 'bar', 'spam': 'ham'},
            {'foo': 'baz', 'spam': 'eggs'},
            {'foo': 'baz', 'spam': 'ham'}
        ]
    """
    return [dict(zip(kwargs.keys(), values)) for values in itertools.product(*kwargs.values())]


DEFAULT_SIZE = (17, 11)

NUM_CHANNELS_MAP = {
    "GRAY": 1,
    "GRAY_ALPHA": 2,
    "RGB": 3,
    "RGBA": 4,
}


def make_image(
    size=DEFAULT_SIZE,
    *,
    color_space="RGB",
    batch_dims=(),
    dtype=None,
    device="cpu",
    memory_format=torch.contiguous_format,
):
    num_channels = NUM_CHANNELS_MAP[color_space]
    dtype = dtype or torch.uint8
    max_value = get_max_value(dtype)
    data = torch.testing.make_tensor(
        (*batch_dims, num_channels, *size),
        low=0,
        high=max_value,
        dtype=dtype,
        device=device,
        memory_format=memory_format,
    )
    if color_space in {"GRAY_ALPHA", "RGBA"}:
        data[..., -1, :, :] = max_value

    return tv_tensors.Image(data)
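
# Illustrative usage (a sketch, not exercised anywhere in this module): with the defaults above,
# `make_image()` returns a `tv_tensors.Image` of shape `(3, 17, 11)` and dtype `torch.uint8`, e.g.
#
#   >>> image = make_image(color_space="RGBA", batch_dims=(2,), dtype=torch.float32)
#   >>> image.shape, image.dtype
#   (torch.Size([2, 4, 17, 11]), torch.float32)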


def make_image_tensor(*args, **kwargs):
    return make_image(*args, **kwargs).as_subclass(torch.Tensor)


def make_image_pil(*args, **kwargs):
    return to_pil_image(make_image(*args, **kwargs))


def make_bounding_boxes(
    canvas_size=DEFAULT_SIZE,
    *,
    format=tv_tensors.BoundingBoxFormat.XYXY,
    batch_dims=(),
    dtype=None,
    device="cpu",
):
    def sample_position(values, max_value):
        # We cannot use torch.randint directly here, because it only allows integer scalars as values for low and high.
        # However, if we have batch_dims, we need tensors as limits.
        return torch.stack([torch.randint(max_value - v, ()) for v in values.flatten().tolist()]).reshape(values.shape)

    if isinstance(format, str):
        format = tv_tensors.BoundingBoxFormat[format]

    dtype = dtype or torch.float32

    if any(dim == 0 for dim in batch_dims):
        return tv_tensors.BoundingBoxes(
            torch.empty(*batch_dims, 4, dtype=dtype, device=device), format=format, canvas_size=canvas_size
        )

    h, w = [torch.randint(1, c, batch_dims) for c in canvas_size]
    y = sample_position(h, canvas_size[0])
    x = sample_position(w, canvas_size[1])

    if format is tv_tensors.BoundingBoxFormat.XYWH:
        parts = (x, y, w, h)
    elif format is tv_tensors.BoundingBoxFormat.XYXY:
        x1, y1 = x, y
        x2 = x1 + w
        y2 = y1 + h
        parts = (x1, y1, x2, y2)
    elif format is tv_tensors.BoundingBoxFormat.CXCYWH:
        cx = x + w / 2
        cy = y + h / 2
        parts = (cx, cy, w, h)
    else:
        raise ValueError(f"Format {format} is not supported")

    return tv_tensors.BoundingBoxes(
        torch.stack(parts, dim=-1).to(dtype=dtype, device=device), format=format, canvas_size=canvas_size
    )
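
# Illustrative usage (a sketch, not exercised in this module): the sampled boxes always fit inside
# the canvas, and string formats are accepted in place of `BoundingBoxFormat` members, e.g.
#
#   >>> boxes = make_bounding_boxes(format="XYWH", batch_dims=(5,))
#   >>> boxes.shape, boxes.canvas_size
#   (torch.Size([5, 4]), (17, 11))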


def make_detection_mask(size=DEFAULT_SIZE, *, num_objects=5, batch_dims=(), dtype=None, device="cpu"):
    """Make a "detection" mask, i.e. (*, N, H, W), where each object is encoded as one of N boolean masks"""
    return tv_tensors.Mask(
        torch.testing.make_tensor(
            (*batch_dims, num_objects, *size),
            low=0,
            high=2,
            dtype=dtype or torch.bool,
            device=device,
        )
    )


def make_segmentation_mask(size=DEFAULT_SIZE, *, num_categories=10, batch_dims=(), dtype=None, device="cpu"):
    """Make a "segmentation" mask, i.e. (*, H, W), where the category is encoded as pixel value"""
    return tv_tensors.Mask(
        torch.testing.make_tensor(
            (*batch_dims, *size),
            low=0,
            high=num_categories,
            dtype=dtype or torch.uint8,
            device=device,
        )
    )


def make_video(size=DEFAULT_SIZE, *, num_frames=3, batch_dims=(), **kwargs):
    return tv_tensors.Video(make_image(size, batch_dims=(*batch_dims, num_frames), **kwargs))


def make_video_tensor(*args, **kwargs):
    return make_video(*args, **kwargs).as_subclass(torch.Tensor)


DEFAULT_SQUARE_SPATIAL_SIZE = 15
DEFAULT_LANDSCAPE_SPATIAL_SIZE = (7, 33)
DEFAULT_PORTRAIT_SPATIAL_SIZE = (31, 9)
DEFAULT_SPATIAL_SIZES = (
    DEFAULT_LANDSCAPE_SPATIAL_SIZE,
    DEFAULT_PORTRAIT_SPATIAL_SIZE,
    DEFAULT_SQUARE_SPATIAL_SIZE,
)


def _parse_size(size, *, name="size"):
    if size == "random":
        raise ValueError("This should never happen")
    elif isinstance(size, int) and size > 0:
        return (size, size)
    elif (
        isinstance(size, collections.abc.Sequence)
        and len(size) == 2
        and all(isinstance(length, int) and length > 0 for length in size)
    ):
        return tuple(size)
    else:
        raise pytest.UsageError(
            f"'{name}' can either be `'random'`, a positive integer, or a sequence of two positive integers, "
            f"but got {size} instead."
        )


def get_num_channels(color_space):
    num_channels = NUM_CHANNELS_MAP.get(color_space)
    if not num_channels:
        raise pytest.UsageError(f"Can't determine the number of channels for color space {color_space}")
    return num_channels


VALID_EXTRA_DIMS = ((), (4,), (2, 3))
DEGENERATE_BATCH_DIMS = ((0,), (5, 0), (0, 5))

DEFAULT_EXTRA_DIMS = (*VALID_EXTRA_DIMS, *DEGENERATE_BATCH_DIMS)


def from_loader(loader_fn):
    def wrapper(*args, **kwargs):
        device = kwargs.pop("device", "cpu")
        loader = loader_fn(*args, **kwargs)
        return loader.load(device)

    return wrapper


def from_loaders(loaders_fn):
    def wrapper(*args, **kwargs):
        device = kwargs.pop("device", "cpu")
        loaders = loaders_fn(*args, **kwargs)
        for loader in loaders:
            yield loader.load(device)

    return wrapper
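
# Loader pattern in a nutshell (illustrative sketch): the `make_*_loader(s)` functions below only
# describe shape and dtype; the actual tensors are created lazily via `.load(device)`. The
# `from_loader(s)` wrappers turn a loader factory into a factory of loaded tensors, e.g.
#
#   >>> make_images = from_loaders(make_image_loaders)  # as done further below
#   >>> for image in make_images(device="cpu"):
#   ...     assert isinstance(image, tv_tensors.Image)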


@dataclasses.dataclass
class TensorLoader:
    fn: Callable[[Sequence[int], torch.dtype, Union[str, torch.device]], torch.Tensor]
    shape: Sequence[int]
    dtype: torch.dtype

    def load(self, device):
        return self.fn(self.shape, self.dtype, device)


@dataclasses.dataclass
class ImageLoader(TensorLoader):
    spatial_size: Tuple[int, int] = dataclasses.field(init=False)
    num_channels: int = dataclasses.field(init=False)
    memory_format: torch.memory_format = torch.contiguous_format
    canvas_size: Tuple[int, int] = dataclasses.field(init=False)

    def __post_init__(self):
        self.spatial_size = self.canvas_size = self.shape[-2:]
        self.num_channels = self.shape[-3]

    def load(self, device):
        return self.fn(self.shape, self.dtype, device, memory_format=self.memory_format)


def make_image_loader(
    size=DEFAULT_PORTRAIT_SPATIAL_SIZE,
    *,
    color_space="RGB",
    extra_dims=(),
    dtype=torch.float32,
    constant_alpha=True,
    memory_format=torch.contiguous_format,
):
    if not constant_alpha:
        raise ValueError("This should never happen")

    size = _parse_size(size)
    num_channels = get_num_channels(color_space)

    def fn(shape, dtype, device, memory_format):
        *batch_dims, _, height, width = shape
        return make_image(
            (height, width),
            color_space=color_space,
            batch_dims=batch_dims,
            dtype=dtype,
            device=device,
            memory_format=memory_format,
        )

    return ImageLoader(fn, shape=(*extra_dims, num_channels, *size), dtype=dtype, memory_format=memory_format)
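
# Illustrative usage (a sketch): a loader carries the metadata eagerly and the data lazily, e.g.
#
#   >>> loader = make_image_loader((7, 33), dtype=torch.uint8)
#   >>> loader.shape, loader.num_channels, loader.canvas_size
#   ((3, 7, 33), 3, (7, 33))
#   >>> image = loader.load("cpu")  # only now is the tensor materialized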


def make_image_loaders(
    *,
    sizes=DEFAULT_SPATIAL_SIZES,
    color_spaces=(
        "GRAY",
        "GRAY_ALPHA",
        "RGB",
        "RGBA",
    ),
    extra_dims=DEFAULT_EXTRA_DIMS,
    dtypes=(torch.float32, torch.float64, torch.uint8),
    constant_alpha=True,
):
    for params in combinations_grid(size=sizes, color_space=color_spaces, extra_dims=extra_dims, dtype=dtypes):
        yield make_image_loader(**params, constant_alpha=constant_alpha)


make_images = from_loaders(make_image_loaders)


def make_image_loader_for_interpolation(
    size=(233, 147), *, color_space="RGB", dtype=torch.uint8, memory_format=torch.contiguous_format
):
    size = _parse_size(size)
    num_channels = get_num_channels(color_space)

    def fn(shape, dtype, device, memory_format):
        height, width = shape[-2:]

        image_pil = (
            PIL.Image.open(pathlib.Path(__file__).parent / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg")
            .resize((width, height))
            .convert(
                {
                    "GRAY": "L",
                    "GRAY_ALPHA": "LA",
                    "RGB": "RGB",
                    "RGBA": "RGBA",
                }[color_space]
            )
        )

        image_tensor = to_image(image_pil)
        if memory_format == torch.contiguous_format:
            image_tensor = image_tensor.to(device=device, memory_format=memory_format, copy=True)
        else:
            image_tensor = image_tensor.to(device=device)
        image_tensor = to_dtype_image(image_tensor, dtype=dtype, scale=True)

        return tv_tensors.Image(image_tensor)

    return ImageLoader(fn, shape=(num_channels, *size), dtype=dtype, memory_format=memory_format)


def make_image_loaders_for_interpolation(
    sizes=((233, 147),),
    color_spaces=("RGB",),
    dtypes=(torch.uint8,),
    memory_formats=(torch.contiguous_format, torch.channels_last),
):
    for params in combinations_grid(size=sizes, color_space=color_spaces, dtype=dtypes, memory_format=memory_formats):
        yield make_image_loader_for_interpolation(**params)


@dataclasses.dataclass
class BoundingBoxesLoader(TensorLoader):
    format: tv_tensors.BoundingBoxFormat
    spatial_size: Tuple[int, int]
    canvas_size: Tuple[int, int] = dataclasses.field(init=False)

    def __post_init__(self):
        self.canvas_size = self.spatial_size


def make_bounding_box_loader(*, extra_dims=(), format, spatial_size=DEFAULT_PORTRAIT_SPATIAL_SIZE, dtype=torch.float32):
    if isinstance(format, str):
        format = tv_tensors.BoundingBoxFormat[format]

    spatial_size = _parse_size(spatial_size, name="spatial_size")

    def fn(shape, dtype, device):
        *batch_dims, num_coordinates = shape
        if num_coordinates != 4:
            raise pytest.UsageError()

        return make_bounding_boxes(
            format=format, canvas_size=spatial_size, batch_dims=batch_dims, dtype=dtype, device=device
        )

    return BoundingBoxesLoader(fn, shape=(*extra_dims[-1:], 4), dtype=dtype, format=format, spatial_size=spatial_size)


def make_bounding_box_loaders(
    *,
    extra_dims=tuple(d for d in DEFAULT_EXTRA_DIMS if len(d) < 2),
    formats=tuple(tv_tensors.BoundingBoxFormat),
    spatial_size=DEFAULT_PORTRAIT_SPATIAL_SIZE,
    dtypes=(torch.float32, torch.float64, torch.int64),
):
    for params in combinations_grid(extra_dims=extra_dims, format=formats, dtype=dtypes):
        yield make_bounding_box_loader(**params, spatial_size=spatial_size)


make_multiple_bounding_boxes = from_loaders(make_bounding_box_loaders)


class MaskLoader(TensorLoader):
    pass


def make_detection_mask_loader(size=DEFAULT_PORTRAIT_SPATIAL_SIZE, *, num_objects=5, extra_dims=(), dtype=torch.uint8):
    # This produces "detection" masks, i.e. `(*, N, H, W)`, where `N` denotes the number of objects
    size = _parse_size(size)

    def fn(shape, dtype, device):
        *batch_dims, num_objects, height, width = shape
        return make_detection_mask(
            (height, width), num_objects=num_objects, batch_dims=batch_dims, dtype=dtype, device=device
        )

    return MaskLoader(fn, shape=(*extra_dims, num_objects, *size), dtype=dtype)


def make_detection_mask_loaders(
    sizes=DEFAULT_SPATIAL_SIZES,
    num_objects=(1, 0, 5),
    extra_dims=DEFAULT_EXTRA_DIMS,
    dtypes=(torch.uint8,),
):
    for params in combinations_grid(size=sizes, num_objects=num_objects, extra_dims=extra_dims, dtype=dtypes):
        yield make_detection_mask_loader(**params)


make_detection_masks = from_loaders(make_detection_mask_loaders)


def make_segmentation_mask_loader(
    size=DEFAULT_PORTRAIT_SPATIAL_SIZE, *, num_categories=10, extra_dims=(), dtype=torch.uint8
):
    # This produces "segmentation" masks, i.e. `(*, H, W)`, where the category is encoded in the values
    size = _parse_size(size)

    def fn(shape, dtype, device):
        *batch_dims, height, width = shape
        return make_segmentation_mask(
            (height, width), num_categories=num_categories, batch_dims=batch_dims, dtype=dtype, device=device
        )

    return MaskLoader(fn, shape=(*extra_dims, *size), dtype=dtype)


def make_segmentation_mask_loaders(
    *,
    sizes=DEFAULT_SPATIAL_SIZES,
    num_categories=(1, 2, 10),
    extra_dims=DEFAULT_EXTRA_DIMS,
    dtypes=(torch.uint8,),
):
    for params in combinations_grid(size=sizes, num_categories=num_categories, extra_dims=extra_dims, dtype=dtypes):
        yield make_segmentation_mask_loader(**params)


make_segmentation_masks = from_loaders(make_segmentation_mask_loaders)


def make_mask_loaders(
    *,
    sizes=DEFAULT_SPATIAL_SIZES,
    num_objects=(1, 0, 5),
    num_categories=(1, 2, 10),
    extra_dims=DEFAULT_EXTRA_DIMS,
    dtypes=(torch.uint8,),
):
    yield from make_detection_mask_loaders(sizes=sizes, num_objects=num_objects, extra_dims=extra_dims, dtypes=dtypes)
    yield from make_segmentation_mask_loaders(
        sizes=sizes, num_categories=num_categories, extra_dims=extra_dims, dtypes=dtypes
    )


make_masks = from_loaders(make_mask_loaders)


class VideoLoader(ImageLoader):
    pass


def make_video_loader(
    size=DEFAULT_PORTRAIT_SPATIAL_SIZE,
    *,
    color_space="RGB",
    num_frames=3,
    extra_dims=(),
    dtype=torch.uint8,
):
    size = _parse_size(size)

    def fn(shape, dtype, device, memory_format):
        *batch_dims, num_frames, _, height, width = shape
        return make_video(
            (height, width),
            num_frames=num_frames,
            batch_dims=batch_dims,
            color_space=color_space,
            dtype=dtype,
            device=device,
            memory_format=memory_format,
        )

    return VideoLoader(fn, shape=(*extra_dims, num_frames, get_num_channels(color_space), *size), dtype=dtype)


def make_video_loaders(
    *,
    sizes=DEFAULT_SPATIAL_SIZES,
    color_spaces=(
        "GRAY",
        "RGB",
    ),
    num_frames=(1, 0, 3),
    extra_dims=DEFAULT_EXTRA_DIMS,
    dtypes=(torch.uint8, torch.float32, torch.float64),
):
    for params in combinations_grid(
        size=sizes, color_space=color_spaces, num_frames=num_frames, extra_dims=extra_dims, dtype=dtypes
    ):
        yield make_video_loader(**params)


make_videos = from_loaders(make_video_loaders)


class TestMark:
    def __init__(
        self,
        # Tuple of test class name and test function name that identifies the test the mark is applied to. If there is
        # no test class, i.e. a standalone test function, use `None`.
        test_id,
        # `pytest.mark.*` to apply, e.g. `pytest.mark.skip` or `pytest.mark.xfail`
        mark,
        *,
        # Callable that will be passed an `ArgsKwargs` and should return a boolean to indicate if the mark will be
        # applied. If omitted, the mark is always applied.
        condition=None,
    ):
        self.test_id = test_id
        self.mark = mark
        self.condition = condition or (lambda args_kwargs: True)


def mark_framework_limitation(test_id, reason, condition=None):
    # The purpose of this function is to have a single entry point for skip marks that are only there because the test
    # framework cannot handle the kernel in general or a specific parameter combination.
    # As development progresses, we can change the `mark.skip` to `mark.xfail` from time to time to see if the skip is
    # still justified.
    # We don't want to use `mark.xfail` all the time, because that actually runs the test until an error happens. Thus,
    # we would be wasting CI resources for no reason most of the time.
    return TestMark(test_id, pytest.mark.skip(reason=reason), condition=condition)
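
# Illustrative usage (a sketch; the test class / function names below are hypothetical):
#
#   >>> mark = mark_framework_limitation(
#   ...     ("TestKernels", "test_scripted_vs_eager"),
#   ...     reason="Scripting is not supported for this kernel.",
#   ...     condition=lambda args_kwargs: "antialias" in args_kwargs.kwargs,
#   ... )
#
# The resulting `TestMark` is typically passed via `test_marks` to an `InfoBase` subclass below.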


class InfoBase:
    def __init__(
        self,
        *,
        # Identifier of the info that shows up in the parametrization.
        id,
        # Test markers that will be (conditionally) applied to an `ArgsKwargs` parametrization.
        # See the `TestMark` class for details.
        test_marks=None,
        # Additional parameters, e.g. `rtol=1e-3`, passed to `assert_close`. Keys are a 3-tuple of `test_id` (see
        # `TestMark`), the dtype, and the device.
        closeness_kwargs=None,
    ):
        self.id = id

        self.test_marks = test_marks or []
        test_marks_map = defaultdict(list)
        for test_mark in self.test_marks:
            test_marks_map[test_mark.test_id].append(test_mark)
        self._test_marks_map = dict(test_marks_map)

        self.closeness_kwargs = closeness_kwargs or dict()

    def get_marks(self, test_id, args_kwargs):
        return [
            test_mark.mark for test_mark in self._test_marks_map.get(test_id, []) if test_mark.condition(args_kwargs)
        ]

    def get_closeness_kwargs(self, test_id, *, dtype, device):
        if not (isinstance(test_id, tuple) and len(test_id) == 2):
            msg = "`test_id` should be a `Tuple[Optional[str], str]` denoting the test class and function name"
            if callable(test_id):
                msg += ". Did you forget to add the `test_id` fixture to parameters of the test?"
            else:
                msg += f", but got {test_id} instead."
            raise pytest.UsageError(msg)
        if isinstance(device, torch.device):
            device = device.type
        return self.closeness_kwargs.get((test_id, dtype, device), dict())
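
# Illustrative shape of `closeness_kwargs` (a sketch; the test names are hypothetical): keys are
# `(test_id, dtype, device)` tuples and values are kwargs forwarded to `assert_close`, e.g.
#
#   closeness_kwargs = {
#       (("TestKernels", "test_against_reference"), torch.uint8, "cpu"): dict(atol=1, rtol=0),
#   }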


class ArgsKwargs:
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def __iter__(self):
        yield self.args
        yield self.kwargs

    def load(self, device="cpu"):
        return ArgsKwargs(
            *(arg.load(device) if isinstance(arg, TensorLoader) else arg for arg in self.args),
            **{
                keyword: arg.load(device) if isinstance(arg, TensorLoader) else arg
                for keyword, arg in self.kwargs.items()
            },
        )
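
# Illustrative usage (a sketch): any `TensorLoader` among the arguments is materialized by `.load()`,
# everything else is passed through unchanged, e.g.
#
#   >>> args_kwargs = ArgsKwargs(make_image_loader(), interpolation="bilinear")
#   >>> args, kwargs = args_kwargs.load("cpu")
#   >>> isinstance(args[0], tv_tensors.Image), kwargs["interpolation"]
#   (True, 'bilinear')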


def parametrized_error_message(*args, **kwargs):
    def to_str(obj):
        if isinstance(obj, torch.Tensor) and obj.numel() > 30:
            return f"tensor(shape={list(obj.shape)}, dtype={obj.dtype}, device={obj.device})"
        elif isinstance(obj, enum.Enum):
            return f"{type(obj).__name__}.{obj.name}"
        else:
            return repr(obj)

    if args or kwargs:
        postfix = "\n".join(
            [
                "",
                "Failure happened for the following parameters:",
                "",
                *[to_str(arg) for arg in args],
                *[f"{name}={to_str(kwarg)}" for name, kwarg in kwargs.items()],
            ]
        )
    else:
        postfix = ""

    def wrapper(msg):
        return msg + postfix

    return wrapper
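
# Illustrative usage (a sketch; `actual`, `expected`, and `image` are placeholders): the returned
# wrapper can be passed as the `msg` callable of `torch.testing.assert_close`, so the offending
# parameters are appended to the failure message, e.g.
#
#   >>> torch.testing.assert_close(
#   ...     actual, expected, msg=parametrized_error_message(image, interpolation="bilinear")
#   ... )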