  1. """This file only exists to be lazy-imported and avoid V2-related import warnings when just using V1."""
  2. import torch
  3. from torchvision import tv_tensors
  4. from torchvision.transforms import v2
  5. class PadIfSmaller(v2.Transform):
  6. def __init__(self, size, fill=0):
  7. super().__init__()
  8. self.size = size
  9. self.fill = v2._utils._setup_fill_arg(fill)
  10. def _get_params(self, sample):
  11. _, height, width = v2._utils.query_chw(sample)
  12. padding = [0, 0, max(self.size - width, 0), max(self.size - height, 0)]
  13. needs_padding = any(padding)
  14. return dict(padding=padding, needs_padding=needs_padding)
  15. def _transform(self, inpt, params):
  16. if not params["needs_padding"]:
  17. return inpt
  18. fill = v2._utils._get_fill(self.fill, type(inpt))
  19. fill = v2._utils._convert_fill_arg(fill)
  20. return v2.functional.pad(inpt, padding=params["padding"], fill=fill)
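

# A minimal illustrative sketch of what PadIfSmaller does; the helper below exists only
# for this example. With size=480, a 300 x 500 image needs 180 rows of bottom padding and
# no horizontal padding, since the padding list is ordered [left, top, right, bottom].
def _pad_if_smaller_example():
    image = tv_tensors.Image(torch.rand(3, 300, 500))
    padded = PadIfSmaller(size=480)(image)
    assert padded.shape == (3, 480, 500)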


class CocoDetectionToVOCSegmentation(v2.Transform):
    """Turn samples from datasets.CocoDetection into the same format as VOCSegmentation.

    This is achieved in two steps:

    1. COCO differentiates between 91 categories while VOC only supports 21, including background for both. Fortunately,
       the COCO categories are a superset of the VOC ones and thus can be mapped. Instances of the 70 categories not
       present in VOC are dropped and replaced by background.
    2. COCO only offers detection masks, i.e. a (N, H, W) bool-ish tensor, where the truthy values in each individual
       mask denote the instance. However, a segmentation mask is a (H, W) integer tensor (typically torch.uint8), where
       the value of each pixel denotes the category it belongs to. The detection masks are merged into one segmentation
       mask while pixels that belong to multiple detection masks are marked as invalid.
    """

    COCO_TO_VOC_LABEL_MAP = dict(
        zip(
            [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72],
            range(21),
        )
    )
    INVALID_VALUE = 255

    def _coco_detection_masks_to_voc_segmentation_mask(self, target):
        if "masks" not in target:
            return None

        instance_masks, instance_labels_coco = target["masks"], target["labels"]

        valid_labels_voc = [
            (idx, label_voc)
            for idx, label_coco in enumerate(instance_labels_coco.tolist())
            if (label_voc := self.COCO_TO_VOC_LABEL_MAP.get(label_coco)) is not None
        ]

        if not valid_labels_voc:
            return None

        valid_voc_category_idcs, instance_labels_voc = zip(*valid_labels_voc)

        instance_masks = instance_masks[list(valid_voc_category_idcs)].to(torch.uint8)
        instance_labels_voc = torch.tensor(instance_labels_voc, dtype=torch.uint8)

        # Calling `.max()` on the stacked detection masks works fine to separate background from foreground as long as
        # there is at most a single instance per pixel. Overlapping instances will be filtered out in the next step.
        segmentation_mask, _ = (instance_masks * instance_labels_voc.reshape(-1, 1, 1)).max(dim=0)
        segmentation_mask[instance_masks.sum(dim=0) > 1] = self.INVALID_VALUE

        return segmentation_mask
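
    # Worked example of the merge above (illustrative numbers): for two 2x2 instance
    # masks with VOC labels 3 and 5,
    #   instance_masks = [[[1, 1], [0, 0]], [[0, 1], [0, 1]]]
    # the weighted maximum over the instance dimension yields [[3, 5], [0, 5]], and the
    # pixel at (0, 1), which is covered by both instances, is then overwritten with
    # INVALID_VALUE, giving [[3, 255], [0, 5]].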

    def forward(self, image, target):
        segmentation_mask = self._coco_detection_masks_to_voc_segmentation_mask(target)
        if segmentation_mask is None:
            segmentation_mask = torch.zeros(v2.functional.get_size(image), dtype=torch.uint8)

        return image, tv_tensors.Mask(segmentation_mask)
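

# A minimal end-to-end sketch (the dataset paths are placeholders) of how these transforms
# are meant to be combined, roughly mirroring the segmentation reference scripts: the
# dataset is wrapped for transforms v2 so that the target dict carries "masks" and
# "labels", which CocoDetectionToVOCSegmentation then collapses into a single VOC-style
# segmentation mask.
if __name__ == "__main__":
    from collections import defaultdict

    from torchvision import datasets

    transforms = v2.Compose(
        [
            CocoDetectionToVOCSegmentation(),
            v2.ToImage(),
            # Pad the image with 0 and the mask with the invalid value, so padded pixels
            # can be ignored later on.
            PadIfSmaller(size=480, fill=defaultdict(lambda: 0, {tv_tensors.Mask: 255})),
            v2.RandomCrop(size=480),
            v2.ToDtype(torch.float32, scale=True),
        ]
    )

    dataset = datasets.CocoDetection(
        "path/to/coco/images", "path/to/coco/annotations.json", transforms=transforms
    )
    dataset = datasets.wrap_dataset_for_transforms_v2(dataset, target_keys={"masks", "labels"})

    image, mask = dataset[0]
    print(image.shape, mask.shape)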