presets.py
import torch


def get_modules(use_v2):
    # We need a protected import to avoid the V2 warning in case just V1 is used
    if use_v2:
        import torchvision.transforms.v2
        import torchvision.tv_tensors
        import v2_extras

        return torchvision.transforms.v2, torchvision.tv_tensors, v2_extras
    else:
        import transforms

        return transforms, None, None
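
# Illustrative note (not from the original file): get_modules(use_v2=True)
# returns the torchvision.transforms.v2 namespace, the torchvision.tv_tensors
# module, and the local v2_extras helpers; get_modules(use_v2=False) returns
# the local v1 `transforms` module and None for the other two.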


class SegmentationPresetTrain:
    def __init__(
        self,
        *,
        base_size,
        crop_size,
        hflip_prob=0.5,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
        backend="pil",
        use_v2=False,
    ):
        T, tv_tensors, v2_extras = get_modules(use_v2)

        transforms = []
        backend = backend.lower()
        if backend == "tv_tensor":
            transforms.append(T.ToImage())
        elif backend == "tensor":
            transforms.append(T.PILToTensor())
        elif backend != "pil":
            raise ValueError(f"backend can be 'tv_tensor', 'tensor' or 'pil', but got {backend}")

        transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]

        if hflip_prob > 0:
            transforms += [T.RandomHorizontalFlip(hflip_prob)]

        if use_v2:
            # We need a custom pad transform here, since the padding we want to perform here is fundamentally
            # different from the padding in `RandomCrop` if `pad_if_needed=True`.
            transforms += [v2_extras.PadIfSmaller(crop_size, fill={tv_tensors.Mask: 255, "others": 0})]

        transforms += [T.RandomCrop(crop_size)]

        if backend == "pil":
            transforms += [T.PILToTensor()]

        if use_v2:
            img_type = tv_tensors.Image if backend == "tv_tensor" else torch.Tensor
            transforms += [
                T.ToDtype(dtype={img_type: torch.float32, tv_tensors.Mask: torch.int64, "others": None}, scale=True)
            ]
        else:
            # No need to explicitly convert masks as they're magically int64 already
            transforms += [T.ToDtype(torch.float, scale=True)]

        transforms += [T.Normalize(mean=mean, std=std)]
        if use_v2:
            transforms += [T.ToPureTensor()]

        self.transforms = T.Compose(transforms)

    def __call__(self, img, target):
        return self.transforms(img, target)
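
# Usage sketch, not part of the original file: with the default "pil" backend,
# the preset takes a PIL image and a PIL segmentation mask and returns a
# normalized float32 image tensor and an int64 mask tensor. The sizes below
# are hypothetical example values:
#
#   train_tf = SegmentationPresetTrain(base_size=520, crop_size=480)
#   img_t, mask_t = train_tf(pil_img, pil_mask)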


class SegmentationPresetEval:
    def __init__(
        self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
    ):
        T, _, _ = get_modules(use_v2)

        transforms = []
        backend = backend.lower()
        if backend == "tensor":
            transforms += [T.PILToTensor()]
        elif backend == "tv_tensor":
            transforms += [T.ToImage()]
        elif backend != "pil":
            raise ValueError(f"backend can be 'tv_tensor', 'tensor' or 'pil', but got {backend}")

        if use_v2:
            transforms += [T.Resize(size=(base_size, base_size))]
        else:
            transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]

        if backend == "pil":
            # Note: we could just convert to pure tensors even in v2?
            transforms += [T.ToImage() if use_v2 else T.PILToTensor()]

        transforms += [
            T.ToDtype(torch.float, scale=True),
            T.Normalize(mean=mean, std=std),
        ]
        if use_v2:
            transforms += [T.ToPureTensor()]

        self.transforms = T.Compose(transforms)

    def __call__(self, img, target):
        return self.transforms(img, target)
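

# Minimal smoke test, a sketch rather than part of the original file: it
# assumes this module sits next to the torchvision segmentation reference
# helpers (the local v1 `transforms` module used when use_v2=False), and the
# sizes are example values, not prescribed defaults.
if __name__ == "__main__":
    from PIL import Image

    img = Image.new("RGB", (640, 480))
    mask = Image.new("L", (640, 480))

    train_tf = SegmentationPresetTrain(base_size=520, crop_size=480)
    img_t, mask_t = train_tf(img, mask)
    print(img_t.shape, img_t.dtype, mask_t.shape, mask_t.dtype)

    eval_tf = SegmentationPresetEval(base_size=520)
    img_e, mask_e = eval_tf(img, mask)
    print(img_e.shape, img_e.dtype, mask_e.shape, mask_e.dtype)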