# deeplabv3.py
from functools import partial
from typing import Any, List, Optional

import torch
from torch import nn
from torch.nn import functional as F

from ...transforms._presets import SemanticSegmentation
from .._api import register_model, Weights, WeightsEnum
from .._meta import _VOC_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface, IntermediateLayerGetter
from ..mobilenetv3 import mobilenet_v3_large, MobileNet_V3_Large_Weights, MobileNetV3
from ..resnet import ResNet, resnet101, ResNet101_Weights, resnet50, ResNet50_Weights
from ._utils import _SimpleSegmentationModel
from .fcn import FCNHead


__all__ = [
    "DeepLabV3",
    "DeepLabV3_ResNet50_Weights",
    "DeepLabV3_ResNet101_Weights",
    "DeepLabV3_MobileNet_V3_Large_Weights",
    "deeplabv3_mobilenet_v3_large",
    "deeplabv3_resnet50",
    "deeplabv3_resnet101",
]

class DeepLabV3(_SimpleSegmentationModel):
    """
    Implements DeepLabV3 model from
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.

    Args:
        backbone (nn.Module): the network used to compute the features for the model.
            The backbone should return an OrderedDict[Tensor], with the key being
            "out" for the last feature map used, and "aux" if an auxiliary classifier
            is used.
        classifier (nn.Module): module that takes the "out" element returned from
            the backbone and returns a dense prediction.
        aux_classifier (nn.Module, optional): auxiliary classifier used during training
    """

    pass

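# Usage sketch (illustrative, not part of the upstream module): the wrapped model
# returns an OrderedDict keyed by "out" (and "aux" when an auxiliary classifier is
# attached), already upsampled to the input resolution. The input size is an
# arbitrary example.
#
#   >>> model = deeplabv3_resnet50(weights=None, num_classes=21).eval()
#   >>> with torch.no_grad():
#   ...     result = model(torch.rand(1, 3, 520, 520))
#   >>> result["out"].shape
#   torch.Size([1, 21, 520, 520])
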
class DeepLabHead(nn.Sequential):
    def __init__(self, in_channels: int, num_classes: int) -> None:
        super().__init__(
            ASPP(in_channels, [12, 24, 36]),
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, num_classes, 1),
        )

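# Shape sketch (illustrative): the head maps backbone features to per-class logits at
# the feature resolution; upsampling back to image size happens in the wrapper above.
#
#   >>> head = DeepLabHead(in_channels=2048, num_classes=21)
#   >>> head(torch.rand(1, 2048, 65, 65)).shape
#   torch.Size([1, 21, 65, 65])
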
class ASPPConv(nn.Sequential):
    def __init__(self, in_channels: int, out_channels: int, dilation: int) -> None:
        modules = [
            nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        ]
        super().__init__(*modules)

class ASPPPooling(nn.Sequential):
    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pool to 1x1 (image-level features), then upsample back to the input's
        # spatial size so this branch can be concatenated with the other ASPP branches.
        size = x.shape[-2:]
        for mod in self:
            x = mod(x)
        return F.interpolate(x, size=size, mode="bilinear", align_corners=False)

class ASPP(nn.Module):
    def __init__(self, in_channels: int, atrous_rates: List[int], out_channels: int = 256) -> None:
        super().__init__()
        modules = []
        modules.append(
            nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU())
        )

        rates = tuple(atrous_rates)
        for rate in rates:
            modules.append(ASPPConv(in_channels, out_channels, rate))

        modules.append(ASPPPooling(in_channels, out_channels))

        self.convs = nn.ModuleList(modules)

        self.project = nn.Sequential(
            nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.5),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Apply every branch to the same input, then fuse by channel-wise
        # concatenation followed by a 1x1 projection back to out_channels.
        _res = []
        for conv in self.convs:
            _res.append(conv(x))
        res = torch.cat(_res, dim=1)
        return self.project(res)

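# Branch-count sketch (illustrative): with the default rates [12, 24, 36] the module
# holds five branches (one 1x1 conv, three atrous 3x3 convs, one image-pooling branch),
# so the concatenation has 5 * out_channels channels before the final 1x1 projection.
#
#   >>> aspp = ASPP(in_channels=2048, atrous_rates=[12, 24, 36])
#   >>> len(aspp.convs)
#   5
#   >>> aspp(torch.rand(1, 2048, 65, 65)).shape
#   torch.Size([1, 256, 65, 65])
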
def _deeplabv3_resnet(
    backbone: ResNet,
    num_classes: int,
    aux: Optional[bool],
) -> DeepLabV3:
    return_layers = {"layer4": "out"}
    if aux:
        return_layers["layer3"] = "aux"
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)

    aux_classifier = FCNHead(1024, num_classes) if aux else None
    classifier = DeepLabHead(2048, num_classes)
    return DeepLabV3(backbone, classifier, aux_classifier)

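# Note (illustrative): with replace_stride_with_dilation=[False, True, True] (used by
# the builders below), the ResNet keeps an output stride of 8, so "layer4" (2048
# channels) and "layer3" (1024 channels) share the same spatial resolution, which is
# why the channel counts above are hard-coded.
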
_COMMON_META = {
    "categories": _VOC_CATEGORIES,
    "min_size": (1, 1),
    "_docs": """
        These weights were trained on a subset of COCO, using only the 20 categories that are present in the Pascal VOC
        dataset.
    """,
}


class DeepLabV3_ResNet50_Weights(WeightsEnum):
    COCO_WITH_VOC_LABELS_V1 = Weights(
        url="https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth",
        transforms=partial(SemanticSegmentation, resize_size=520),
        meta={
            **_COMMON_META,
            "num_params": 42004074,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet50",
            "_metrics": {
                "COCO-val2017-VOC-labels": {
                    "miou": 66.4,
                    "pixel_acc": 92.4,
                }
            },
            "_ops": 178.722,
            "_file_size": 160.515,
        },
    )
    DEFAULT = COCO_WITH_VOC_LABELS_V1

class DeepLabV3_ResNet101_Weights(WeightsEnum):
    COCO_WITH_VOC_LABELS_V1 = Weights(
        url="https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth",
        transforms=partial(SemanticSegmentation, resize_size=520),
        meta={
            **_COMMON_META,
            "num_params": 60996202,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet101",
            "_metrics": {
                "COCO-val2017-VOC-labels": {
                    "miou": 67.4,
                    "pixel_acc": 92.4,
                }
            },
            "_ops": 258.743,
            "_file_size": 233.217,
        },
    )
    DEFAULT = COCO_WITH_VOC_LABELS_V1

class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum):
    COCO_WITH_VOC_LABELS_V1 = Weights(
        url="https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth",
        transforms=partial(SemanticSegmentation, resize_size=520),
        meta={
            **_COMMON_META,
            "num_params": 11029328,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_mobilenet_v3_large",
            "_metrics": {
                "COCO-val2017-VOC-labels": {
                    "miou": 60.3,
                    "pixel_acc": 91.2,
                }
            },
            "_ops": 10.452,
            "_file_size": 42.301,
        },
    )
    DEFAULT = COCO_WITH_VOC_LABELS_V1


def _deeplabv3_mobilenetv3(
    backbone: MobileNetV3,
    num_classes: int,
    aux: Optional[bool],
) -> DeepLabV3:
    backbone = backbone.features
    # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
    # The first and last blocks are always included because they are the C0 (conv1) and Cn.
    stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
    out_pos = stage_indices[-1]  # use C5 which has output_stride = 16
    out_inplanes = backbone[out_pos].out_channels
    aux_pos = stage_indices[-4]  # use C2 here which has output_stride = 8
    aux_inplanes = backbone[aux_pos].out_channels
    return_layers = {str(out_pos): "out"}
    if aux:
        return_layers[str(aux_pos)] = "aux"
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)

    aux_classifier = FCNHead(aux_inplanes, num_classes) if aux else None
    classifier = DeepLabHead(out_inplanes, num_classes)
    return DeepLabV3(backbone, classifier, aux_classifier)

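# Note (illustrative): for the dilated MobileNetV3-Large backbone built below, the
# last feature block typically reports out_channels=960 (the "out" head input) and the
# selected stride-8 block out_channels=40 (the "aux" head input); both widths are read
# from the blocks rather than hard-coded.
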
@register_model()
@handle_legacy_interface(
    weights=("pretrained", DeepLabV3_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def deeplabv3_resnet50(
    *,
    weights: Optional[DeepLabV3_ResNet50_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    aux_loss: Optional[bool] = None,
    weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
    **kwargs: Any,
) -> DeepLabV3:
    """Constructs a DeepLabV3 model with a ResNet-50 backbone.

    .. betastatus:: segmentation module

    Reference: `Rethinking Atrous Convolution for Semantic Image Segmentation <https://arxiv.org/abs/1706.05587>`__.

    Args:
        weights (:class:`~torchvision.models.segmentation.DeepLabV3_ResNet50_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.segmentation.DeepLabV3_ResNet50_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        num_classes (int, optional): number of output classes of the model (including the background)
        aux_loss (bool, optional): If True, it uses an auxiliary loss
        weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The pretrained weights for the
            backbone
        **kwargs: unused

    .. autoclass:: torchvision.models.segmentation.DeepLabV3_ResNet50_Weights
        :members:
    """
    weights = DeepLabV3_ResNet50_Weights.verify(weights)
    weights_backbone = ResNet50_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
    elif num_classes is None:
        num_classes = 21

    backbone = resnet50(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
    model = _deeplabv3_resnet(backbone, num_classes, aux_loss)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model

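# Usage sketch (illustrative): loading the COCO-with-VOC-labels weights together with
# their matching preprocessing preset; the checkpoint is downloaded on first use.
#
#   >>> weights = DeepLabV3_ResNet50_Weights.DEFAULT
#   >>> model = deeplabv3_resnet50(weights=weights).eval()
#   >>> preprocess = weights.transforms()
#   >>> batch = preprocess(torch.rand(3, 500, 400)).unsqueeze(0)
#   >>> mask = model(batch)["out"].argmax(1)  # per-pixel class ids, shape (1, H, W)
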
@register_model()
@handle_legacy_interface(
    weights=("pretrained", DeepLabV3_ResNet101_Weights.COCO_WITH_VOC_LABELS_V1),
    weights_backbone=("pretrained_backbone", ResNet101_Weights.IMAGENET1K_V1),
)
def deeplabv3_resnet101(
    *,
    weights: Optional[DeepLabV3_ResNet101_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    aux_loss: Optional[bool] = None,
    weights_backbone: Optional[ResNet101_Weights] = ResNet101_Weights.IMAGENET1K_V1,
    **kwargs: Any,
) -> DeepLabV3:
    """Constructs a DeepLabV3 model with a ResNet-101 backbone.

    .. betastatus:: segmentation module

    Reference: `Rethinking Atrous Convolution for Semantic Image Segmentation <https://arxiv.org/abs/1706.05587>`__.

    Args:
        weights (:class:`~torchvision.models.segmentation.DeepLabV3_ResNet101_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.segmentation.DeepLabV3_ResNet101_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        num_classes (int, optional): number of output classes of the model (including the background)
        aux_loss (bool, optional): If True, it uses an auxiliary loss
        weights_backbone (:class:`~torchvision.models.ResNet101_Weights`, optional): The pretrained weights for the
            backbone
        **kwargs: unused

    .. autoclass:: torchvision.models.segmentation.DeepLabV3_ResNet101_Weights
        :members:
    """
    weights = DeepLabV3_ResNet101_Weights.verify(weights)
    weights_backbone = ResNet101_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
    elif num_classes is None:
        num_classes = 21

    backbone = resnet101(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
    model = _deeplabv3_resnet(backbone, num_classes, aux_loss)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model

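# Training sketch (illustrative): with aux_loss=True the forward pass also returns an
# "aux" map; the torchvision reference scripts combine the two cross-entropy terms
# with a 0.5 weight on the auxiliary one. Dummy inputs and labels are used here.
#
#   >>> model = deeplabv3_resnet101(weights=None, num_classes=21, aux_loss=True).train()
#   >>> images = torch.rand(2, 3, 480, 480)
#   >>> target = torch.randint(0, 21, (2, 480, 480))
#   >>> out = model(images)
#   >>> loss = F.cross_entropy(out["out"], target) + 0.5 * F.cross_entropy(out["aux"], target)
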
@register_model()
@handle_legacy_interface(
    weights=("pretrained", DeepLabV3_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1),
    weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1),
)
def deeplabv3_mobilenet_v3_large(
    *,
    weights: Optional[DeepLabV3_MobileNet_V3_Large_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    aux_loss: Optional[bool] = None,
    weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1,
    **kwargs: Any,
) -> DeepLabV3:
    """Constructs a DeepLabV3 model with a MobileNetV3-Large backbone.

    Reference: `Rethinking Atrous Convolution for Semantic Image Segmentation <https://arxiv.org/abs/1706.05587>`__.

    Args:
        weights (:class:`~torchvision.models.segmentation.DeepLabV3_MobileNet_V3_Large_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.segmentation.DeepLabV3_MobileNet_V3_Large_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        num_classes (int, optional): number of output classes of the model (including the background)
        aux_loss (bool, optional): If True, it uses an auxiliary loss
        weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The pretrained weights
            for the backbone
        **kwargs: unused

    .. autoclass:: torchvision.models.segmentation.DeepLabV3_MobileNet_V3_Large_Weights
        :members:
    """
    weights = DeepLabV3_MobileNet_V3_Large_Weights.verify(weights)
    weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
    elif num_classes is None:
        num_classes = 21

    backbone = mobilenet_v3_large(weights=weights_backbone, dilated=True)
    model = _deeplabv3_mobilenetv3(backbone, num_classes, aux_loss)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model
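
# Usage sketch (illustrative): the MobileNetV3-Large variant is the lightweight option
# (roughly 10.5 vs 178.7 ops for the ResNet-50 variant, per the "_ops" meta above) and
# is called the same way.
#
#   >>> model = deeplabv3_mobilenet_v3_large(weights=None, num_classes=21).eval()
#   >>> model(torch.rand(1, 3, 224, 224))["out"].shape
#   torch.Size([1, 21, 224, 224])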