# mask_rcnn.py
from collections import OrderedDict
from typing import Any, Callable, Optional

from torch import nn
from torchvision.ops import MultiScaleRoIAlign

from ...ops import misc as misc_nn_ops
from ...transforms._presets import ObjectDetection
from .._api import register_model, Weights, WeightsEnum
from .._meta import _COCO_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface
from ..resnet import resnet50, ResNet50_Weights
from ._utils import overwrite_eps
from .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers
from .faster_rcnn import _default_anchorgen, FasterRCNN, FastRCNNConvFCHead, RPNHead


__all__ = [
    "MaskRCNN",
    "MaskRCNN_ResNet50_FPN_Weights",
    "MaskRCNN_ResNet50_FPN_V2_Weights",
    "maskrcnn_resnet50_fpn",
    "maskrcnn_resnet50_fpn_v2",
]


class MaskRCNN(FasterRCNN):
    """
    Implements Mask R-CNN.

    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
    image, and should be in 0-1 range. Different images can have different sizes.

    The behavior of the model changes depending on whether it is in training or evaluation mode.

    During training, the model expects both the input tensors and targets (list of dictionary),
    containing:

        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (Int64Tensor[N]): the class label for each ground-truth box
        - masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance

    The model returns a Dict[Tensor] during training, containing the classification and regression
    losses for both the RPN and the R-CNN, and the mask loss.

    During inference, the model requires only the input tensors, and returns the post-processed
    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
    follows:

        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores of each prediction
        - masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to
          obtain the final segmentation masks, the soft masks can be thresholded, generally
          with a value of 0.5 (mask >= 0.5)

    Args:
        backbone (nn.Module): the network used to compute the features for the model.
            It should contain an out_channels attribute, which indicates the number of output
            channels that each feature map has (and it should be the same for all feature maps).
            The backbone should return a single Tensor or an OrderedDict[Tensor].
        num_classes (int): number of output classes of the model (including the background).
            If box_predictor is specified, num_classes should be None.
        min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
        max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
        image_mean (Tuple[float, float, float]): mean values used for input normalization.
            They are generally the mean values of the dataset on which the backbone has been trained.
        image_std (Tuple[float, float, float]): std values used for input normalization.
            They are generally the std values of the dataset on which the backbone has been trained.
        rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
            maps.
        rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
        rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
        rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
        rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
        rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
        rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
        rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
            considered as positive during training of the RPN.
        rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
            considered as negative during training of the RPN.
        rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
            for computing the loss
        rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
            of the RPN
        rpn_score_thresh (float): during inference, only return proposals with a classification score
            greater than rpn_score_thresh
        box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
            the locations indicated by the bounding boxes
        box_head (nn.Module): module that takes the cropped feature maps as input
        box_predictor (nn.Module): module that takes the output of box_head and returns the
            classification logits and box regression deltas.
        box_score_thresh (float): during inference, only return proposals with a classification score
            greater than box_score_thresh
        box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
        box_detections_per_img (int): maximum number of detections per image, for all classes.
        box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
            considered as positive during training of the classification head
        box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
            considered as negative during training of the classification head
        box_batch_size_per_image (int): number of proposals that are sampled during training of the
            classification head
        box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
            of the classification head
        bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
            bounding boxes
        mask_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
            the locations indicated by the bounding boxes, which will be used for the mask head.
        mask_head (nn.Module): module that takes the cropped feature maps as input
        mask_predictor (nn.Module): module that takes the output of the mask_head and returns the
            segmentation mask logits

    Example::

        >>> import torch
        >>> import torchvision
        >>> from torchvision.models import MobileNet_V2_Weights
        >>> from torchvision.models.detection import MaskRCNN
        >>> from torchvision.models.detection.anchor_utils import AnchorGenerator
        >>>
        >>> # load a pre-trained model for classification and return
        >>> # only the features
        >>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features
        >>> # MaskRCNN needs to know the number of
        >>> # output channels in a backbone. For mobilenet_v2, it's 1280,
        >>> # so we need to add it here
        >>> backbone.out_channels = 1280
        >>>
        >>> # let's make the RPN generate 5 x 3 anchors per spatial
        >>> # location, with 5 different sizes and 3 different aspect
        >>> # ratios. We have a Tuple[Tuple[int]] because each feature
        >>> # map could potentially have different sizes and
        >>> # aspect ratios
        >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
        >>>                                    aspect_ratios=((0.5, 1.0, 2.0),))
        >>>
        >>> # let's define what are the feature maps that we will
        >>> # use to perform the region of interest cropping, as well as
        >>> # the size of the crop after rescaling.
        >>> # if your backbone returns a Tensor, featmap_names is expected to
        >>> # be ['0']. More generally, the backbone should return an
        >>> # OrderedDict[Tensor], and in featmap_names you can choose which
        >>> # feature maps to use.
        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
        >>>                                                 output_size=7,
        >>>                                                 sampling_ratio=2)
        >>>
        >>> mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
        >>>                                                      output_size=14,
        >>>                                                      sampling_ratio=2)
        >>> # put the pieces together inside a MaskRCNN model
        >>> model = MaskRCNN(backbone,
        >>>                  num_classes=2,
        >>>                  rpn_anchor_generator=anchor_generator,
        >>>                  box_roi_pool=roi_pooler,
        >>>                  mask_roi_pool=mask_roi_pooler)
        >>> model.eval()
        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
        >>> predictions = model(x)
    """

    def __init__(
        self,
        backbone,
        num_classes=None,
        # transform parameters
        min_size=800,
        max_size=1333,
        image_mean=None,
        image_std=None,
        # RPN parameters
        rpn_anchor_generator=None,
        rpn_head=None,
        rpn_pre_nms_top_n_train=2000,
        rpn_pre_nms_top_n_test=1000,
        rpn_post_nms_top_n_train=2000,
        rpn_post_nms_top_n_test=1000,
        rpn_nms_thresh=0.7,
        rpn_fg_iou_thresh=0.7,
        rpn_bg_iou_thresh=0.3,
        rpn_batch_size_per_image=256,
        rpn_positive_fraction=0.5,
        rpn_score_thresh=0.0,
        # Box parameters
        box_roi_pool=None,
        box_head=None,
        box_predictor=None,
        box_score_thresh=0.05,
        box_nms_thresh=0.5,
        box_detections_per_img=100,
        box_fg_iou_thresh=0.5,
        box_bg_iou_thresh=0.5,
        box_batch_size_per_image=512,
        box_positive_fraction=0.25,
        bbox_reg_weights=None,
        # Mask parameters
        mask_roi_pool=None,
        mask_head=None,
        mask_predictor=None,
        **kwargs,
    ):
        if not isinstance(mask_roi_pool, (MultiScaleRoIAlign, type(None))):
            raise TypeError(
                f"mask_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(mask_roi_pool)}"
            )

        if num_classes is not None:
            if mask_predictor is not None:
                raise ValueError("num_classes should be None when mask_predictor is specified")

        out_channels = backbone.out_channels

        if mask_roi_pool is None:
            mask_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=14, sampling_ratio=2)

        if mask_head is None:
            mask_layers = (256, 256, 256, 256)
            mask_dilation = 1
            mask_head = MaskRCNNHeads(out_channels, mask_layers, mask_dilation)

        if mask_predictor is None:
            mask_predictor_in_channels = 256  # == mask_layers[-1]
            mask_dim_reduced = 256
            mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels, mask_dim_reduced, num_classes)

        super().__init__(
            backbone,
            num_classes,
            # transform parameters
            min_size,
            max_size,
            image_mean,
            image_std,
            # RPN-specific parameters
            rpn_anchor_generator,
            rpn_head,
            rpn_pre_nms_top_n_train,
            rpn_pre_nms_top_n_test,
            rpn_post_nms_top_n_train,
            rpn_post_nms_top_n_test,
            rpn_nms_thresh,
            rpn_fg_iou_thresh,
            rpn_bg_iou_thresh,
            rpn_batch_size_per_image,
            rpn_positive_fraction,
            rpn_score_thresh,
            # Box parameters
            box_roi_pool,
            box_head,
            box_predictor,
            box_score_thresh,
            box_nms_thresh,
            box_detections_per_img,
            box_fg_iou_thresh,
            box_bg_iou_thresh,
            box_batch_size_per_image,
            box_positive_fraction,
            bbox_reg_weights,
            **kwargs,
        )

        self.roi_heads.mask_roi_pool = mask_roi_pool
        self.roi_heads.mask_head = mask_head
        self.roi_heads.mask_predictor = mask_predictor


class MaskRCNNHeads(nn.Sequential):
    _version = 2

    def __init__(self, in_channels, layers, dilation, norm_layer: Optional[Callable[..., nn.Module]] = None):
        """
        Args:
            in_channels (int): number of input channels
            layers (list): feature dimensions of each FCN layer
            dilation (int): dilation rate of kernel
            norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None
        """
        blocks = []
        next_feature = in_channels
        for layer_features in layers:
            blocks.append(
                misc_nn_ops.Conv2dNormActivation(
                    next_feature,
                    layer_features,
                    kernel_size=3,
                    stride=1,
                    padding=dilation,
                    dilation=dilation,
                    norm_layer=norm_layer,
                )
            )
            next_feature = layer_features

        super().__init__(*blocks)
        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                nn.init.kaiming_normal_(layer.weight, mode="fan_out", nonlinearity="relu")
                if layer.bias is not None:
                    nn.init.zeros_(layer.bias)

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        version = local_metadata.get("version", None)

        if version is None or version < 2:
            num_blocks = len(self)
            for i in range(num_blocks):
                # remap pre-v2 checkpoint keys (``mask_fcn{i}``) to the
                # current nn.Sequential layout (``{i}.0``)
                for param_type in ["weight", "bias"]:
                    old_key = f"{prefix}mask_fcn{i+1}.{param_type}"
                    new_key = f"{prefix}{i}.0.{param_type}"
                    if old_key in state_dict:
                        state_dict[new_key] = state_dict.pop(old_key)

        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )
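
# The key migration above, sketched for the default 4-layer head (illustrative,
# not part of the upstream module): pre-version-2 checkpoints named each
# convolution ``mask_fcn{i}``, while the nn.Sequential of Conv2dNormActivation
# blocks stores the conv as submodule ``0`` of block ``i``:
#
#     mask_fcn1.weight -> 0.0.weight    mask_fcn1.bias -> 0.0.bias
#     mask_fcn2.weight -> 1.0.weight    mask_fcn2.bias -> 1.0.bias
#     mask_fcn3.weight -> 2.0.weight    mask_fcn3.bias -> 2.0.bias
#     mask_fcn4.weight -> 3.0.weight    mask_fcn4.bias -> 3.0.bias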


class MaskRCNNPredictor(nn.Sequential):
    def __init__(self, in_channels, dim_reduced, num_classes):
        super().__init__(
            OrderedDict(
                [
                    ("conv5_mask", nn.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)),
                    ("relu", nn.ReLU(inplace=True)),
                    ("mask_fcn_logits", nn.Conv2d(dim_reduced, num_classes, 1, 1, 0)),
                ]
            )
        )

        for name, param in self.named_parameters():
            if "weight" in name:
                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
            # elif "bias" in name:
            #     nn.init.constant_(param, 0)
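
# A shape sketch for the predictor above (illustrative, not part of the module):
# the 2x2 stride-2 ConvTranspose2d doubles the spatial size of the pooled mask
# features, so the default 14x14 RoI features yield 28x28 per-class mask logits.
#
#     >>> import torch
#     >>> predictor = MaskRCNNPredictor(in_channels=256, dim_reduced=256, num_classes=91)
#     >>> predictor(torch.rand(8, 256, 14, 14)).shape
#     torch.Size([8, 91, 28, 28])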


_COMMON_META = {
    "categories": _COCO_CATEGORIES,
    "min_size": (1, 1),
}


class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum):
    COCO_V1 = Weights(
        url="https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth",
        transforms=ObjectDetection,
        meta={
            **_COMMON_META,
            "num_params": 44401393,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#mask-r-cnn",
            "_metrics": {
                "COCO-val2017": {
                    "box_map": 37.9,
                    "mask_map": 34.6,
                }
            },
            "_ops": 134.38,
            "_file_size": 169.84,
            "_docs": """These weights were produced by following a training recipe similar to that of the paper.""",
        },
    )
    DEFAULT = COCO_V1
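
# Usage sketch for the enum above (the standard torchvision weights API):
#
#     >>> weights = MaskRCNN_ResNet50_FPN_Weights.DEFAULT
#     >>> weights.meta["_metrics"]["COCO-val2017"]["box_map"]
#     37.9
#     >>> preprocess = weights.transforms()  # the matching inference transforms
#     >>> model = maskrcnn_resnet50_fpn(weights=weights)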


class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
    COCO_V1 = Weights(
        url="https://download.pytorch.org/models/maskrcnn_resnet50_fpn_v2_coco-73cbd019.pth",
        transforms=ObjectDetection,
        meta={
            **_COMMON_META,
            "num_params": 46359409,
            "recipe": "https://github.com/pytorch/vision/pull/5773",
            "_metrics": {
                "COCO-val2017": {
                    "box_map": 47.4,
                    "mask_map": 41.8,
                }
            },
            "_ops": 333.577,
            "_file_size": 177.219,
            "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
        },
    )
    DEFAULT = COCO_V1


@register_model()
@handle_legacy_interface(
    weights=("pretrained", MaskRCNN_ResNet50_FPN_Weights.COCO_V1),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def maskrcnn_resnet50_fpn(
    *,
    weights: Optional[MaskRCNN_ResNet50_FPN_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
    trainable_backbone_layers: Optional[int] = None,
    **kwargs: Any,
) -> MaskRCNN:
  376. """Mask R-CNN model with a ResNet-50-FPN backbone from the `Mask R-CNN
  377. <https://arxiv.org/abs/1703.06870>`_ paper.
  378. .. betastatus:: detection module
  379. The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
  380. image, and should be in ``0-1`` range. Different images can have different sizes.
  381. The behavior of the model changes depending on if it is in training or evaluation mode.
  382. During training, the model expects both the input tensors and targets (list of dictionary),
  383. containing:
  384. - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
  385. ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
  386. - labels (``Int64Tensor[N]``): the class label for each ground-truth box
  387. - masks (``UInt8Tensor[N, H, W]``): the segmentation binary masks for each instance
  388. The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
  389. losses for both the RPN and the R-CNN, and the mask loss.
  390. During inference, the model requires only the input tensors, and returns the post-processed
  391. predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
  392. follows, where ``N`` is the number of detected instances:
  393. - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
  394. ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
  395. - labels (``Int64Tensor[N]``): the predicted labels for each instance
  396. - scores (``Tensor[N]``): the scores or each instance
  397. - masks (``UInt8Tensor[N, 1, H, W]``): the predicted masks for each instance, in ``0-1`` range. In order to
  398. obtain the final segmentation masks, the soft masks can be thresholded, generally
  399. with a value of 0.5 (``mask >= 0.5``)
  400. For more details on the output and on how to plot the masks, you may refer to :ref:`instance_seg_output`.
  401. Mask R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size.
  402. Example::
  403. >>> model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights=MaskRCNN_ResNet50_FPN_Weights.DEFAULT)
  404. >>> model.eval()
  405. >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
  406. >>> predictions = model(x)
  407. >>>
  408. >>> # optionally, if you want to export the model to ONNX:
  409. >>> torch.onnx.export(model, x, "mask_rcnn.onnx", opset_version = 11)
  410. Args:
  411. weights (:class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights`, optional): The
  412. pretrained weights to use. See
  413. :class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights` below for
  414. more details, and possible values. By default, no pre-trained
  415. weights are used.
  416. progress (bool, optional): If True, displays a progress bar of the
  417. download to stderr. Default is True.
  418. num_classes (int, optional): number of output classes of the model (including the background)
  419. weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The
  420. pretrained weights for the backbone.
  421. trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from
  422. final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are
  423. trainable. If ``None`` is passed (the default) this value is set to 3.
  424. **kwargs: parameters passed to the ``torchvision.models.detection.mask_rcnn.MaskRCNN``
  425. base class. Please refer to the `source code
  426. <https://github.com/pytorch/vision/blob/main/torchvision/models/detection/mask_rcnn.py>`_
  427. for more details about this class.
  428. .. autoclass:: torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights
  429. :members:
  430. """
    weights = MaskRCNN_ResNet50_FPN_Weights.verify(weights)
    weights_backbone = ResNet50_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
    elif num_classes is None:
        num_classes = 91

    is_trained = weights is not None or weights_backbone is not None
    trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
    norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d

    backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
    backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
    model = MaskRCNN(backbone, num_classes=num_classes, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if weights == MaskRCNN_ResNet50_FPN_Weights.COCO_V1:
            overwrite_eps(model, 0.0)

    return model
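
# A fine-tuning sketch for the builder above (illustrative; it follows the common
# torchvision recipe of swapping the box and mask predictors for a custom number
# of classes, here the hypothetical value 3 with background included, rather than
# anything specific to this file):
#
#     >>> from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
#     >>> model = maskrcnn_resnet50_fpn(weights=MaskRCNN_ResNet50_FPN_Weights.DEFAULT)
#     >>> in_features = model.roi_heads.box_predictor.cls_score.in_features
#     >>> model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes=3)
#     >>> in_channels = model.roi_heads.mask_predictor.conv5_mask.in_channels
#     >>> model.roi_heads.mask_predictor = MaskRCNNPredictor(in_channels, 256, num_classes=3)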


@register_model()
@handle_legacy_interface(
    weights=("pretrained", MaskRCNN_ResNet50_FPN_V2_Weights.COCO_V1),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def maskrcnn_resnet50_fpn_v2(
    *,
    weights: Optional[MaskRCNN_ResNet50_FPN_V2_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    weights_backbone: Optional[ResNet50_Weights] = None,
    trainable_backbone_layers: Optional[int] = None,
    **kwargs: Any,
) -> MaskRCNN:
  463. """Improved Mask R-CNN model with a ResNet-50-FPN backbone from the `Benchmarking Detection Transfer
  464. Learning with Vision Transformers <https://arxiv.org/abs/2111.11429>`_ paper.
  465. .. betastatus:: detection module
  466. :func:`~torchvision.models.detection.maskrcnn_resnet50_fpn` for more details.
  467. Args:
  468. weights (:class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights`, optional): The
  469. pretrained weights to use. See
  470. :class:`~torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights` below for
  471. more details, and possible values. By default, no pre-trained
  472. weights are used.
  473. progress (bool, optional): If True, displays a progress bar of the
  474. download to stderr. Default is True.
  475. num_classes (int, optional): number of output classes of the model (including the background)
  476. weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The
  477. pretrained weights for the backbone.
  478. trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from
  479. final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are
  480. trainable. If ``None`` is passed (the default) this value is set to 3.
  481. **kwargs: parameters passed to the ``torchvision.models.detection.mask_rcnn.MaskRCNN``
  482. base class. Please refer to the `source code
  483. <https://github.com/pytorch/vision/blob/main/torchvision/models/detection/mask_rcnn.py>`_
  484. for more details about this class.
  485. .. autoclass:: torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights
  486. :members:
  487. """
    weights = MaskRCNN_ResNet50_FPN_V2_Weights.verify(weights)
    weights_backbone = ResNet50_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
    elif num_classes is None:
        num_classes = 91

    is_trained = weights is not None or weights_backbone is not None
    trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)

    backbone = resnet50(weights=weights_backbone, progress=progress)
    backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers, norm_layer=nn.BatchNorm2d)
    rpn_anchor_generator = _default_anchorgen()
    rpn_head = RPNHead(backbone.out_channels, rpn_anchor_generator.num_anchors_per_location()[0], conv_depth=2)
    box_head = FastRCNNConvFCHead(
        (backbone.out_channels, 7, 7), [256, 256, 256, 256], [1024], norm_layer=nn.BatchNorm2d
    )
    mask_head = MaskRCNNHeads(backbone.out_channels, [256, 256, 256, 256], 1, norm_layer=nn.BatchNorm2d)
    model = MaskRCNN(
        backbone,
        num_classes=num_classes,
        rpn_anchor_generator=rpn_anchor_generator,
        rpn_head=rpn_head,
        box_head=box_head,
        mask_head=mask_head,
        **kwargs,
    )

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model
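
# Usage sketch for the V2 builder (the inference contract is the same as for
# the V1 model above):
#
#     >>> import torch
#     >>> model = maskrcnn_resnet50_fpn_v2(weights=MaskRCNN_ResNet50_FPN_V2_Weights.DEFAULT)
#     >>> model = model.eval()
#     >>> predictions = model([torch.rand(3, 300, 400)])
#     >>> sorted(predictions[0].keys())
#     ['boxes', 'labels', 'masks', 'scores']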