import warnings
from typing import Callable, List, Optional, Sequence, Tuple, Union

import torch
from torch import Tensor

from ..utils import _log_api_usage_once, _make_ntuple


interpolate = torch.nn.functional.interpolate


class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed

    Args:
        num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)``
        eps (float): a value added to the denominator for numerical stability. Default: 1e-5
    """

    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
    ):
        super().__init__()
        _log_api_usage_once(self)
        self.eps = eps
        self.register_buffer("weight", torch.ones(num_features))
        self.register_buffer("bias", torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        self.register_buffer("running_var", torch.ones(num_features))

    def _load_from_state_dict(
        self,
        state_dict: dict,
        prefix: str,
        local_metadata: dict,
        strict: bool,
        missing_keys: List[str],
        unexpected_keys: List[str],
        error_msgs: List[str],
    ):
        num_batches_tracked_key = prefix + "num_batches_tracked"
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x: Tensor) -> Tensor:
        # move reshapes to the beginning
        # to make it fuser-friendly
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        scale = w * (rv + self.eps).rsqrt()
        bias = b - rm * scale
        return x * scale + bias

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})"
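
# Usage sketch (illustrative only; the channel count and shapes below are assumptions, not
# taken from this file):
#
#     fbn = FrozenBatchNorm2d(64)
#     x = torch.rand(2, 64, 8, 8)
#     y = fbn(x)  # normalizes with the frozen buffers; no running statistics are updated
#     print(fbn)  # FrozenBatchNorm2d(64, eps=1e-05)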


class ConvNormActivation(torch.nn.Sequential):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, ...]] = 3,
        stride: Union[int, Tuple[int, ...]] = 1,
        padding: Optional[Union[int, Tuple[int, ...], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, Tuple[int, ...]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
        conv_layer: Callable[..., torch.nn.Module] = torch.nn.Conv2d,
    ) -> None:

        if padding is None:
            if isinstance(kernel_size, int) and isinstance(dilation, int):
                padding = (kernel_size - 1) // 2 * dilation
            else:
                _conv_dim = len(kernel_size) if isinstance(kernel_size, Sequence) else len(dilation)
                kernel_size = _make_ntuple(kernel_size, _conv_dim)
                dilation = _make_ntuple(dilation, _conv_dim)
                padding = tuple((kernel_size[i] - 1) // 2 * dilation[i] for i in range(_conv_dim))
        if bias is None:
            bias = norm_layer is None

        layers = [
            conv_layer(
                in_channels,
                out_channels,
                kernel_size,
                stride,
                padding,
                dilation=dilation,
                groups=groups,
                bias=bias,
            )
        ]

        if norm_layer is not None:
            layers.append(norm_layer(out_channels))

        if activation_layer is not None:
            params = {} if inplace is None else {"inplace": inplace}
            layers.append(activation_layer(**params))
        super().__init__(*layers)
        _log_api_usage_once(self)
        self.out_channels = out_channels

        if self.__class__ == ConvNormActivation:
            warnings.warn(
                "Don't use ConvNormActivation directly, please use Conv2dNormActivation and Conv3dNormActivation instead."
            )
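
# Padding sketch (illustrative): with the default ``padding=None`` the block picks a padding
# that preserves spatial size at stride 1, e.g. kernel_size=3, dilation=2 gives
# padding = (3 - 1) // 2 * 2 = 2, so effective kernel 5 with padding 2 keeps H and W.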


class Conv2dNormActivation(ConvNormActivation):
    """
    Configurable block used for Convolution2d-Normalization-Activation blocks.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size (int, optional): Size of the convolving kernel. Default: 3
        stride (int, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm2d``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default: ``True``
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]] = 3,
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Optional[Union[int, Tuple[int, int], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, Tuple[int, int]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
    ) -> None:
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            groups,
            norm_layer,
            activation_layer,
            dilation,
            inplace,
            bias,
            torch.nn.Conv2d,
        )
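
# Usage sketch (illustrative only; the channel counts and input size are assumptions, not
# taken from this file):
#
#     block = Conv2dNormActivation(3, 16, kernel_size=3, stride=2)
#     x = torch.rand(1, 3, 224, 224)
#     y = block(x)  # Conv2d -> BatchNorm2d -> ReLU, output shape (1, 16, 112, 112)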


class Conv3dNormActivation(ConvNormActivation):
    """
    Configurable block used for Convolution3d-Normalization-Activation blocks.

    Args:
        in_channels (int): Number of channels in the input video.
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size (int, optional): Size of the convolving kernel. Default: 3
        stride (int, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm3d``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default: ``True``
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int, int]] = 3,
        stride: Union[int, Tuple[int, int, int]] = 1,
        padding: Optional[Union[int, Tuple[int, int, int], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm3d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, Tuple[int, int, int]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
    ) -> None:
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            groups,
            norm_layer,
            activation_layer,
            dilation,
            inplace,
            bias,
            torch.nn.Conv3d,
        )
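
# Usage sketch (illustrative only; the channel counts and clip size are assumptions, not
# taken from this file):
#
#     block = Conv3dNormActivation(3, 8, kernel_size=3)
#     clip = torch.rand(1, 3, 16, 112, 112)
#     out = block(clip)  # Conv3d -> BatchNorm3d -> ReLU, output shape (1, 8, 16, 112, 112)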


class SqueezeExcitation(torch.nn.Module):
    """
    This block implements the Squeeze-and-Excitation block from https://arxiv.org/abs/1709.01507 (see Fig. 1).
    Parameters ``activation`` and ``scale_activation`` correspond to ``delta`` and ``sigma`` in eq. 3.

    Args:
        input_channels (int): Number of channels in the input image
        squeeze_channels (int): Number of squeeze channels
        activation (Callable[..., torch.nn.Module], optional): ``delta`` activation. Default: ``torch.nn.ReLU``
        scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid``
    """

    def __init__(
        self,
        input_channels: int,
        squeeze_channels: int,
        activation: Callable[..., torch.nn.Module] = torch.nn.ReLU,
        scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
        self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
        self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
        self.activation = activation()
        self.scale_activation = scale_activation()

    def _scale(self, input: Tensor) -> Tensor:
        scale = self.avgpool(input)
        scale = self.fc1(scale)
        scale = self.activation(scale)
        scale = self.fc2(scale)
        return self.scale_activation(scale)

    def forward(self, input: Tensor) -> Tensor:
        scale = self._scale(input)
        return scale * input
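
# Usage sketch (illustrative only; the channel counts below are assumptions, not taken from
# this file):
#
#     se = SqueezeExcitation(input_channels=64, squeeze_channels=16)
#     x = torch.rand(2, 64, 32, 32)
#     y = se(x)  # per-channel rescaling of x via the squeeze path; same shape as the input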


class MLP(torch.nn.Sequential):
    """This block implements the multi-layer perceptron (MLP) module.

    Args:
        in_channels (int): Number of channels of the input
        hidden_channels (List[int]): List of the hidden channel dimensions
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the linear layer. If ``None`` this layer won't be used. Default: ``None``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the linear layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        inplace (bool, optional): Parameter for the activation layer, which can optionally do the operation in-place.
            Default is ``None``, which uses the respective default values of the ``activation_layer`` and Dropout layer.
        bias (bool): Whether to use bias in the linear layer. Default ``True``
        dropout (float): The probability for the dropout layer. Default: 0.0
    """

    def __init__(
        self,
        in_channels: int,
        hidden_channels: List[int],
        norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        inplace: Optional[bool] = None,
        bias: bool = True,
        dropout: float = 0.0,
    ):
        # The addition of `norm_layer` is inspired from the implementation of TorchMultimodal:
        # https://github.com/facebookresearch/multimodal/blob/5dec8a/torchmultimodal/modules/layers/mlp.py
        params = {} if inplace is None else {"inplace": inplace}

        layers = []
        in_dim = in_channels
        for hidden_dim in hidden_channels[:-1]:
            layers.append(torch.nn.Linear(in_dim, hidden_dim, bias=bias))
            if norm_layer is not None:
                layers.append(norm_layer(hidden_dim))
            layers.append(activation_layer(**params))
            layers.append(torch.nn.Dropout(dropout, **params))
            in_dim = hidden_dim

        layers.append(torch.nn.Linear(in_dim, hidden_channels[-1], bias=bias))
        layers.append(torch.nn.Dropout(dropout, **params))

        super().__init__(*layers)
        _log_api_usage_once(self)
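
# Usage sketch (illustrative only; the dimensions below are assumptions, not taken from this
# file):
#
#     mlp = MLP(in_channels=128, hidden_channels=[256, 64], activation_layer=torch.nn.GELU, dropout=0.1)
#     x = torch.rand(32, 128)
#     y = mlp(x)  # Linear(128, 256) -> GELU -> Dropout -> Linear(256, 64) -> Dropout, shape (32, 64)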


class Permute(torch.nn.Module):
    """This module returns a view of the tensor input with its dimensions permuted.

    Args:
        dims (List[int]): The desired ordering of dimensions
    """

    def __init__(self, dims: List[int]):
        super().__init__()
        self.dims = dims

    def forward(self, x: Tensor) -> Tensor:
        return torch.permute(x, self.dims)
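
# Usage sketch (illustrative only; the shape below is an assumption, not taken from this file):
#
#     to_channels_last = Permute([0, 2, 3, 1])
#     x = torch.rand(1, 3, 8, 8)   # NCHW
#     y = to_channels_last(x)      # NHWC view, shape (1, 8, 8, 3)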