# pooling.py
  1. from typing import List, Optional
  2. from torch import Tensor
  3. from .module import Module
  4. from .utils import _single, _pair, _triple
  5. from .. import functional as F
  6. from ..common_types import (_size_any_t, _size_1_t, _size_2_t, _size_3_t,
  7. _ratio_3_t, _ratio_2_t, _size_any_opt_t, _size_2_opt_t, _size_3_opt_t)
# Public API of this module; mirrors the pooling classes defined below.
__all__ = ['MaxPool1d', 'MaxPool2d', 'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d',
           'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'FractionalMaxPool2d', 'FractionalMaxPool3d', 'LPPool1d',
           'LPPool2d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
           'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d']
  12. class _MaxPoolNd(Module):
  13. __constants__ = ['kernel_size', 'stride', 'padding', 'dilation',
  14. 'return_indices', 'ceil_mode']
  15. return_indices: bool
  16. ceil_mode: bool
  17. def __init__(self, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
  18. padding: _size_any_t = 0, dilation: _size_any_t = 1,
  19. return_indices: bool = False, ceil_mode: bool = False) -> None:
  20. super().__init__()
  21. self.kernel_size = kernel_size
  22. self.stride = stride if (stride is not None) else kernel_size
  23. self.padding = padding
  24. self.dilation = dilation
  25. self.return_indices = return_indices
  26. self.ceil_mode = ceil_mode
  27. def extra_repr(self) -> str:
  28. return 'kernel_size={kernel_size}, stride={stride}, padding={padding}' \
  29. ', dilation={dilation}, ceil_mode={ceil_mode}'.format(**self.__dict__)
  30. class MaxPool1d(_MaxPoolNd):
  31. r"""Applies a 1D max pooling over an input signal composed of several input
  32. planes.
  33. In the simplest case, the output value of the layer with input size :math:`(N, C, L)`
  34. and output :math:`(N, C, L_{out})` can be precisely described as:
  35. .. math::
  36. out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1}
  37. input(N_i, C_j, stride \times k + m)
  38. If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
  39. for :attr:`padding` number of points. :attr:`dilation` is the stride between the elements within the
  40. sliding window. This `link`_ has a nice visualization of the pooling parameters.
  41. Note:
  42. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  43. or the input. Sliding windows that would start in the right padded region are ignored.
  44. Args:
  45. kernel_size: The size of the sliding window, must be > 0.
  46. stride: The stride of the sliding window, must be > 0. Default value is :attr:`kernel_size`.
  47. padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
  48. dilation: The stride between elements within a sliding window, must be > 0.
  49. return_indices: If ``True``, will return the argmax along with the max values.
  50. Useful for :class:`torch.nn.MaxUnpool1d` later
  51. ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
  52. ensures that every element in the input tensor is covered by a sliding window.
  53. Shape:
  54. - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
  55. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
  56. .. math::
  57. L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
  58. \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
  59. Examples::
  60. >>> # pool of size=3, stride=2
  61. >>> m = nn.MaxPool1d(3, stride=2)
  62. >>> input = torch.randn(20, 16, 50)
  63. >>> output = m(input)
  64. .. _link:
  65. https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
  66. """
  67. kernel_size: _size_1_t
  68. stride: _size_1_t
  69. padding: _size_1_t
  70. dilation: _size_1_t
  71. def forward(self, input: Tensor):
  72. return F.max_pool1d(input, self.kernel_size, self.stride,
  73. self.padding, self.dilation, ceil_mode=self.ceil_mode,
  74. return_indices=self.return_indices)
  75. class MaxPool2d(_MaxPoolNd):
  76. r"""Applies a 2D max pooling over an input signal composed of several input
  77. planes.
  78. In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
  79. output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
  80. can be precisely described as:
  81. .. math::
  82. \begin{aligned}
  83. out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
  84. & \text{input}(N_i, C_j, \text{stride[0]} \times h + m,
  85. \text{stride[1]} \times w + n)
  86. \end{aligned}
  87. If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
  88. for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
  89. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
  90. Note:
  91. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  92. or the input. Sliding windows that would start in the right padded region are ignored.
  93. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
  94. - a single ``int`` -- in which case the same value is used for the height and width dimension
  95. - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
  96. and the second `int` for the width dimension
  97. Args:
  98. kernel_size: the size of the window to take a max over
  99. stride: the stride of the window. Default value is :attr:`kernel_size`
  100. padding: Implicit negative infinity padding to be added on both sides
  101. dilation: a parameter that controls the stride of elements in the window
  102. return_indices: if ``True``, will return the max indices along with the outputs.
  103. Useful for :class:`torch.nn.MaxUnpool2d` later
  104. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  105. Shape:
  106. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`
  107. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  108. .. math::
  109. H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
  110. \times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor
  111. .. math::
  112. W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
  113. \times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor
  114. Examples::
  115. >>> # pool of square window of size=3, stride=2
  116. >>> m = nn.MaxPool2d(3, stride=2)
  117. >>> # pool of non-square window
  118. >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
  119. >>> input = torch.randn(20, 16, 50, 32)
  120. >>> output = m(input)
  121. .. _link:
  122. https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
  123. """
  124. kernel_size: _size_2_t
  125. stride: _size_2_t
  126. padding: _size_2_t
  127. dilation: _size_2_t
  128. def forward(self, input: Tensor):
  129. return F.max_pool2d(input, self.kernel_size, self.stride,
  130. self.padding, self.dilation, ceil_mode=self.ceil_mode,
  131. return_indices=self.return_indices)
  132. class MaxPool3d(_MaxPoolNd):
  133. r"""Applies a 3D max pooling over an input signal composed of several input
  134. planes.
  135. In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
  136. output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
  137. can be precisely described as:
  138. .. math::
  139. \begin{aligned}
  140. \text{out}(N_i, C_j, d, h, w) ={} & \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
  141. & \text{input}(N_i, C_j, \text{stride[0]} \times d + k,
  142. \text{stride[1]} \times h + m, \text{stride[2]} \times w + n)
  143. \end{aligned}
  144. If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
  145. for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
  146. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
  147. Note:
  148. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  149. or the input. Sliding windows that would start in the right padded region are ignored.
  150. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
  151. - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
  152. - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
  153. the second `int` for the height dimension and the third `int` for the width dimension
  154. Args:
  155. kernel_size: the size of the window to take a max over
  156. stride: the stride of the window. Default value is :attr:`kernel_size`
  157. padding: Implicit negative infinity padding to be added on all three sides
  158. dilation: a parameter that controls the stride of elements in the window
  159. return_indices: if ``True``, will return the max indices along with the outputs.
  160. Useful for :class:`torch.nn.MaxUnpool3d` later
  161. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  162. Shape:
  163. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  164. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where
  165. .. math::
  166. D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times
  167. (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
  168. .. math::
  169. H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times
  170. (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
  171. .. math::
  172. W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times
  173. (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
  174. Examples::
  175. >>> # pool of square window of size=3, stride=2
  176. >>> m = nn.MaxPool3d(3, stride=2)
  177. >>> # pool of non-square window
  178. >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
  179. >>> input = torch.randn(20, 16, 50, 44, 31)
  180. >>> output = m(input)
  181. .. _link:
  182. https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
  183. """ # noqa: E501
  184. kernel_size: _size_3_t
  185. stride: _size_3_t
  186. padding: _size_3_t
  187. dilation: _size_3_t
  188. def forward(self, input: Tensor):
  189. return F.max_pool3d(input, self.kernel_size, self.stride,
  190. self.padding, self.dilation, ceil_mode=self.ceil_mode,
  191. return_indices=self.return_indices)
  192. class _MaxUnpoolNd(Module):
  193. def extra_repr(self) -> str:
  194. return 'kernel_size={}, stride={}, padding={}'.format(
  195. self.kernel_size, self.stride, self.padding
  196. )
  197. class MaxUnpool1d(_MaxUnpoolNd):
  198. r"""Computes a partial inverse of :class:`MaxPool1d`.
  199. :class:`MaxPool1d` is not fully invertible, since the non-maximal values are lost.
  200. :class:`MaxUnpool1d` takes in as input the output of :class:`MaxPool1d`
  201. including the indices of the maximal values and computes a partial inverse
  202. in which all non-maximal values are set to zero.
  203. Note:
  204. This operation may behave nondeterministically when the input indices has repeat values.
  205. See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.
  206. .. note:: :class:`MaxPool1d` can map several input sizes to the same output
  207. sizes. Hence, the inversion process can get ambiguous.
  208. To accommodate this, you can provide the needed output size
  209. as an additional argument :attr:`output_size` in the forward call.
  210. See the Inputs and Example below.
  211. Args:
  212. kernel_size (int or tuple): Size of the max pooling window.
  213. stride (int or tuple): Stride of the max pooling window.
  214. It is set to :attr:`kernel_size` by default.
  215. padding (int or tuple): Padding that was added to the input
  216. Inputs:
  217. - `input`: the input Tensor to invert
  218. - `indices`: the indices given out by :class:`~torch.nn.MaxPool1d`
  219. - `output_size` (optional): the targeted output size
  220. Shape:
  221. - Input: :math:`(N, C, H_{in})` or :math:`(C, H_{in})`.
  222. - Output: :math:`(N, C, H_{out})` or :math:`(C, H_{out})`, where
  223. .. math::
  224. H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]
  225. or as given by :attr:`output_size` in the call operator
  226. Example::
  227. >>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?")
  228. >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True)
  229. >>> unpool = nn.MaxUnpool1d(2, stride=2)
  230. >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]])
  231. >>> output, indices = pool(input)
  232. >>> unpool(output, indices)
  233. tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]])
  234. >>> # Example showcasing the use of output_size
  235. >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8, 9]]])
  236. >>> output, indices = pool(input)
  237. >>> unpool(output, indices, output_size=input.size())
  238. tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8., 0.]]])
  239. >>> unpool(output, indices)
  240. tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]])
  241. """
  242. kernel_size: _size_1_t
  243. stride: _size_1_t
  244. padding: _size_1_t
  245. def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = None, padding: _size_1_t = 0) -> None:
  246. super().__init__()
  247. self.kernel_size = _single(kernel_size)
  248. self.stride = _single(stride if (stride is not None) else kernel_size)
  249. self.padding = _single(padding)
  250. def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
  251. return F.max_unpool1d(input, indices, self.kernel_size, self.stride,
  252. self.padding, output_size)
  253. class MaxUnpool2d(_MaxUnpoolNd):
  254. r"""Computes a partial inverse of :class:`MaxPool2d`.
  255. :class:`MaxPool2d` is not fully invertible, since the non-maximal values are lost.
  256. :class:`MaxUnpool2d` takes in as input the output of :class:`MaxPool2d`
  257. including the indices of the maximal values and computes a partial inverse
  258. in which all non-maximal values are set to zero.
  259. Note:
  260. This operation may behave nondeterministically when the input indices has repeat values.
  261. See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.
  262. .. note:: :class:`MaxPool2d` can map several input sizes to the same output
  263. sizes. Hence, the inversion process can get ambiguous.
  264. To accommodate this, you can provide the needed output size
  265. as an additional argument :attr:`output_size` in the forward call.
  266. See the Inputs and Example below.
  267. Args:
  268. kernel_size (int or tuple): Size of the max pooling window.
  269. stride (int or tuple): Stride of the max pooling window.
  270. It is set to :attr:`kernel_size` by default.
  271. padding (int or tuple): Padding that was added to the input
  272. Inputs:
  273. - `input`: the input Tensor to invert
  274. - `indices`: the indices given out by :class:`~torch.nn.MaxPool2d`
  275. - `output_size` (optional): the targeted output size
  276. Shape:
  277. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  278. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  279. .. math::
  280. H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}
  281. .. math::
  282. W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}
  283. or as given by :attr:`output_size` in the call operator
  284. Example::
  285. >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
  286. >>> unpool = nn.MaxUnpool2d(2, stride=2)
  287. >>> input = torch.tensor([[[[ 1., 2., 3., 4.],
  288. [ 5., 6., 7., 8.],
  289. [ 9., 10., 11., 12.],
  290. [13., 14., 15., 16.]]]])
  291. >>> output, indices = pool(input)
  292. >>> unpool(output, indices)
  293. tensor([[[[ 0., 0., 0., 0.],
  294. [ 0., 6., 0., 8.],
  295. [ 0., 0., 0., 0.],
  296. [ 0., 14., 0., 16.]]]])
  297. >>> # Now using output_size to resolve an ambiguous size for the inverse
  298. >>> input = torch.torch.tensor([[[[ 1., 2., 3., 4., 5.],
  299. [ 6., 7., 8., 9., 10.],
  300. [11., 12., 13., 14., 15.],
  301. [16., 17., 18., 19., 20.]]]])
  302. >>> output, indices = pool(input)
  303. >>> # This call will not work without specifying output_size
  304. >>> unpool(output, indices, output_size=input.size())
  305. tensor([[[[ 0., 0., 0., 0., 0.],
  306. [ 0., 7., 0., 9., 0.],
  307. [ 0., 0., 0., 0., 0.],
  308. [ 0., 17., 0., 19., 0.]]]])
  309. """
  310. kernel_size: _size_2_t
  311. stride: _size_2_t
  312. padding: _size_2_t
  313. def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0) -> None:
  314. super().__init__()
  315. self.kernel_size = _pair(kernel_size)
  316. self.stride = _pair(stride if (stride is not None) else kernel_size)
  317. self.padding = _pair(padding)
  318. def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
  319. return F.max_unpool2d(input, indices, self.kernel_size, self.stride,
  320. self.padding, output_size)
  321. class MaxUnpool3d(_MaxUnpoolNd):
  322. r"""Computes a partial inverse of :class:`MaxPool3d`.
  323. :class:`MaxPool3d` is not fully invertible, since the non-maximal values are lost.
  324. :class:`MaxUnpool3d` takes in as input the output of :class:`MaxPool3d`
  325. including the indices of the maximal values and computes a partial inverse
  326. in which all non-maximal values are set to zero.
  327. Note:
  328. This operation may behave nondeterministically when the input indices has repeat values.
  329. See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.
  330. .. note:: :class:`MaxPool3d` can map several input sizes to the same output
  331. sizes. Hence, the inversion process can get ambiguous.
  332. To accommodate this, you can provide the needed output size
  333. as an additional argument :attr:`output_size` in the forward call.
  334. See the Inputs section below.
  335. Args:
  336. kernel_size (int or tuple): Size of the max pooling window.
  337. stride (int or tuple): Stride of the max pooling window.
  338. It is set to :attr:`kernel_size` by default.
  339. padding (int or tuple): Padding that was added to the input
  340. Inputs:
  341. - `input`: the input Tensor to invert
  342. - `indices`: the indices given out by :class:`~torch.nn.MaxPool3d`
  343. - `output_size` (optional): the targeted output size
  344. Shape:
  345. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  346. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where
  347. .. math::
  348. D_{out} = (D_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}
  349. .. math::
  350. H_{out} = (H_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}
  351. .. math::
  352. W_{out} = (W_{in} - 1) \times \text{stride[2]} - 2 \times \text{padding[2]} + \text{kernel\_size[2]}
  353. or as given by :attr:`output_size` in the call operator
  354. Example::
  355. >>> # pool of square window of size=3, stride=2
  356. >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
  357. >>> unpool = nn.MaxUnpool3d(3, stride=2)
  358. >>> output, indices = pool(torch.randn(20, 16, 51, 33, 15))
  359. >>> unpooled_output = unpool(output, indices)
  360. >>> unpooled_output.size()
  361. torch.Size([20, 16, 51, 33, 15])
  362. """
  363. kernel_size: _size_3_t
  364. stride: _size_3_t
  365. padding: _size_3_t
  366. def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0) -> None:
  367. super().__init__()
  368. self.kernel_size = _triple(kernel_size)
  369. self.stride = _triple(stride if (stride is not None) else kernel_size)
  370. self.padding = _triple(padding)
  371. def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
  372. return F.max_unpool3d(input, indices, self.kernel_size, self.stride,
  373. self.padding, output_size)
  374. class _AvgPoolNd(Module):
  375. __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad']
  376. def extra_repr(self) -> str:
  377. return 'kernel_size={}, stride={}, padding={}'.format(
  378. self.kernel_size, self.stride, self.padding
  379. )
  380. class AvgPool1d(_AvgPoolNd):
  381. r"""Applies a 1D average pooling over an input signal composed of several
  382. input planes.
  383. In the simplest case, the output value of the layer with input size :math:`(N, C, L)`,
  384. output :math:`(N, C, L_{out})` and :attr:`kernel_size` :math:`k`
  385. can be precisely described as:
  386. .. math::
  387. \text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1}
  388. \text{input}(N_i, C_j, \text{stride} \times l + m)
  389. If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
  390. for :attr:`padding` number of points.
  391. Note:
  392. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  393. or the input. Sliding windows that would start in the right padded region are ignored.
  394. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can each be
  395. an ``int`` or a one-element tuple.
  396. Args:
  397. kernel_size: the size of the window
  398. stride: the stride of the window. Default value is :attr:`kernel_size`
  399. padding: implicit zero padding to be added on both sides
  400. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  401. count_include_pad: when True, will include the zero-padding in the averaging calculation
  402. Shape:
  403. - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
  404. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
  405. .. math::
  406. L_{out} = \left\lfloor \frac{L_{in} +
  407. 2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
  408. Examples::
  409. >>> # pool with window of size=3, stride=2
  410. >>> m = nn.AvgPool1d(3, stride=2)
  411. >>> m(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]]))
  412. tensor([[[2., 4., 6.]]])
  413. """
  414. kernel_size: _size_1_t
  415. stride: _size_1_t
  416. padding: _size_1_t
  417. ceil_mode: bool
  418. count_include_pad: bool
  419. def __init__(self, kernel_size: _size_1_t, stride: _size_1_t = None, padding: _size_1_t = 0, ceil_mode: bool = False,
  420. count_include_pad: bool = True) -> None:
  421. super().__init__()
  422. self.kernel_size = _single(kernel_size)
  423. self.stride = _single(stride if stride is not None else kernel_size)
  424. self.padding = _single(padding)
  425. self.ceil_mode = ceil_mode
  426. self.count_include_pad = count_include_pad
  427. def forward(self, input: Tensor) -> Tensor:
  428. return F.avg_pool1d(
  429. input, self.kernel_size, self.stride, self.padding, self.ceil_mode,
  430. self.count_include_pad)
  431. class AvgPool2d(_AvgPoolNd):
  432. r"""Applies a 2D average pooling over an input signal composed of several input
  433. planes.
  434. In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
  435. output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
  436. can be precisely described as:
  437. .. math::
  438. out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
  439. input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)
  440. If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
  441. for :attr:`padding` number of points.
  442. Note:
  443. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  444. or the input. Sliding windows that would start in the right padded region are ignored.
  445. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can either be:
  446. - a single ``int`` -- in which case the same value is used for the height and width dimension
  447. - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
  448. and the second `int` for the width dimension
  449. Args:
  450. kernel_size: the size of the window
  451. stride: the stride of the window. Default value is :attr:`kernel_size`
  452. padding: implicit zero padding to be added on both sides
  453. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  454. count_include_pad: when True, will include the zero-padding in the averaging calculation
  455. divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used.
  456. Shape:
  457. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  458. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  459. .. math::
  460. H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
  461. \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
  462. .. math::
  463. W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
  464. \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
  465. Examples::
  466. >>> # pool of square window of size=3, stride=2
  467. >>> m = nn.AvgPool2d(3, stride=2)
  468. >>> # pool of non-square window
  469. >>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
  470. >>> input = torch.randn(20, 16, 50, 32)
  471. >>> output = m(input)
  472. """
  473. __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
  474. kernel_size: _size_2_t
  475. stride: _size_2_t
  476. padding: _size_2_t
  477. ceil_mode: bool
  478. count_include_pad: bool
  479. def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0,
  480. ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
  481. super().__init__()
  482. self.kernel_size = kernel_size
  483. self.stride = stride if (stride is not None) else kernel_size
  484. self.padding = padding
  485. self.ceil_mode = ceil_mode
  486. self.count_include_pad = count_include_pad
  487. self.divisor_override = divisor_override
  488. def forward(self, input: Tensor) -> Tensor:
  489. return F.avg_pool2d(input, self.kernel_size, self.stride,
  490. self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
  491. class AvgPool3d(_AvgPoolNd):
  492. r"""Applies a 3D average pooling over an input signal composed of several input
  493. planes.
  494. In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
  495. output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
  496. can be precisely described as:
  497. .. math::
  498. \begin{aligned}
  499. \text{out}(N_i, C_j, d, h, w) ={} & \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \\
  500. & \frac{\text{input}(N_i, C_j, \text{stride}[0] \times d + k,
  501. \text{stride}[1] \times h + m, \text{stride}[2] \times w + n)}
  502. {kD \times kH \times kW}
  503. \end{aligned}
  504. If :attr:`padding` is non-zero, then the input is implicitly zero-padded on all three sides
  505. for :attr:`padding` number of points.
  506. Note:
  507. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  508. or the input. Sliding windows that would start in the right padded region are ignored.
  509. The parameters :attr:`kernel_size`, :attr:`stride` can either be:
  510. - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
  511. - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
  512. the second `int` for the height dimension and the third `int` for the width dimension
  513. Args:
  514. kernel_size: the size of the window
  515. stride: the stride of the window. Default value is :attr:`kernel_size`
  516. padding: implicit zero padding to be added on all three sides
  517. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  518. count_include_pad: when True, will include the zero-padding in the averaging calculation
  519. divisor_override: if specified, it will be used as divisor, otherwise :attr:`kernel_size` will be used
  520. Shape:
  521. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  522. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
  523. :math:`(C, D_{out}, H_{out}, W_{out})`, where
  524. .. math::
  525. D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] -
  526. \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
  527. .. math::
  528. H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] -
  529. \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
  530. .. math::
  531. W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] -
  532. \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor
  533. Examples::
  534. >>> # pool of square window of size=3, stride=2
  535. >>> m = nn.AvgPool3d(3, stride=2)
  536. >>> # pool of non-square window
  537. >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
  538. >>> input = torch.randn(20, 16, 50, 44, 31)
  539. >>> output = m(input)
  540. """
  541. __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
  542. kernel_size: _size_3_t
  543. stride: _size_3_t
  544. padding: _size_3_t
  545. ceil_mode: bool
  546. count_include_pad: bool
  547. def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0,
  548. ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
  549. super().__init__()
  550. self.kernel_size = kernel_size
  551. self.stride = stride if (stride is not None) else kernel_size
  552. self.padding = padding
  553. self.ceil_mode = ceil_mode
  554. self.count_include_pad = count_include_pad
  555. self.divisor_override = divisor_override
  556. def forward(self, input: Tensor) -> Tensor:
  557. return F.avg_pool3d(input, self.kernel_size, self.stride,
  558. self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
  559. def __setstate__(self, d):
  560. super().__setstate__(d)
  561. self.__dict__.setdefault('padding', 0)
  562. self.__dict__.setdefault('ceil_mode', False)
  563. self.__dict__.setdefault('count_include_pad', True)
  564. class FractionalMaxPool2d(Module):
  565. r"""Applies a 2D fractional max pooling over an input signal composed of several input planes.
  566. Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
  567. The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
  568. step size determined by the target output size.
  569. The number of output features is equal to the number of input planes.
  570. Args:
  571. kernel_size: the size of the window to take a max over.
  572. Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)`
  573. output_size: the target output size of the image of the form `oH x oW`.
  574. Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH`
  575. output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
  576. This has to be a number or tuple in the range (0, 1)
  577. return_indices: if ``True``, will return the indices along with the outputs.
  578. Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False``
  579. Shape:
  580. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  581. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  582. :math:`(H_{out}, W_{out})=\text{output\_size}` or
  583. :math:`(H_{out}, W_{out})=\text{output\_ratio} \times (H_{in}, W_{in})`.
  584. Examples:
  585. >>> # pool of square window of size=3, and target output size 13x12
  586. >>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
  587. >>> # pool of square window and target output size being half of input image size
  588. >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
  589. >>> input = torch.randn(20, 16, 50, 32)
  590. >>> output = m(input)
  591. .. _Fractional MaxPooling:
  592. https://arxiv.org/abs/1412.6071
  593. """
  594. __constants__ = ['kernel_size', 'return_indices', 'output_size',
  595. 'output_ratio']
  596. kernel_size: _size_2_t
  597. return_indices: bool
  598. output_size: _size_2_t
  599. output_ratio: _ratio_2_t
  600. def __init__(self, kernel_size: _size_2_t, output_size: Optional[_size_2_t] = None,
  601. output_ratio: Optional[_ratio_2_t] = None,
  602. return_indices: bool = False, _random_samples=None) -> None:
  603. super().__init__()
  604. self.kernel_size = _pair(kernel_size)
  605. self.return_indices = return_indices
  606. self.register_buffer('_random_samples', _random_samples)
  607. self.output_size = _pair(output_size) if output_size is not None else None
  608. self.output_ratio = _pair(output_ratio) if output_ratio is not None else None
  609. if output_size is None and output_ratio is None:
  610. raise ValueError("FractionalMaxPool2d requires specifying either "
  611. "an output size, or a pooling ratio")
  612. if output_size is not None and output_ratio is not None:
  613. raise ValueError("only one of output_size and output_ratio may be specified")
  614. if self.output_ratio is not None:
  615. if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1):
  616. raise ValueError("output_ratio must be between 0 and 1 (got {})"
  617. .format(output_ratio))
  618. def forward(self, input: Tensor):
  619. return F.fractional_max_pool2d(
  620. input, self.kernel_size, self.output_size, self.output_ratio,
  621. self.return_indices,
  622. _random_samples=self._random_samples)
  623. class FractionalMaxPool3d(Module):
  624. r"""Applies a 3D fractional max pooling over an input signal composed of several input planes.
  625. Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
  626. The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
  627. step size determined by the target output size.
  628. The number of output features is equal to the number of input planes.
  629. Args:
  630. kernel_size: the size of the window to take a max over.
  631. Can be a single number k (for a square kernel of k x k x k) or a tuple `(kt x kh x kw)`
  632. output_size: the target output size of the image of the form `oT x oH x oW`.
  633. Can be a tuple `(oT, oH, oW)` or a single number oH for a square image `oH x oH x oH`
  634. output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
  635. This has to be a number or tuple in the range (0, 1)
  636. return_indices: if ``True``, will return the indices along with the outputs.
  637. Useful to pass to :meth:`nn.MaxUnpool3d`. Default: ``False``
  638. Shape:
  639. - Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`.
  640. - Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where
  641. :math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or
  642. :math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})`
  643. Examples:
  644. >>> # pool of cubic window of size=3, and target output size 13x12x11
  645. >>> m = nn.FractionalMaxPool3d(3, output_size=(13, 12, 11))
  646. >>> # pool of cubic window and target output size being half of input size
  647. >>> m = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5))
  648. >>> input = torch.randn(20, 16, 50, 32, 16)
  649. >>> output = m(input)
  650. .. _Fractional MaxPooling:
  651. https://arxiv.org/abs/1412.6071
  652. """
  653. __constants__ = ['kernel_size', 'return_indices', 'output_size',
  654. 'output_ratio']
  655. kernel_size: _size_3_t
  656. return_indices: bool
  657. output_size: _size_3_t
  658. output_ratio: _ratio_3_t
  659. def __init__(self, kernel_size: _size_3_t, output_size: Optional[_size_3_t] = None,
  660. output_ratio: Optional[_ratio_3_t] = None,
  661. return_indices: bool = False, _random_samples=None) -> None:
  662. super().__init__()
  663. self.kernel_size = _triple(kernel_size)
  664. self.return_indices = return_indices
  665. self.register_buffer('_random_samples', _random_samples)
  666. self.output_size = _triple(output_size) if output_size is not None else None
  667. self.output_ratio = _triple(output_ratio) if output_ratio is not None else None
  668. if output_size is None and output_ratio is None:
  669. raise ValueError("FractionalMaxPool3d requires specifying either "
  670. "an output size, or a pooling ratio")
  671. if output_size is not None and output_ratio is not None:
  672. raise ValueError("only one of output_size and output_ratio may be specified")
  673. if self.output_ratio is not None:
  674. if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1 and 0 < self.output_ratio[2] < 1):
  675. raise ValueError("output_ratio must be between 0 and 1 (got {})"
  676. .format(output_ratio))
  677. def forward(self, input: Tensor):
  678. return F.fractional_max_pool3d(
  679. input, self.kernel_size, self.output_size, self.output_ratio,
  680. self.return_indices,
  681. _random_samples=self._random_samples)
  682. class _LPPoolNd(Module):
  683. __constants__ = ['norm_type', 'kernel_size', 'stride', 'ceil_mode']
  684. norm_type: float
  685. ceil_mode: bool
  686. def __init__(self, norm_type: float, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
  687. ceil_mode: bool = False) -> None:
  688. super().__init__()
  689. self.norm_type = norm_type
  690. self.kernel_size = kernel_size
  691. self.stride = stride
  692. self.ceil_mode = ceil_mode
  693. def extra_repr(self) -> str:
  694. return 'norm_type={norm_type}, kernel_size={kernel_size}, stride={stride}, ' \
  695. 'ceil_mode={ceil_mode}'.format(**self.__dict__)
  696. class LPPool1d(_LPPoolNd):
  697. r"""Applies a 1D power-average pooling over an input signal composed of several input
  698. planes.
  699. On each window, the function computed is:
  700. .. math::
  701. f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
  702. - At p = :math:`\infty`, one gets Max Pooling
  703. - At p = 1, one gets Sum Pooling (which is proportional to Average Pooling)
  704. .. note:: If the sum to the power of `p` is zero, the gradient of this function is
  705. not defined. This implementation will set the gradient to zero in this case.
  706. Args:
  707. kernel_size: a single int, the size of the window
  708. stride: a single int, the stride of the window. Default value is :attr:`kernel_size`
  709. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  710. Shape:
  711. - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
  712. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
  713. .. math::
  714. L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
  715. Examples::
  716. >>> # power-2 pool of window of length 3, with stride 2.
  717. >>> m = nn.LPPool1d(2, 3, stride=2)
  718. >>> input = torch.randn(20, 16, 50)
  719. >>> output = m(input)
  720. """
  721. kernel_size: _size_1_t
  722. stride: _size_1_t
  723. def forward(self, input: Tensor) -> Tensor:
  724. return F.lp_pool1d(input, float(self.norm_type), self.kernel_size,
  725. self.stride, self.ceil_mode)
  726. class LPPool2d(_LPPoolNd):
  727. r"""Applies a 2D power-average pooling over an input signal composed of several input
  728. planes.
  729. On each window, the function computed is:
  730. .. math::
  731. f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
  732. - At p = :math:`\infty`, one gets Max Pooling
  733. - At p = 1, one gets Sum Pooling (which is proportional to average pooling)
  734. The parameters :attr:`kernel_size`, :attr:`stride` can either be:
  735. - a single ``int`` -- in which case the same value is used for the height and width dimension
  736. - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
  737. and the second `int` for the width dimension
  738. .. note:: If the sum to the power of `p` is zero, the gradient of this function is
  739. not defined. This implementation will set the gradient to zero in this case.
  740. Args:
  741. kernel_size: the size of the window
  742. stride: the stride of the window. Default value is :attr:`kernel_size`
  743. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  744. Shape:
  745. - Input: :math:`(N, C, H_{in}, W_{in})`
  746. - Output: :math:`(N, C, H_{out}, W_{out})`, where
  747. .. math::
  748. H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
  749. .. math::
  750. W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
  751. Examples::
  752. >>> # power-2 pool of square window of size=3, stride=2
  753. >>> m = nn.LPPool2d(2, 3, stride=2)
  754. >>> # pool of non-square window of power 1.2
  755. >>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
  756. >>> input = torch.randn(20, 16, 50, 32)
  757. >>> output = m(input)
  758. """
  759. kernel_size: _size_2_t
  760. stride: _size_2_t
  761. def forward(self, input: Tensor) -> Tensor:
  762. return F.lp_pool2d(input, float(self.norm_type), self.kernel_size,
  763. self.stride, self.ceil_mode)
  764. class _AdaptiveMaxPoolNd(Module):
  765. __constants__ = ['output_size', 'return_indices']
  766. return_indices: bool
  767. def __init__(self, output_size: _size_any_opt_t, return_indices: bool = False) -> None:
  768. super().__init__()
  769. self.output_size = output_size
  770. self.return_indices = return_indices
  771. def extra_repr(self) -> str:
  772. return 'output_size={}'.format(self.output_size)
  773. # FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and
  774. # output shapes are, and how the operation computes output.
  775. class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):
  776. r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes.
  777. The output size is :math:`L_{out}`, for any input size.
  778. The number of output features is equal to the number of input planes.
  779. Args:
  780. output_size: the target output size :math:`L_{out}`.
  781. return_indices: if ``True``, will return the indices along with the outputs.
  782. Useful to pass to nn.MaxUnpool1d. Default: ``False``
  783. Shape:
  784. - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
  785. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
  786. :math:`L_{out}=\text{output\_size}`.
  787. Examples:
  788. >>> # target output size of 5
  789. >>> m = nn.AdaptiveMaxPool1d(5)
  790. >>> input = torch.randn(1, 64, 8)
  791. >>> output = m(input)
  792. """
  793. output_size: _size_1_t
  794. def forward(self, input: Tensor) -> Tensor:
  795. return F.adaptive_max_pool1d(input, self.output_size, self.return_indices)
  796. class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
  797. r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes.
  798. The output is of size :math:`H_{out} \times W_{out}`, for any input size.
  799. The number of output features is equal to the number of input planes.
  800. Args:
  801. output_size: the target output size of the image of the form :math:`H_{out} \times W_{out}`.
  802. Can be a tuple :math:`(H_{out}, W_{out})` or a single :math:`H_{out}` for a
  803. square image :math:`H_{out} \times H_{out}`. :math:`H_{out}` and :math:`W_{out}`
  804. can be either a ``int``, or ``None`` which means the size will be the same as that
  805. of the input.
  806. return_indices: if ``True``, will return the indices along with the outputs.
  807. Useful to pass to nn.MaxUnpool2d. Default: ``False``
  808. Shape:
  809. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  810. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  811. :math:`(H_{out}, W_{out})=\text{output\_size}`.
  812. Examples:
  813. >>> # target output size of 5x7
  814. >>> m = nn.AdaptiveMaxPool2d((5, 7))
  815. >>> input = torch.randn(1, 64, 8, 9)
  816. >>> output = m(input)
  817. >>> # target output size of 7x7 (square)
  818. >>> m = nn.AdaptiveMaxPool2d(7)
  819. >>> input = torch.randn(1, 64, 10, 9)
  820. >>> output = m(input)
  821. >>> # target output size of 10x7
  822. >>> m = nn.AdaptiveMaxPool2d((None, 7))
  823. >>> input = torch.randn(1, 64, 10, 9)
  824. >>> output = m(input)
  825. """
  826. output_size: _size_2_opt_t
  827. def forward(self, input: Tensor):
  828. return F.adaptive_max_pool2d(input, self.output_size, self.return_indices)
  829. class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
  830. r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes.
  831. The output is of size :math:`D_{out} \times H_{out} \times W_{out}`, for any input size.
  832. The number of output features is equal to the number of input planes.
  833. Args:
  834. output_size: the target output size of the image of the form :math:`D_{out} \times H_{out} \times W_{out}`.
  835. Can be a tuple :math:`(D_{out}, H_{out}, W_{out})` or a single
  836. :math:`D_{out}` for a cube :math:`D_{out} \times D_{out} \times D_{out}`.
  837. :math:`D_{out}`, :math:`H_{out}` and :math:`W_{out}` can be either a
  838. ``int``, or ``None`` which means the size will be the same as that of the input.
  839. return_indices: if ``True``, will return the indices along with the outputs.
  840. Useful to pass to nn.MaxUnpool3d. Default: ``False``
  841. Shape:
  842. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  843. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
  844. where :math:`(D_{out}, H_{out}, W_{out})=\text{output\_size}`.
  845. Examples:
  846. >>> # target output size of 5x7x9
  847. >>> m = nn.AdaptiveMaxPool3d((5, 7, 9))
  848. >>> input = torch.randn(1, 64, 8, 9, 10)
  849. >>> output = m(input)
  850. >>> # target output size of 7x7x7 (cube)
  851. >>> m = nn.AdaptiveMaxPool3d(7)
  852. >>> input = torch.randn(1, 64, 10, 9, 8)
  853. >>> output = m(input)
  854. >>> # target output size of 7x9x8
  855. >>> m = nn.AdaptiveMaxPool3d((7, None, None))
  856. >>> input = torch.randn(1, 64, 10, 9, 8)
  857. >>> output = m(input)
  858. """
  859. output_size: _size_3_opt_t
  860. def forward(self, input: Tensor):
  861. return F.adaptive_max_pool3d(input, self.output_size, self.return_indices)
  862. class _AdaptiveAvgPoolNd(Module):
  863. __constants__ = ['output_size']
  864. def __init__(self, output_size: _size_any_opt_t) -> None:
  865. super().__init__()
  866. self.output_size = output_size
  867. def extra_repr(self) -> str:
  868. return 'output_size={}'.format(self.output_size)
  869. class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):
  870. r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes.
  871. The output size is :math:`L_{out}`, for any input size.
  872. The number of output features is equal to the number of input planes.
  873. Args:
  874. output_size: the target output size :math:`L_{out}`.
  875. Shape:
  876. - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
  877. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
  878. :math:`L_{out}=\text{output\_size}`.
  879. Examples:
  880. >>> # target output size of 5
  881. >>> m = nn.AdaptiveAvgPool1d(5)
  882. >>> input = torch.randn(1, 64, 8)
  883. >>> output = m(input)
  884. """
  885. output_size: _size_1_t
  886. def forward(self, input: Tensor) -> Tensor:
  887. return F.adaptive_avg_pool1d(input, self.output_size)
  888. class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
  889. r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.
  890. The output is of size H x W, for any input size.
  891. The number of output features is equal to the number of input planes.
  892. Args:
  893. output_size: the target output size of the image of the form H x W.
  894. Can be a tuple (H, W) or a single H for a square image H x H.
  895. H and W can be either a ``int``, or ``None`` which means the size will
  896. be the same as that of the input.
  897. Shape:
  898. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  899. - Output: :math:`(N, C, S_{0}, S_{1})` or :math:`(C, S_{0}, S_{1})`, where
  900. :math:`S=\text{output\_size}`.
  901. Examples:
  902. >>> # target output size of 5x7
  903. >>> m = nn.AdaptiveAvgPool2d((5, 7))
  904. >>> input = torch.randn(1, 64, 8, 9)
  905. >>> output = m(input)
  906. >>> # target output size of 7x7 (square)
  907. >>> m = nn.AdaptiveAvgPool2d(7)
  908. >>> input = torch.randn(1, 64, 10, 9)
  909. >>> output = m(input)
  910. >>> # target output size of 10x7
  911. >>> m = nn.AdaptiveAvgPool2d((None, 7))
  912. >>> input = torch.randn(1, 64, 10, 9)
  913. >>> output = m(input)
  914. """
  915. output_size: _size_2_opt_t
  916. def forward(self, input: Tensor) -> Tensor:
  917. return F.adaptive_avg_pool2d(input, self.output_size)
  918. class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
  919. r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes.
  920. The output is of size D x H x W, for any input size.
  921. The number of output features is equal to the number of input planes.
  922. Args:
  923. output_size: the target output size of the form D x H x W.
  924. Can be a tuple (D, H, W) or a single number D for a cube D x D x D.
  925. D, H and W can be either a ``int``, or ``None`` which means the size will
  926. be the same as that of the input.
  927. Shape:
  928. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  929. - Output: :math:`(N, C, S_{0}, S_{1}, S_{2})` or :math:`(C, S_{0}, S_{1}, S_{2})`,
  930. where :math:`S=\text{output\_size}`.
  931. Examples:
  932. >>> # target output size of 5x7x9
  933. >>> m = nn.AdaptiveAvgPool3d((5, 7, 9))
  934. >>> input = torch.randn(1, 64, 8, 9, 10)
  935. >>> output = m(input)
  936. >>> # target output size of 7x7x7 (cube)
  937. >>> m = nn.AdaptiveAvgPool3d(7)
  938. >>> input = torch.randn(1, 64, 10, 9, 8)
  939. >>> output = m(input)
  940. >>> # target output size of 7x9x8
  941. >>> m = nn.AdaptiveAvgPool3d((7, None, None))
  942. >>> input = torch.randn(1, 64, 10, 9, 8)
  943. >>> output = m(input)
  944. """
  945. output_size: _size_3_opt_t
  946. def forward(self, input: Tensor) -> Tensor:
  947. return F.adaptive_avg_pool3d(input, self.output_size)