__init__.py 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283
  1. import torch
  2. from torch._C import _add_docstr, _special # type: ignore[attr-defined]
  3. from torch._torch_docs import common_args, multi_dim_common
  4. __all__ = [
  5. 'airy_ai',
  6. 'bessel_j0',
  7. 'bessel_j1',
  8. 'bessel_y0',
  9. 'bessel_y1',
  10. 'chebyshev_polynomial_t',
  11. 'chebyshev_polynomial_u',
  12. 'chebyshev_polynomial_v',
  13. 'chebyshev_polynomial_w',
  14. 'digamma',
  15. 'entr',
  16. 'erf',
  17. 'erfc',
  18. 'erfcx',
  19. 'erfinv',
  20. 'exp2',
  21. 'expit',
  22. 'expm1',
  23. 'gammainc',
  24. 'gammaincc',
  25. 'gammaln',
  26. 'hermite_polynomial_h',
  27. 'hermite_polynomial_he',
  28. 'i0',
  29. 'i0e',
  30. 'i1',
  31. 'i1e',
  32. 'laguerre_polynomial_l',
  33. 'legendre_polynomial_p',
  34. 'log1p',
  35. 'log_ndtr',
  36. 'log_softmax',
  37. 'logit',
  38. 'logsumexp',
  39. 'modified_bessel_i0',
  40. 'modified_bessel_i1',
  41. 'modified_bessel_k0',
  42. 'modified_bessel_k1',
  43. 'multigammaln',
  44. 'ndtr',
  45. 'ndtri',
  46. 'polygamma',
  47. 'psi',
  48. 'round',
  49. 'shifted_chebyshev_polynomial_t',
  50. 'shifted_chebyshev_polynomial_u',
  51. 'shifted_chebyshev_polynomial_v',
  52. 'shifted_chebyshev_polynomial_w',
  53. 'scaled_modified_bessel_k0',
  54. 'scaled_modified_bessel_k1',
  55. 'sinc',
  56. 'softmax',
  57. 'spherical_bessel_j0',
  58. 'xlog1py',
  59. 'xlogy',
  60. 'zeta',
  61. ]
  62. Tensor = torch.Tensor
  63. entr = _add_docstr(_special.special_entr,
  64. r"""
  65. entr(input, *, out=None) -> Tensor
  66. Computes the entropy on :attr:`input` (as defined below), elementwise.
  67. .. math::
  68. \begin{align}
  69. \text{entr(x)} = \begin{cases}
  70. -x * \ln(x) & x > 0 \\
  71. 0 & x = 0.0 \\
  72. -\infty & x < 0
  73. \end{cases}
  74. \end{align}
  75. """ + """
  76. Args:
  77. input (Tensor): the input tensor.
  78. Keyword args:
  79. out (Tensor, optional): the output tensor.
  80. Example::
  81. >>> a = torch.arange(-0.5, 1, 0.5)
  82. >>> a
  83. tensor([-0.5000, 0.0000, 0.5000])
  84. >>> torch.special.entr(a)
  85. tensor([ -inf, 0.0000, 0.3466])
  86. """)
  87. psi = _add_docstr(_special.special_psi,
  88. r"""
  89. psi(input, *, out=None) -> Tensor
  90. Alias for :func:`torch.special.digamma`.
  91. """)
  92. digamma = _add_docstr(_special.special_digamma,
  93. r"""
  94. digamma(input, *, out=None) -> Tensor
  95. Computes the logarithmic derivative of the gamma function on `input`.
  96. .. math::
  97. \digamma(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)}
  98. """ + r"""
  99. Args:
  100. input (Tensor): the tensor to compute the digamma function on
  101. Keyword args:
  102. {out}
  103. .. note:: This function is similar to SciPy's `scipy.special.digamma`.
  104. .. note:: From PyTorch 1.8 onwards, the digamma function returns `-Inf` for `0`.
  105. Previously it returned `NaN` for `0`.
  106. Example::
  107. >>> a = torch.tensor([1, 0.5])
  108. >>> torch.special.digamma(a)
  109. tensor([-0.5772, -1.9635])
  110. """.format(**common_args))
  111. gammaln = _add_docstr(_special.special_gammaln,
  112. r"""
  113. gammaln(input, *, out=None) -> Tensor
  114. Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
  115. .. math::
  116. \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|)
  117. """ + """
  118. Args:
  119. {input}
  120. Keyword args:
  121. {out}
  122. Example::
  123. >>> a = torch.arange(0.5, 2, 0.5)
  124. >>> torch.special.gammaln(a)
  125. tensor([ 0.5724, 0.0000, -0.1208])
  126. """.format(**common_args))
  127. polygamma = _add_docstr(_special.special_polygamma,
  128. r"""
  129. polygamma(n, input, *, out=None) -> Tensor
  130. Computes the :math:`n^{th}` derivative of the digamma function on :attr:`input`.
  131. :math:`n \geq 0` is called the order of the polygamma function.
  132. .. math::
  133. \psi^{(n)}(x) = \frac{d^{(n)}}{dx^{(n)}} \psi(x)
  134. .. note::
  135. This function is implemented only for nonnegative integers :math:`n \geq 0`.
  136. """ + """
  137. Args:
  138. n (int): the order of the polygamma function
  139. {input}
  140. Keyword args:
  141. {out}
  142. Example::
  143. >>> a = torch.tensor([1, 0.5])
  144. >>> torch.special.polygamma(1, a)
  145. tensor([1.64493, 4.9348])
  146. >>> torch.special.polygamma(2, a)
  147. tensor([ -2.4041, -16.8288])
  148. >>> torch.special.polygamma(3, a)
  149. tensor([ 6.4939, 97.4091])
  150. >>> torch.special.polygamma(4, a)
  151. tensor([ -24.8863, -771.4742])
  152. """.format(**common_args))
  153. erf = _add_docstr(_special.special_erf,
  154. r"""
  155. erf(input, *, out=None) -> Tensor
  156. Computes the error function of :attr:`input`. The error function is defined as follows:
  157. .. math::
  158. \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
  159. """ + r"""
  160. Args:
  161. {input}
  162. Keyword args:
  163. {out}
  164. Example::
  165. >>> torch.special.erf(torch.tensor([0, -1., 10.]))
  166. tensor([ 0.0000, -0.8427, 1.0000])
  167. """.format(**common_args))
  168. erfc = _add_docstr(_special.special_erfc,
  169. r"""
  170. erfc(input, *, out=None) -> Tensor
  171. Computes the complementary error function of :attr:`input`.
  172. The complementary error function is defined as follows:
  173. .. math::
  174. \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
  175. """ + r"""
  176. Args:
  177. {input}
  178. Keyword args:
  179. {out}
  180. Example::
  181. >>> torch.special.erfc(torch.tensor([0, -1., 10.]))
  182. tensor([ 1.0000, 1.8427, 0.0000])
  183. """.format(**common_args))
  184. erfcx = _add_docstr(_special.special_erfcx,
  185. r"""
  186. erfcx(input, *, out=None) -> Tensor
  187. Computes the scaled complementary error function for each element of :attr:`input`.
  188. The scaled complementary error function is defined as follows:
  189. .. math::
  190. \mathrm{erfcx}(x) = e^{x^2} \mathrm{erfc}(x)
  191. """ + r"""
  192. """ + r"""
  193. Args:
  194. {input}
  195. Keyword args:
  196. {out}
  197. Example::
  198. >>> torch.special.erfcx(torch.tensor([0, -1., 10.]))
  199. tensor([ 1.0000, 5.0090, 0.0561])
  200. """.format(**common_args))
  201. erfinv = _add_docstr(_special.special_erfinv,
  202. r"""
  203. erfinv(input, *, out=None) -> Tensor
  204. Computes the inverse error function of :attr:`input`.
  205. The inverse error function is defined in the range :math:`(-1, 1)` as:
  206. .. math::
  207. \mathrm{erfinv}(\mathrm{erf}(x)) = x
  208. """ + r"""
  209. Args:
  210. {input}
  211. Keyword args:
  212. {out}
  213. Example::
  214. >>> torch.special.erfinv(torch.tensor([0, 0.5, -1.]))
  215. tensor([ 0.0000, 0.4769, -inf])
  216. """.format(**common_args))
  217. logit = _add_docstr(_special.special_logit,
  218. r"""
  219. logit(input, eps=None, *, out=None) -> Tensor
  220. Returns a new tensor with the logit of the elements of :attr:`input`.
  221. :attr:`input` is clamped to [eps, 1 - eps] when eps is not None.
  222. When eps is None and :attr:`input` < 0 or :attr:`input` > 1, the function will yields NaN.
  223. .. math::
  224. \begin{align}
  225. y_{i} &= \ln(\frac{z_{i}}{1 - z_{i}}) \\
  226. z_{i} &= \begin{cases}
  227. x_{i} & \text{if eps is None} \\
  228. \text{eps} & \text{if } x_{i} < \text{eps} \\
  229. x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\
  230. 1 - \text{eps} & \text{if } x_{i} > 1 - \text{eps}
  231. \end{cases}
  232. \end{align}
  233. """ + r"""
  234. Args:
  235. {input}
  236. eps (float, optional): the epsilon for input clamp bound. Default: ``None``
  237. Keyword args:
  238. {out}
  239. Example::
  240. >>> a = torch.rand(5)
  241. >>> a
  242. tensor([0.2796, 0.9331, 0.6486, 0.1523, 0.6516])
  243. >>> torch.special.logit(a, eps=1e-6)
  244. tensor([-0.9466, 2.6352, 0.6131, -1.7169, 0.6261])
  245. """.format(**common_args))
  246. logsumexp = _add_docstr(_special.special_logsumexp,
  247. r"""
  248. logsumexp(input, dim, keepdim=False, *, out=None)
  249. Alias for :func:`torch.logsumexp`.
  250. """.format(**multi_dim_common))
  251. expit = _add_docstr(_special.special_expit,
  252. r"""
  253. expit(input, *, out=None) -> Tensor
  254. Computes the expit (also known as the logistic sigmoid function) of the elements of :attr:`input`.
  255. .. math::
  256. \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}}
  257. """ + r"""
  258. Args:
  259. {input}
  260. Keyword args:
  261. {out}
  262. Example::
  263. >>> t = torch.randn(4)
  264. >>> t
  265. tensor([ 0.9213, 1.0887, -0.8858, -1.7683])
  266. >>> torch.special.expit(t)
  267. tensor([ 0.7153, 0.7481, 0.2920, 0.1458])
  268. """.format(**common_args))
  269. exp2 = _add_docstr(_special.special_exp2,
  270. r"""
  271. exp2(input, *, out=None) -> Tensor
  272. Computes the base two exponential function of :attr:`input`.
  273. .. math::
  274. y_{i} = 2^{x_{i}}
  275. """ + r"""
  276. Args:
  277. {input}
  278. Keyword args:
  279. {out}
  280. Example::
  281. >>> torch.special.exp2(torch.tensor([0, math.log2(2.), 3, 4]))
  282. tensor([ 1., 2., 8., 16.])
  283. """.format(**common_args))
  284. expm1 = _add_docstr(_special.special_expm1,
  285. r"""
  286. expm1(input, *, out=None) -> Tensor
  287. Computes the exponential of the elements minus 1
  288. of :attr:`input`.
  289. .. math::
  290. y_{i} = e^{x_{i}} - 1
  291. .. note:: This function provides greater precision than exp(x) - 1 for small values of x.
  292. """ + r"""
  293. Args:
  294. {input}
  295. Keyword args:
  296. {out}
  297. Example::
  298. >>> torch.special.expm1(torch.tensor([0, math.log(2.)]))
  299. tensor([ 0., 1.])
  300. """.format(**common_args))
  301. xlog1py = _add_docstr(_special.special_xlog1py,
  302. r"""
  303. xlog1py(input, other, *, out=None) -> Tensor
  304. Computes ``input * log1p(other)`` with the following cases.
  305. .. math::
  306. \text{out}_{i} = \begin{cases}
  307. \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\
  308. 0 & \text{if } \text{input}_{i} = 0.0 \text{ and } \text{other}_{i} != \text{NaN} \\
  309. \text{input}_{i} * \text{log1p}(\text{other}_{i})& \text{otherwise}
  310. \end{cases}
  311. Similar to SciPy's `scipy.special.xlog1py`.
  312. """ + r"""
  313. Args:
  314. input (Number or Tensor) : Multiplier
  315. other (Number or Tensor) : Argument
  316. .. note:: At least one of :attr:`input` or :attr:`other` must be a tensor.
  317. Keyword args:
  318. {out}
  319. Example::
  320. >>> x = torch.zeros(5,)
  321. >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
  322. >>> torch.special.xlog1py(x, y)
  323. tensor([0., 0., 0., 0., nan])
  324. >>> x = torch.tensor([1, 2, 3])
  325. >>> y = torch.tensor([3, 2, 1])
  326. >>> torch.special.xlog1py(x, y)
  327. tensor([1.3863, 2.1972, 2.0794])
  328. >>> torch.special.xlog1py(x, 4)
  329. tensor([1.6094, 3.2189, 4.8283])
  330. >>> torch.special.xlog1py(2, y)
  331. tensor([2.7726, 2.1972, 1.3863])
  332. """.format(**common_args))
  333. xlogy = _add_docstr(_special.special_xlogy,
  334. r"""
  335. xlogy(input, other, *, out=None) -> Tensor
  336. Computes ``input * log(other)`` with the following cases.
  337. .. math::
  338. \text{out}_{i} = \begin{cases}
  339. \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\
  340. 0 & \text{if } \text{input}_{i} = 0.0 \\
  341. \text{input}_{i} * \log{(\text{other}_{i})} & \text{otherwise}
  342. \end{cases}
  343. Similar to SciPy's `scipy.special.xlogy`.
  344. """ + r"""
  345. Args:
  346. input (Number or Tensor) : Multiplier
  347. other (Number or Tensor) : Argument
  348. .. note:: At least one of :attr:`input` or :attr:`other` must be a tensor.
  349. Keyword args:
  350. {out}
  351. Example::
  352. >>> x = torch.zeros(5,)
  353. >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
  354. >>> torch.special.xlogy(x, y)
  355. tensor([0., 0., 0., 0., nan])
  356. >>> x = torch.tensor([1, 2, 3])
  357. >>> y = torch.tensor([3, 2, 1])
  358. >>> torch.special.xlogy(x, y)
  359. tensor([1.0986, 1.3863, 0.0000])
  360. >>> torch.special.xlogy(x, 4)
  361. tensor([1.3863, 2.7726, 4.1589])
  362. >>> torch.special.xlogy(2, y)
  363. tensor([2.1972, 1.3863, 0.0000])
  364. """.format(**common_args))
  365. i0 = _add_docstr(_special.special_i0,
  366. r"""
  367. i0(input, *, out=None) -> Tensor
  368. Computes the zeroth order modified Bessel function of the first kind for each element of :attr:`input`.
  369. .. math::
  370. \text{out}_{i} = I_0(\text{input}_{i}) = \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2}
  371. """ + r"""
  372. Args:
  373. input (Tensor): the input tensor
  374. Keyword args:
  375. {out}
  376. Example::
  377. >>> torch.i0(torch.arange(5, dtype=torch.float32))
  378. tensor([ 1.0000, 1.2661, 2.2796, 4.8808, 11.3019])
  379. """.format(**common_args))
  380. i0e = _add_docstr(_special.special_i0e,
  381. r"""
  382. i0e(input, *, out=None) -> Tensor
  383. Computes the exponentially scaled zeroth order modified Bessel function of the first kind (as defined below)
  384. for each element of :attr:`input`.
  385. .. math::
  386. \text{out}_{i} = \exp(-|x|) * i0(x) = \exp(-|x|) * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2}
  387. """ + r"""
  388. Args:
  389. {input}
  390. Keyword args:
  391. {out}
  392. Example::
  393. >>> torch.special.i0e(torch.arange(5, dtype=torch.float32))
  394. tensor([1.0000, 0.4658, 0.3085, 0.2430, 0.2070])
  395. """.format(**common_args))
  396. i1 = _add_docstr(_special.special_i1,
  397. r"""
  398. i1(input, *, out=None) -> Tensor
  399. Computes the first order modified Bessel function of the first kind (as defined below)
  400. for each element of :attr:`input`.
  401. .. math::
  402. \text{out}_{i} = \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!}
  403. """ + r"""
  404. Args:
  405. {input}
  406. Keyword args:
  407. {out}
  408. Example::
  409. >>> torch.special.i1(torch.arange(5, dtype=torch.float32))
  410. tensor([0.0000, 0.5652, 1.5906, 3.9534, 9.7595])
  411. """.format(**common_args))
  412. i1e = _add_docstr(_special.special_i1e,
  413. r"""
  414. i1e(input, *, out=None) -> Tensor
  415. Computes the exponentially scaled first order modified Bessel function of the first kind (as defined below)
  416. for each element of :attr:`input`.
  417. .. math::
  418. \text{out}_{i} = \exp(-|x|) * i1(x) =
  419. \exp(-|x|) * \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!}
  420. """ + r"""
  421. Args:
  422. {input}
  423. Keyword args:
  424. {out}
  425. Example::
  426. >>> torch.special.i1e(torch.arange(5, dtype=torch.float32))
  427. tensor([0.0000, 0.2079, 0.2153, 0.1968, 0.1788])
  428. """.format(**common_args))
  429. ndtr = _add_docstr(_special.special_ndtr,
  430. r"""
  431. ndtr(input, *, out=None) -> Tensor
  432. Computes the area under the standard Gaussian probability density function,
  433. integrated from minus infinity to :attr:`input`, elementwise.
  434. .. math::
  435. \text{ndtr}(x) = \frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt
  436. """ + r"""
  437. Args:
  438. {input}
  439. Keyword args:
  440. {out}
  441. Example::
  442. >>> torch.special.ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3]))
  443. tensor([0.0013, 0.0228, 0.1587, 0.5000, 0.8413, 0.9772, 0.9987])
  444. """.format(**common_args))
  445. ndtri = _add_docstr(_special.special_ndtri,
  446. r"""
  447. ndtri(input, *, out=None) -> Tensor
  448. Computes the argument, x, for which the area under the Gaussian probability density function
  449. (integrated from minus infinity to x) is equal to :attr:`input`, elementwise.
  450. .. math::
  451. \text{ndtri}(p) = \sqrt{2}\text{erf}^{-1}(2p - 1)
  452. .. note::
  453. Also known as quantile function for Normal Distribution.
  454. """ + r"""
  455. Args:
  456. {input}
  457. Keyword args:
  458. {out}
  459. Example::
  460. >>> torch.special.ndtri(torch.tensor([0, 0.25, 0.5, 0.75, 1]))
  461. tensor([ -inf, -0.6745, 0.0000, 0.6745, inf])
  462. """.format(**common_args))
  463. log_ndtr = _add_docstr(_special.special_log_ndtr,
  464. r"""
  465. log_ndtr(input, *, out=None) -> Tensor
  466. Computes the log of the area under the standard Gaussian probability density function,
  467. integrated from minus infinity to :attr:`input`, elementwise.
  468. .. math::
  469. \text{log\_ndtr}(x) = \log\left(\frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt \right)
  470. """ + r"""
  471. Args:
  472. {input}
  473. Keyword args:
  474. {out}
  475. Example::
  476. >>> torch.special.log_ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3]))
  477. tensor([-6.6077 -3.7832 -1.841 -0.6931 -0.1728 -0.023 -0.0014])
  478. """.format(**common_args))
  479. log1p = _add_docstr(_special.special_log1p,
  480. r"""
  481. log1p(input, *, out=None) -> Tensor
  482. Alias for :func:`torch.log1p`.
  483. """)
  484. sinc = _add_docstr(_special.special_sinc,
  485. r"""
  486. sinc(input, *, out=None) -> Tensor
  487. Computes the normalized sinc of :attr:`input.`
  488. .. math::
  489. \text{out}_{i} =
  490. \begin{cases}
  491. 1, & \text{if}\ \text{input}_{i}=0 \\
  492. \sin(\pi \text{input}_{i}) / (\pi \text{input}_{i}), & \text{otherwise}
  493. \end{cases}
  494. """ + r"""
  495. Args:
  496. {input}
  497. Keyword args:
  498. {out}
  499. Example::
  500. >>> t = torch.randn(4)
  501. >>> t
  502. tensor([ 0.2252, -0.2948, 1.0267, -1.1566])
  503. >>> torch.special.sinc(t)
  504. tensor([ 0.9186, 0.8631, -0.0259, -0.1300])
  505. """.format(**common_args))
  506. round = _add_docstr(_special.special_round,
  507. r"""
  508. round(input, *, out=None) -> Tensor
  509. Alias for :func:`torch.round`.
  510. """)
  511. softmax = _add_docstr(_special.special_softmax,
  512. r"""
  513. softmax(input, dim, *, dtype=None) -> Tensor
  514. Computes the softmax function.
  515. Softmax is defined as:
  516. :math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`
  517. It is applied to all slices along dim, and will re-scale them so that the elements
  518. lie in the range `[0, 1]` and sum to 1.
  519. Args:
  520. input (Tensor): input
  521. dim (int): A dimension along which softmax will be computed.
  522. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  523. If specified, the input tensor is cast to :attr:`dtype` before the operation
  524. is performed. This is useful for preventing data type overflows. Default: None.
  525. Examples::
  526. >>> t = torch.ones(2, 2)
  527. >>> torch.special.softmax(t, 0)
  528. tensor([[0.5000, 0.5000],
  529. [0.5000, 0.5000]])
  530. """)
  531. log_softmax = _add_docstr(_special.special_log_softmax,
  532. r"""
  533. log_softmax(input, dim, *, dtype=None) -> Tensor
  534. Computes softmax followed by a logarithm.
  535. While mathematically equivalent to log(softmax(x)), doing these two
  536. operations separately is slower and numerically unstable. This function
  537. is computed as:
  538. .. math::
  539. \text{log\_softmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
  540. """ + r"""
  541. Args:
  542. input (Tensor): input
  543. dim (int): A dimension along which log_softmax will be computed.
  544. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  545. If specified, the input tensor is cast to :attr:`dtype` before the operation
  546. is performed. This is useful for preventing data type overflows. Default: None.
  547. Example::
  548. >>> t = torch.ones(2, 2)
  549. >>> torch.special.log_softmax(t, 0)
  550. tensor([[-0.6931, -0.6931],
  551. [-0.6931, -0.6931]])
  552. """)
  553. zeta = _add_docstr(_special.special_zeta,
  554. r"""
  555. zeta(input, other, *, out=None) -> Tensor
  556. Computes the Hurwitz zeta function, elementwise.
  557. .. math::
  558. \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x}
  559. """ + r"""
  560. Args:
  561. input (Tensor): the input tensor corresponding to `x`.
  562. other (Tensor): the input tensor corresponding to `q`.
  563. .. note::
  564. The Riemann zeta function corresponds to the case when `q = 1`
  565. Keyword args:
  566. {out}
  567. Example::
  568. >>> x = torch.tensor([2., 4.])
  569. >>> torch.special.zeta(x, 1)
  570. tensor([1.6449, 1.0823])
  571. >>> torch.special.zeta(x, torch.tensor([1., 2.]))
  572. tensor([1.6449, 0.0823])
  573. >>> torch.special.zeta(2, torch.tensor([1., 2.]))
  574. tensor([1.6449, 0.6449])
  575. """.format(**common_args))
  576. multigammaln = _add_docstr(_special.special_multigammaln,
  577. r"""
  578. multigammaln(input, p, *, out=None) -> Tensor
  579. Computes the `multivariate log-gamma function
  580. <https://en.wikipedia.org/wiki/Multivariate_gamma_function>`_ with dimension
  581. :math:`p` element-wise, given by
  582. .. math::
  583. \log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right)
  584. where :math:`C = \log(\pi) \cdot \frac{p (p - 1)}{4}` and :math:`\Gamma(-)` is the Gamma function.
  585. All elements must be greater than :math:`\frac{p - 1}{2}`, otherwise the behavior is undefiend.
  586. """ + """
  587. Args:
  588. input (Tensor): the tensor to compute the multivariate log-gamma function
  589. p (int): the number of dimensions
  590. Keyword args:
  591. {out}
  592. Example::
  593. >>> a = torch.empty(2, 3).uniform_(1, 2)
  594. >>> a
  595. tensor([[1.6835, 1.8474, 1.1929],
  596. [1.0475, 1.7162, 1.4180]])
  597. >>> torch.special.multigammaln(a, 2)
  598. tensor([[0.3928, 0.4007, 0.7586],
  599. [1.0311, 0.3901, 0.5049]])
  600. """.format(**common_args))
  601. gammainc = _add_docstr(_special.special_gammainc,
  602. r"""
  603. gammainc(input, other, *, out=None) -> Tensor
  604. Computes the regularized lower incomplete gamma function:
  605. .. math::
  606. \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_0^{\text{other}_i} t^{\text{input}_i-1} e^{-t} dt
  607. where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive
  608. and at least one is strictly positive.
  609. If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`.
  610. :math:`\Gamma(\cdot)` in the equation above is the gamma function,
  611. .. math::
  612. \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt.
  613. See :func:`torch.special.gammaincc` and :func:`torch.special.gammaln` for related functions.
  614. Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`
  615. and float inputs.
  616. .. note::
  617. The backward pass with respect to :attr:`input` is not yet supported.
  618. Please open an issue on PyTorch's Github to request it.
  619. """ + r"""
  620. Args:
  621. input (Tensor): the first non-negative input tensor
  622. other (Tensor): the second non-negative input tensor
  623. Keyword args:
  624. {out}
  625. Example::
  626. >>> a1 = torch.tensor([4.0])
  627. >>> a2 = torch.tensor([3.0, 4.0, 5.0])
  628. >>> a = torch.special.gammaincc(a1, a2)
  629. tensor([0.3528, 0.5665, 0.7350])
  630. tensor([0.3528, 0.5665, 0.7350])
  631. >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2)
  632. tensor([1., 1., 1.])
  633. """.format(**common_args))
  634. gammaincc = _add_docstr(_special.special_gammaincc,
  635. r"""
  636. gammaincc(input, other, *, out=None) -> Tensor
  637. Computes the regularized upper incomplete gamma function:
  638. .. math::
  639. \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_{\text{other}_i}^{\infty} t^{\text{input}_i-1} e^{-t} dt
  640. where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive
  641. and at least one is strictly positive.
  642. If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`.
  643. :math:`\Gamma(\cdot)` in the equation above is the gamma function,
  644. .. math::
  645. \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt.
  646. See :func:`torch.special.gammainc` and :func:`torch.special.gammaln` for related functions.
  647. Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`
  648. and float inputs.
  649. .. note::
  650. The backward pass with respect to :attr:`input` is not yet supported.
  651. Please open an issue on PyTorch's Github to request it.
  652. """ + r"""
  653. Args:
  654. input (Tensor): the first non-negative input tensor
  655. other (Tensor): the second non-negative input tensor
  656. Keyword args:
  657. {out}
  658. Example::
  659. >>> a1 = torch.tensor([4.0])
  660. >>> a2 = torch.tensor([3.0, 4.0, 5.0])
  661. >>> a = torch.special.gammaincc(a1, a2)
  662. tensor([0.6472, 0.4335, 0.2650])
  663. >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2)
  664. tensor([1., 1., 1.])
  665. """.format(**common_args))
  666. airy_ai = _add_docstr(_special.special_airy_ai,
  667. r"""
  668. airy_ai(input, *, out=None) -> Tensor
  669. Airy function :math:`\text{Ai}\left(\text{input}\right)`.
  670. """ + r"""
  671. Args:
  672. {input}
  673. Keyword args:
  674. {out}
  675. """.format(**common_args))
  676. bessel_j0 = _add_docstr(_special.special_bessel_j0,
  677. r"""
  678. bessel_j0(input, *, out=None) -> Tensor
  679. Bessel function of the first kind of order :math:`0`.
  680. """ + r"""
  681. Args:
  682. {input}
  683. Keyword args:
  684. {out}
  685. """.format(**common_args))
  686. bessel_j1 = _add_docstr(_special.special_bessel_j1,
  687. r"""
  688. bessel_j1(input, *, out=None) -> Tensor
  689. Bessel function of the first kind of order :math:`1`.
  690. """ + r"""
  691. Args:
  692. {input}
  693. Keyword args:
  694. {out}
  695. """.format(**common_args))
  696. bessel_y0 = _add_docstr(_special.special_bessel_y0,
  697. r"""
  698. bessel_y0(input, *, out=None) -> Tensor
  699. Bessel function of the second kind of order :math:`0`.
  700. """ + r"""
  701. Args:
  702. {input}
  703. Keyword args:
  704. {out}
  705. """.format(**common_args))
  706. bessel_y1 = _add_docstr(_special.special_bessel_y1,
  707. r"""
  708. bessel_y1(input, *, out=None) -> Tensor
  709. Bessel function of the second kind of order :math:`1`.
  710. """ + r"""
  711. Args:
  712. {input}
  713. Keyword args:
  714. {out}
  715. """.format(**common_args))
  716. chebyshev_polynomial_t = _add_docstr(_special.special_chebyshev_polynomial_t,
  717. r"""
  718. chebyshev_polynomial_t(input, n, *, out=None) -> Tensor
  719. Chebyshev polynomial of the first kind :math:`T_{n}(\text{input})`.
  720. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
  721. is returned. If :math:`n < 6` or :math:`|\text{input}| > 1` the recursion:
  722. .. math::
  723. T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input})
  724. is evaluated. Otherwise, the explicit trigonometric formula:
  725. .. math::
  726. T_{n}(\text{input}) = \text{cos}(n \times \text{arccos}(x))
  727. is evaluated.
  728. """ + r"""
  729. Args:
  730. {input}
  731. n (Tensor): Degree of the polynomial.
  732. Keyword args:
  733. {out}
  734. """.format(**common_args))
  735. chebyshev_polynomial_u = _add_docstr(_special.special_chebyshev_polynomial_u,
  736. r"""
  737. chebyshev_polynomial_t(input, n, *, out=None) -> Tensor
  738. Chebyshev polynomial of the second kind :math:`U_{n}(\text{input})`.
  739. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`,
  740. :math:`2 \times \text{input}` is returned. If :math:`n < 6` or
  741. :math:`|\text{input}| > 1`, the recursion:
  742. .. math::
  743. T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input})
  744. is evaluated. Otherwise, the explicit trigonometric formula:
  745. .. math::
  746. \frac{\text{sin}((n + 1) \times \text{arccos}(\text{input}))}{\text{sin}(\text{arccos}(\text{input}))}
  747. is evaluated.
  748. """ + r"""
  749. Args:
  750. {input}
  751. n (Tensor): Degree of the polynomial.
  752. Keyword args:
  753. {out}
  754. """.format(**common_args))
  755. chebyshev_polynomial_v = _add_docstr(_special.special_chebyshev_polynomial_v,
  756. r"""
  757. chebyshev_polynomial_v(input, n, *, out=None) -> Tensor
  758. Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`.
  759. """ + r"""
  760. Args:
  761. {input}
  762. n (Tensor): Degree of the polynomial.
  763. Keyword args:
  764. {out}
  765. """.format(**common_args))
  766. chebyshev_polynomial_w = _add_docstr(_special.special_chebyshev_polynomial_w,
  767. r"""
  768. chebyshev_polynomial_w(input, n, *, out=None) -> Tensor
  769. Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`.
  770. """ + r"""
  771. Args:
  772. {input}
  773. n (Tensor): Degree of the polynomial.
  774. Keyword args:
  775. {out}
  776. """.format(**common_args))
  777. hermite_polynomial_h = _add_docstr(_special.special_hermite_polynomial_h,
  778. r"""
  779. hermite_polynomial_h(input, n, *, out=None) -> Tensor
  780. Physicist’s Hermite polynomial :math:`H_{n}(\text{input})`.
  781. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
  782. is returned. Otherwise, the recursion:
  783. .. math::
  784. H_{n + 1}(\text{input}) = 2 \times \text{input} \times H_{n}(\text{input}) - H_{n - 1}(\text{input})
  785. is evaluated.
  786. """ + r"""
  787. Args:
  788. {input}
  789. n (Tensor): Degree of the polynomial.
  790. Keyword args:
  791. {out}
  792. """.format(**common_args))
  793. hermite_polynomial_he = _add_docstr(_special.special_hermite_polynomial_he,
  794. r"""
  795. hermite_polynomial_he(input, n, *, out=None) -> Tensor
  796. Probabilist’s Hermite polynomial :math:`He_{n}(\text{input})`.
  797. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
  798. is returned. Otherwise, the recursion:
  799. .. math::
  800. He_{n + 1}(\text{input}) = 2 \times \text{input} \times He_{n}(\text{input}) - He_{n - 1}(\text{input})
  801. is evaluated.
  802. """ + r"""
  803. Args:
  804. {input}
  805. n (Tensor): Degree of the polynomial.
  806. Keyword args:
  807. {out}
  808. """.format(**common_args))
  809. laguerre_polynomial_l = _add_docstr(_special.special_laguerre_polynomial_l,
  810. r"""
  811. laguerre_polynomial_l(input, n, *, out=None) -> Tensor
  812. Laguerre polynomial :math:`L_{n}(\text{input})`.
  813. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
  814. is returned. Otherwise, the recursion:
  815. .. math::
  816. L_{n + 1}(\text{input}) = 2 \times \text{input} \times L_{n}(\text{input}) - L_{n - 1}(\text{input})
  817. is evaluated.
  818. """ + r"""
  819. Args:
  820. {input}
  821. n (Tensor): Degree of the polynomial.
  822. Keyword args:
  823. {out}
  824. """.format(**common_args))
  825. legendre_polynomial_p = _add_docstr(_special.special_legendre_polynomial_p,
  826. r"""
  827. legendre_polynomial_p(input, n, *, out=None) -> Tensor
  828. Legendre polynomial :math:`P_{n}(\text{input})`.
  829. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
  830. is returned. Otherwise, the recursion:
  831. .. math::
  832. P_{n + 1}(\text{input}) = 2 \times \text{input} \times P_{n}(\text{input}) - P_{n - 1}(\text{input})
  833. is evaluated.
  834. """ + r"""
  835. Args:
  836. {input}
  837. n (Tensor): Degree of the polynomial.
  838. Keyword args:
  839. {out}
  840. """.format(**common_args))
# Docstrings for the (scaled) modified Bessel operators. Each call binds the
# rendered text to the C-implemented function; {input}/{out} placeholders are
# filled from the shared common_args table.
# I0: modified Bessel, first kind, order 0.
modified_bessel_i0 = _add_docstr(_special.special_modified_bessel_i0,
r"""
modified_bessel_i0(input, *, out=None) -> Tensor
Modified Bessel function of the first kind of order :math:`0`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
# I1: modified Bessel, first kind, order 1.
modified_bessel_i1 = _add_docstr(_special.special_modified_bessel_i1,
r"""
modified_bessel_i1(input, *, out=None) -> Tensor
Modified Bessel function of the first kind of order :math:`1`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
# K0: modified Bessel, second kind, order 0.
modified_bessel_k0 = _add_docstr(_special.special_modified_bessel_k0,
r"""
modified_bessel_k0(input, *, out=None) -> Tensor
Modified Bessel function of the second kind of order :math:`0`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
# K1: modified Bessel, second kind, order 1.
modified_bessel_k1 = _add_docstr(_special.special_modified_bessel_k1,
r"""
modified_bessel_k1(input, *, out=None) -> Tensor
Modified Bessel function of the second kind of order :math:`1`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
# Exponentially scaled K0.
scaled_modified_bessel_k0 = _add_docstr(_special.special_scaled_modified_bessel_k0,
r"""
scaled_modified_bessel_k0(input, *, out=None) -> Tensor
Scaled modified Bessel function of the second kind of order :math:`0`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
# Exponentially scaled K1.
scaled_modified_bessel_k1 = _add_docstr(_special.special_scaled_modified_bessel_k1,
r"""
scaled_modified_bessel_k1(input, *, out=None) -> Tensor
Scaled modified Bessel function of the second kind of order :math:`1`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
  901. shifted_chebyshev_polynomial_t = _add_docstr(_special.special_shifted_chebyshev_polynomial_t,
  902. r"""
  903. shifted_chebyshev_polynomial_t(input, n, *, out=None) -> Tensor
  904. Chebyshev polynomial of the first kind :math:`T_{n}^{\ast}(\text{input})`.
  905. """ + r"""
  906. Args:
  907. {input}
  908. n (Tensor): Degree of the polynomial.
  909. Keyword args:
  910. {out}
  911. """.format(**common_args))
  912. shifted_chebyshev_polynomial_u = _add_docstr(_special.special_shifted_chebyshev_polynomial_u,
  913. r"""
  914. shifted_chebyshev_polynomial_u(input, n, *, out=None) -> Tensor
  915. Chebyshev polynomial of the second kind :math:`U_{n}^{\ast}(\text{input})`.
  916. """ + r"""
  917. Args:
  918. {input}
  919. n (Tensor): Degree of the polynomial.
  920. Keyword args:
  921. {out}
  922. """.format(**common_args))
  923. shifted_chebyshev_polynomial_v = _add_docstr(_special.special_shifted_chebyshev_polynomial_v,
  924. r"""
  925. shifted_chebyshev_polynomial_v(input, n, *, out=None) -> Tensor
  926. Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`.
  927. """ + r"""
  928. Args:
  929. {input}
  930. n (Tensor): Degree of the polynomial.
  931. Keyword args:
  932. {out}
  933. """.format(**common_args))
  934. shifted_chebyshev_polynomial_w = _add_docstr(_special.special_shifted_chebyshev_polynomial_w,
  935. r"""
  936. shifted_chebyshev_polynomial_w(input, n, *, out=None) -> Tensor
  937. Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`.
  938. """ + r"""
  939. Args:
  940. {input}
  941. n (Tensor): Degree of the polynomial.
  942. Keyword args:
  943. {out}
  944. """.format(**common_args))
# Attach the Sphinx docstring to the C-implemented spherical Bessel j0
# operator; {input}/{out} placeholders come from the shared common_args table.
spherical_bessel_j0 = _add_docstr(_special.special_spherical_bessel_j0,
r"""
spherical_bessel_j0(input, *, out=None) -> Tensor
Spherical Bessel function of the first kind of order :math:`0`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))