import warnings

from .distance import PairwiseDistance
from .module import Module
from .. import functional as F
from .. import _reduction as _Reduction

from torch import Tensor
from typing import Callable, Optional

__all__ = ['L1Loss', 'NLLLoss', 'NLLLoss2d', 'PoissonNLLLoss', 'GaussianNLLLoss', 'KLDivLoss',
           'MSELoss', 'BCELoss', 'BCEWithLogitsLoss', 'HingeEmbeddingLoss', 'MultiLabelMarginLoss',
           'SmoothL1Loss', 'HuberLoss', 'SoftMarginLoss', 'CrossEntropyLoss', 'MultiLabelSoftMarginLoss',
           'CosineEmbeddingLoss', 'MarginRankingLoss', 'MultiMarginLoss', 'TripletMarginLoss',
           'TripletMarginWithDistanceLoss', 'CTCLoss']


class _Loss(Module):
    reduction: str

    def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super().__init__()
        if size_average is not None or reduce is not None:
            # Translate the deprecated size_average/reduce flags into a reduction string.
            self.reduction: str = _Reduction.legacy_get_string(size_average, reduce)
        else:
            self.reduction = reduction


class _WeightedLoss(_Loss):
    def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super().__init__(size_average, reduce, reduction)
        # Register the rescaling weights as a buffer so they follow the module
        # across devices and are included in the state dict.
        self.register_buffer('weight', weight)
        self.weight: Optional[Tensor]
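

# For reference, the deprecated ``size_average``/``reduce`` flags map onto the
# ``reduction`` string roughly as follows (a sketch of what
# ``_Reduction.legacy_get_string`` returns; the deprecation warnings it emits
# are omitted):
#
#     size_average=True,  reduce=True   -> 'mean'   (the historical defaults)
#     size_average=False, reduce=True   -> 'sum'
#     size_average=<any>, reduce=False  -> 'none'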

class L1Loss(_Loss):
    r"""Creates a criterion that measures the mean absolute error (MAE) between each element in
    the input :math:`x` and target :math:`y`.

    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \left| x_n - y_n \right|,

    where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each.

    The mean operation still operates over all the elements, and divides by :math:`n`.
    The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``.

    Supports real-valued and complex-valued inputs.

    Args:
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Target: :math:`(*)`, same shape as the input.
        - Output: scalar. If :attr:`reduction` is ``'none'``, then
          :math:`(*)`, same shape as the input.

    Examples::

        >>> loss = nn.L1Loss()
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5)
        >>> output = loss(input, target)
        >>> output.backward()
    """
    __constants__ = ['reduction']

    def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super().__init__(size_average, reduce, reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.l1_loss(input, target, reduction=self.reduction)

class NLLLoss(_WeightedLoss):
    r"""The negative log likelihood loss. It is useful to train a classification
    problem with `C` classes.

    If provided, the optional argument :attr:`weight` should be a 1D Tensor assigning
    weight to each of the classes. This is particularly useful when you have an
    unbalanced training set.

    The `input` given through a forward call is expected to contain
    log-probabilities of each class. `input` has to be a Tensor of size either
    :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)`
    with :math:`K \geq 1` for the `K`-dimensional case. The latter is useful for
    higher dimension inputs, such as computing NLL loss per-pixel for 2D images.

    Obtaining log-probabilities in a neural network is easily achieved by
    adding a `LogSoftmax` layer in the last layer of your network.
    You may use `CrossEntropyLoss` instead, if you prefer not to add an extra
    layer.

    The `target` that this loss expects should be a class index in the range :math:`[0, C-1]`
    where `C = number of classes`; if `ignore_index` is specified, this loss also accepts
    this class index (this index may not necessarily be in the class range).

    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = - w_{y_n} x_{n,y_n}, \quad
        w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore\_index}\},

    where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, and
    :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then

    .. math::
        \ell(x, y) = \begin{cases}
            \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, &
            \text{if reduction} = \text{`mean';}\\
            \sum_{n=1}^N l_n, &
            \text{if reduction} = \text{`sum'.}
        \end{cases}

    Args:
        weight (Tensor, optional): a manual rescaling weight given to each
            class. If given, it has to be a Tensor of size `C`. Otherwise, it is
            treated as if having all ones.
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``None``
        ignore_index (int, optional): Specifies a target value that is ignored
            and does not contribute to the input gradient. When
            :attr:`size_average` is ``True``, the loss is averaged over
            non-ignored targets.
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``None``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
            be applied, ``'mean'``: the weighted mean of the output is taken,
            ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in
            the meantime, specifying either of those two args will override
            :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input: :math:`(N, C)` or :math:`(C)`, where `C = number of classes`, or
          :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
          in the case of `K`-dimensional loss.
        - Target: :math:`(N)` or :math:`()`, where each value is
          :math:`0 \leq \text{targets}[i] \leq C-1`, or
          :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of
          K-dimensional loss.
        - Output: If :attr:`reduction` is ``'none'``, shape :math:`(N)` or
          :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of K-dimensional loss.
          Otherwise, scalar.

    Examples::

        >>> m = nn.LogSoftmax(dim=1)
        >>> loss = nn.NLLLoss()
        >>> # input is of size N x C = 3 x 5
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> # each element in target has to have 0 <= value < C
        >>> target = torch.tensor([1, 0, 4])
        >>> output = loss(m(input), target)
        >>> output.backward()
        >>>
        >>>
        >>> # 2D loss example (used, for example, with image inputs)
        >>> N, C = 5, 4
        >>> loss = nn.NLLLoss()
        >>> # input is of size N x C x height x width
        >>> data = torch.randn(N, 16, 10, 10)
        >>> conv = nn.Conv2d(16, C, (3, 3))
        >>> m = nn.LogSoftmax(dim=1)
        >>> # each element in target has to have 0 <= value < C
        >>> target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
        >>> output = loss(m(conv(data)), target)
        >>> output.backward()
    """
    __constants__ = ['ignore_index', 'reduction']
    ignore_index: int

    def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
                 reduce=None, reduction: str = 'mean') -> None:
        super().__init__(weight, size_average, reduce, reduction)
        self.ignore_index = ignore_index

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)

class NLLLoss2d(NLLLoss):
    def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
                 reduce=None, reduction: str = 'mean') -> None:
        warnings.warn("NLLLoss2d has been deprecated. "
                      "Please use NLLLoss instead as a drop-in replacement and see "
                      "https://pytorch.org/docs/master/nn.html#torch.nn.NLLLoss for more details.")
        super().__init__(weight, size_average, ignore_index, reduce, reduction)

class PoissonNLLLoss(_Loss):
    r"""Negative log likelihood loss with Poisson distribution of target.

    The loss can be described as:

    .. math::
        \text{target} \sim \mathrm{Poisson}(\text{input})

        \text{loss}(\text{input}, \text{target}) = \text{input} - \text{target} * \log(\text{input})
        + \log(\text{target!})

    The last term can be omitted or approximated with Stirling's formula. The
    approximation is used for target values greater than 1; for targets less than
    or equal to 1, zeros are added to the loss.

    Args:
        log_input (bool, optional): if ``True`` the loss is computed as
            :math:`\exp(\text{input}) - \text{target}*\text{input}`, if ``False`` the loss is
            :math:`\text{input} - \text{target}*\log(\text{input}+\text{eps})`.
        full (bool, optional): whether to compute the full loss, i.e. to add the
            Stirling approximation term

            .. math::
                \text{target}*\log(\text{target}) - \text{target} + 0.5 * \log(2\pi\text{target}).
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        eps (float, optional): Small value to avoid evaluation of :math:`\log(0)` when
            :attr:`log_input = False`. Default: 1e-8
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Examples::

        >>> loss = nn.PoissonNLLLoss()
        >>> log_input = torch.randn(5, 2, requires_grad=True)
        >>> target = torch.randn(5, 2)
        >>> output = loss(log_input, target)
        >>> output.backward()

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Target: :math:`(*)`, same shape as the input.
        - Output: scalar by default. If :attr:`reduction` is ``'none'``, then :math:`(*)`,
          the same shape as the input.
    """
    __constants__ = ['log_input', 'full', 'eps', 'reduction']
    log_input: bool
    full: bool
    eps: float

    def __init__(self, log_input: bool = True, full: bool = False, size_average=None,
                 eps: float = 1e-8, reduce=None, reduction: str = 'mean') -> None:
        super().__init__(size_average, reduce, reduction)
        self.log_input = log_input
        self.full = full
        self.eps = eps

    def forward(self, log_input: Tensor, target: Tensor) -> Tensor:
        return F.poisson_nll_loss(log_input, target, log_input=self.log_input, full=self.full,
                                  eps=self.eps, reduction=self.reduction)

class GaussianNLLLoss(_Loss):
    r"""Gaussian negative log likelihood loss.

    The targets are treated as samples from Gaussian distributions with
    expectations and variances predicted by the neural network. For a
    ``target`` tensor modelled as having Gaussian distribution with a tensor
    of expectations ``input`` and a tensor of positive variances ``var`` the loss is:

    .. math::
        \text{loss} = \frac{1}{2}\left(\log\left(\text{max}\left(\text{var},
        \ \text{eps}\right)\right) + \frac{\left(\text{input} - \text{target}\right)^2}
        {\text{max}\left(\text{var}, \ \text{eps}\right)}\right) + \text{const.}

    where :attr:`eps` is used for stability. By default, the constant term of
    the loss function is omitted unless :attr:`full` is ``True``. If ``var`` is not the same
    size as ``input`` (due to a homoscedastic assumption), it must either have a final dimension
    of 1 or have one fewer dimension (with all other sizes being the same) for correct broadcasting.

    Args:
        full (bool, optional): include the constant term in the loss
            calculation. Default: ``False``.
        eps (float, optional): value used to clamp ``var`` (see note below), for
            stability. Default: 1e-6.
        reduction (str, optional): specifies the reduction to apply to the
            output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
            will be applied, ``'mean'``: the output is the average of all batch
            member losses, ``'sum'``: the output is the sum of all batch member
            losses. Default: ``'mean'``.

    Shape:
        - Input: :math:`(N, *)` or :math:`(*)` where :math:`*` means any number of additional
          dimensions
        - Target: :math:`(N, *)` or :math:`(*)`, same shape as the input, or same shape as the input
          but with one dimension equal to 1 (to allow for broadcasting)
        - Var: :math:`(N, *)` or :math:`(*)`, same shape as the input, or same shape as the input but
          with one dimension equal to 1, or same shape as the input but with one fewer
          dimension (to allow for broadcasting)
        - Output: scalar if :attr:`reduction` is ``'mean'`` (default) or
          ``'sum'``. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same
          shape as the input

    Examples::

        >>> loss = nn.GaussianNLLLoss()
        >>> input = torch.randn(5, 2, requires_grad=True)
        >>> target = torch.randn(5, 2)
        >>> var = torch.ones(5, 2, requires_grad=True)  # heteroscedastic
        >>> output = loss(input, target, var)
        >>> output.backward()

        >>> loss = nn.GaussianNLLLoss()
        >>> input = torch.randn(5, 2, requires_grad=True)
        >>> target = torch.randn(5, 2)
        >>> var = torch.ones(5, 1, requires_grad=True)  # homoscedastic
        >>> output = loss(input, target, var)
        >>> output.backward()

    Note:
        The clamping of ``var`` is ignored with respect to autograd, and so the
        gradients are unaffected by it.

    Reference:
        Nix, D. A. and Weigend, A. S., "Estimating the mean and variance of the
        target probability distribution", Proceedings of 1994 IEEE International
        Conference on Neural Networks (ICNN'94), Orlando, FL, USA, 1994, pp. 55-60
        vol.1, doi: 10.1109/ICNN.1994.374138.
    """
    __constants__ = ['full', 'eps', 'reduction']
    full: bool
    eps: float

    def __init__(self, *, full: bool = False, eps: float = 1e-6, reduction: str = 'mean') -> None:
        super().__init__(None, None, reduction)
        self.full = full
        self.eps = eps

    def forward(self, input: Tensor, target: Tensor, var: Tensor) -> Tensor:
        return F.gaussian_nll_loss(input, target, var, full=self.full, eps=self.eps, reduction=self.reduction)

class KLDivLoss(_Loss):
    r"""The Kullback-Leibler divergence loss.

    For tensors of the same shape :math:`y_{\text{pred}},\ y_{\text{true}}`,
    where :math:`y_{\text{pred}}` is the :attr:`input` and :math:`y_{\text{true}}` is the
    :attr:`target`, we define the **pointwise KL-divergence** as

    .. math::
        L(y_{\text{pred}},\ y_{\text{true}})
            = y_{\text{true}} \cdot \log \frac{y_{\text{true}}}{y_{\text{pred}}}
            = y_{\text{true}} \cdot (\log y_{\text{true}} - \log y_{\text{pred}})

    To avoid underflow issues when computing this quantity, this loss expects the argument
    :attr:`input` in the log-space. The argument :attr:`target` may also be provided in the
    log-space if :attr:`log_target`\ `= True`.

    To summarise, this function is roughly equivalent to computing

    .. code-block:: python

        if not log_target:  # default
            loss_pointwise = target * (target.log() - input)
        else:
            loss_pointwise = target.exp() * (target - input)

    and then reducing this result depending on the argument :attr:`reduction` as

    .. code-block:: python

        if reduction == "mean":  # default
            loss = loss_pointwise.mean()
        elif reduction == "batchmean":  # mathematically correct
            loss = loss_pointwise.sum() / input.size(0)
        elif reduction == "sum":
            loss = loss_pointwise.sum()
        else:  # reduction == "none"
            loss = loss_pointwise

    .. note::
        As with all the other losses in PyTorch, this function expects the first argument,
        :attr:`input`, to be the output of the model (e.g. the neural network)
        and the second, :attr:`target`, to be the observations in the dataset.
        This differs from the standard mathematical notation :math:`KL(P\ ||\ Q)` where
        :math:`P` denotes the distribution of the observations and :math:`Q` denotes the model.

    .. warning::
        :attr:`reduction`\ `= "mean"` doesn't return the true KL divergence value, please use
        :attr:`reduction`\ `= "batchmean"` which aligns with the mathematical definition.
        In a future release, `"mean"` will be changed to be the same as `"batchmean"`.

    Args:
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to `False`, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is `False`. Default: `True`
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is `False`, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: `True`
        reduction (str, optional): Specifies the reduction to apply to the output. Default: `"mean"`
        log_target (bool, optional): Specifies whether `target` is in the log space. Default: `False`

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Target: :math:`(*)`, same shape as the input.
        - Output: scalar by default. If :attr:`reduction` is `'none'`, then :math:`(*)`,
          same shape as the input.

    Examples::

        >>> import torch.nn.functional as F
        >>> kl_loss = nn.KLDivLoss(reduction="batchmean")
        >>> # input should be a distribution in the log space
        >>> input = F.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1)
        >>> # Sample a batch of distributions. Usually this would come from the dataset
        >>> target = F.softmax(torch.rand(3, 5), dim=1)
        >>> output = kl_loss(input, target)

        >>> kl_loss = nn.KLDivLoss(reduction="batchmean", log_target=True)
        >>> log_target = F.log_softmax(torch.rand(3, 5), dim=1)
        >>> output = kl_loss(input, log_target)
    """
    __constants__ = ['reduction']

    def __init__(self, size_average=None, reduce=None, reduction: str = 'mean', log_target: bool = False) -> None:
        super().__init__(size_average, reduce, reduction)
        self.log_target = log_target

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.kl_div(input, target, reduction=self.reduction, log_target=self.log_target)

class MSELoss(_Loss):
    r"""Creates a criterion that measures the mean squared error (squared L2 norm) between
    each element in the input :math:`x` and target :math:`y`.

    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \left( x_n - y_n \right)^2,

    where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each.

    The mean operation still operates over all the elements, and divides by :math:`n`.
    The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``.

    Args:
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Target: :math:`(*)`, same shape as the input.

    Examples::

        >>> loss = nn.MSELoss()
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5)
        >>> output = loss(input, target)
        >>> output.backward()
    """
    __constants__ = ['reduction']

    def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super().__init__(size_average, reduce, reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.mse_loss(input, target, reduction=self.reduction)

class BCELoss(_WeightedLoss):
    r"""Creates a criterion that measures the Binary Cross Entropy between the target and
    the input probabilities:

    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right],

    where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then

    .. math::
        \ell(x, y) = \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    This is used for measuring the error of a reconstruction in, for example,
    an auto-encoder. Note that the targets :math:`y` should be numbers
    between 0 and 1.

    Notice that if :math:`x_n` is either 0 or 1, one of the log terms would be
    mathematically undefined in the above loss equation. PyTorch chooses to set
    :math:`\log (0) = -\infty`, since :math:`\lim_{x\to 0} \log (x) = -\infty`.
    However, an infinite term in the loss equation is not desirable for several reasons.

    For one, if either :math:`y_n = 0` or :math:`(1 - y_n) = 0`, then we would be
    multiplying 0 with infinity. Secondly, if we have an infinite loss value, then
    we would also have an infinite term in our gradient, since
    :math:`\lim_{x\to 0} \frac{d}{dx} \log (x) = \infty`.
    This would make BCELoss's backward method nonlinear with respect to :math:`x_n`,
    and using it for things like linear regression would not be straightforward.

    Our solution is that BCELoss clamps its log function outputs to be greater than
    or equal to -100. This way, we can always have a finite loss value and a linear
    backward method.

    Args:
        weight (Tensor, optional): a manual rescaling weight given to the loss
            of each batch element. If given, has to be a Tensor of size `nbatch`.
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Target: :math:`(*)`, same shape as the input.
        - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
          shape as input.

    Examples::

        >>> m = nn.Sigmoid()
        >>> loss = nn.BCELoss()
        >>> input = torch.randn(3, requires_grad=True)
        >>> target = torch.empty(3).random_(2)
        >>> output = loss(m(input), target)
        >>> output.backward()
    """
    __constants__ = ['reduction']

    def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super().__init__(weight, size_average, reduce, reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)

class BCEWithLogitsLoss(_Loss):
    r"""This loss combines a `Sigmoid` layer and the `BCELoss` in one single
    class. This version is more numerically stable than using a plain `Sigmoid`
    followed by a `BCELoss` as, by combining the operations into one layer,
    we take advantage of the log-sum-exp trick for numerical stability.

    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = - w_n \left[ y_n \cdot \log \sigma(x_n)
        + (1 - y_n) \cdot \log (1 - \sigma(x_n)) \right],

    where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then

    .. math::
        \ell(x, y) = \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    This is used for measuring the error of a reconstruction in, for example,
    an auto-encoder. Note that the targets `t[i]` should be numbers
    between 0 and 1.

    It's possible to trade off recall and precision by adding weights to positive examples.
    In the case of multi-label classification the loss can be described as:

    .. math::
        \ell_c(x, y) = L_c = \{l_{1,c},\dots,l_{N,c}\}^\top, \quad
        l_{n,c} = - w_{n,c} \left[ p_c y_{n,c} \cdot \log \sigma(x_{n,c})
        + (1 - y_{n,c}) \cdot \log (1 - \sigma(x_{n,c})) \right],

    where :math:`c` is the class number (:math:`c > 1` for multi-label binary classification,
    :math:`c = 1` for single-label binary classification),
    :math:`n` is the number of the sample in the batch and
    :math:`p_c` is the weight of the positive answer for the class :math:`c`.

    :math:`p_c > 1` increases the recall, :math:`p_c < 1` increases the precision.

    For example, if a dataset contains 100 positive and 300 negative examples of a single class,
    then `pos_weight` for the class should be equal to :math:`\frac{300}{100}=3`.
    The loss would act as if the dataset contains :math:`3\times 100=300` positive examples.

    Examples::

        >>> target = torch.ones([10, 64], dtype=torch.float32)  # 64 classes, batch size = 10
        >>> output = torch.full([10, 64], 1.5)  # A prediction (logit)
        >>> pos_weight = torch.ones([64])  # All weights are equal to 1
        >>> criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
        >>> criterion(output, target)  # -log(sigmoid(1.5))
        tensor(0.20...)

    Args:
        weight (Tensor, optional): a manual rescaling weight given to the loss
            of each batch element. If given, has to be a Tensor of size `nbatch`.
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
        pos_weight (Tensor, optional): a weight of positive examples.
            Must be a vector with length equal to the number of classes.

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Target: :math:`(*)`, same shape as the input.
        - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
          shape as input.

    Examples::

        >>> loss = nn.BCEWithLogitsLoss()
        >>> input = torch.randn(3, requires_grad=True)
        >>> target = torch.empty(3).random_(2)
        >>> output = loss(input, target)
        >>> output.backward()
    """
    def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean',
                 pos_weight: Optional[Tensor] = None) -> None:
        super().__init__(size_average, reduce, reduction)
        self.register_buffer('weight', weight)
        self.register_buffer('pos_weight', pos_weight)
        self.weight: Optional[Tensor]
        self.pos_weight: Optional[Tensor]

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.binary_cross_entropy_with_logits(input, target,
                                                  self.weight,
                                                  pos_weight=self.pos_weight,
                                                  reduction=self.reduction)

class HingeEmbeddingLoss(_Loss):
    r"""Measures the loss given an input tensor :math:`x` and a labels tensor :math:`y`
    (containing 1 or -1).

    This is usually used for measuring whether two inputs are similar or
    dissimilar, e.g. using the L1 pairwise distance as :math:`x`, and is typically
    used for learning nonlinear embeddings or semi-supervised learning.

    The loss function for the :math:`n`-th sample in the mini-batch is

    .. math::
        l_n = \begin{cases}
            x_n, & \text{if}\; y_n = 1,\\
            \max \{0, \Delta - x_n\}, & \text{if}\; y_n = -1,
        \end{cases}

    and the total loss function is

    .. math::
        \ell(x, y) = \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    where :math:`L = \{l_1,\dots,l_N\}^\top`.

    Args:
        margin (float, optional): Has a default value of `1`.
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input: :math:`(*)` where :math:`*` means any number of dimensions. The sum operation
          operates over all the elements.
        - Target: :math:`(*)`, same shape as the input
        - Output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input
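
    Examples (a minimal usage sketch, assuming ``import torch`` and ``import torch.nn as nn``)::

        >>> loss = nn.HingeEmbeddingLoss(margin=1.0)
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> # target entries must be 1 or -1
        >>> target = torch.empty(3, 5).random_(2) * 2 - 1
        >>> output = loss(input, target)
        >>> output.backward()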
  626. """
  627. __constants__ = ['margin', 'reduction']
  628. margin: float
  629. def __init__(self, margin: float = 1.0, size_average=None, reduce=None, reduction: str = 'mean') -> None:
  630. super().__init__(size_average, reduce, reduction)
  631. self.margin = margin
  632. def forward(self, input: Tensor, target: Tensor) -> Tensor:
  633. return F.hinge_embedding_loss(input, target, margin=self.margin, reduction=self.reduction)

class MultiLabelMarginLoss(_Loss):
    r"""Creates a criterion that optimizes a multi-class multi-classification
    hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`)
    and output :math:`y` (which is a 2D `Tensor` of target class indices).
    For each sample in the mini-batch:

    .. math::
        \text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)}

    where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \
    :math:`y \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \
    :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \
    and :math:`i \neq y[j]` for all :math:`i` and :math:`j`.

    :math:`y` and :math:`x` must have the same size.

    The criterion only considers a contiguous block of non-negative targets that
    starts at the front.

    This allows for different samples to have variable amounts of target classes.

    Args:
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input: :math:`(C)` or :math:`(N, C)` where `N` is the batch size and `C`
          is the number of classes.
        - Target: :math:`(C)` or :math:`(N, C)`, label targets padded by -1 ensuring same shape as the input.
        - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`.

    Examples::

        >>> loss = nn.MultiLabelMarginLoss()
        >>> x = torch.FloatTensor([[0.1, 0.2, 0.4, 0.8]])
        >>> # for target y, only consider labels 3 and 0, not after label -1
        >>> y = torch.LongTensor([[3, 0, -1, 1]])
        >>> # 0.25 * ((1-(0.1-0.2)) + (1-(0.1-0.4)) + (1-(0.8-0.2)) + (1-(0.8-0.4)))
        >>> loss(x, y)
        tensor(0.85...)
    """
    __constants__ = ['reduction']

    def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super().__init__(size_average, reduce, reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.multilabel_margin_loss(input, target, reduction=self.reduction)

class SmoothL1Loss(_Loss):
    r"""Creates a criterion that uses a squared term if the absolute
    element-wise error falls below beta and an L1 term otherwise.
    It is less sensitive to outliers than :class:`torch.nn.MSELoss` and in some cases
    prevents exploding gradients (e.g. see the paper `Fast R-CNN`_ by Ross Girshick).

    For a batch of size :math:`N`, the unreduced loss can be described as:

    .. math::
        \ell(x, y) = L = \{l_1, ..., l_N\}^T

    with

    .. math::
        l_n = \begin{cases}
            0.5 (x_n - y_n)^2 / beta, & \text{if } |x_n - y_n| < beta \\
            |x_n - y_n| - 0.5 * beta, & \text{otherwise }
        \end{cases}

    If `reduction` is not `none`, then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    .. note::
        Smooth L1 loss can be seen as exactly :class:`L1Loss`, but with the :math:`|x - y| < beta`
        portion replaced with a quadratic function such that its slope is 1 at :math:`|x - y| = beta`.
        The quadratic segment smooths the L1 loss near :math:`|x - y| = 0`.

    .. note::
        Smooth L1 loss is closely related to :class:`HuberLoss`, being
        equivalent to :math:`huber(x, y) / beta` (note that Smooth L1's beta hyper-parameter is
        also known as delta for Huber). This leads to the following differences:

        * As beta -> 0, Smooth L1 loss converges to :class:`L1Loss`, while :class:`HuberLoss`
          converges to a constant 0 loss. When beta is 0, Smooth L1 loss is equivalent to L1 loss.
        * As beta -> :math:`+\infty`, Smooth L1 loss converges to a constant 0 loss, while
          :class:`HuberLoss` converges to :class:`MSELoss`.
        * For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant slope of 1.
          For :class:`HuberLoss`, the slope of the L1 segment is beta.

    .. _`Fast R-CNN`: https://arxiv.org/abs/1504.08083

    Args:
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
        beta (float, optional): Specifies the threshold at which to change between L1 and L2 loss.
            The value must be non-negative. Default: 1.0

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Target: :math:`(*)`, same shape as the input.
        - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as the input.
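
    Examples (a minimal usage sketch, assuming ``import torch`` and ``import torch.nn as nn``)::

        >>> loss = nn.SmoothL1Loss(beta=1.0)
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5)
        >>> output = loss(input, target)
        >>> output.backward()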
  742. """
  743. __constants__ = ['reduction']
  744. def __init__(self, size_average=None, reduce=None, reduction: str = 'mean', beta: float = 1.0) -> None:
  745. super().__init__(size_average, reduce, reduction)
  746. self.beta = beta
  747. def forward(self, input: Tensor, target: Tensor) -> Tensor:
  748. return F.smooth_l1_loss(input, target, reduction=self.reduction, beta=self.beta)

class HuberLoss(_Loss):
    r"""Creates a criterion that uses a squared term if the absolute
    element-wise error falls below delta and a delta-scaled L1 term otherwise.
    This loss combines advantages of both :class:`L1Loss` and :class:`MSELoss`; the
    delta-scaled L1 region makes the loss less sensitive to outliers than :class:`MSELoss`,
    while the L2 region provides smoothness over :class:`L1Loss` near 0. See
    `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`_ for more information.

    For a batch of size :math:`N`, the unreduced loss can be described as:

    .. math::
        \ell(x, y) = L = \{l_1, ..., l_N\}^T

    with

    .. math::
        l_n = \begin{cases}
            0.5 (x_n - y_n)^2, & \text{if } |x_n - y_n| < delta \\
            delta * (|x_n - y_n| - 0.5 * delta), & \text{otherwise }
        \end{cases}

    If `reduction` is not `none`, then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    .. note::
        When delta is set to 1, this loss is equivalent to :class:`SmoothL1Loss`.
        In general, this loss differs from :class:`SmoothL1Loss` by a factor of delta (AKA beta
        in Smooth L1).
        See :class:`SmoothL1Loss` for additional discussion on the differences in behavior
        between the two losses.

    Args:
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
        delta (float, optional): Specifies the threshold at which to change between delta-scaled L1 and L2 loss.
            The value must be positive. Default: 1.0

    Shape:
        - Input: :math:`(*)` where :math:`*` means any number of dimensions.
        - Target: :math:`(*)`, same shape as the input.
        - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as the input.
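
    Examples (a minimal usage sketch, assuming ``import torch`` and ``import torch.nn as nn``)::

        >>> loss = nn.HuberLoss(delta=1.0)
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5)
        >>> output = loss(input, target)
        >>> output.backward()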
  789. """
  790. __constants__ = ['reduction', 'delta']
  791. def __init__(self, reduction: str = 'mean', delta: float = 1.0) -> None:
  792. super().__init__(reduction=reduction)
  793. self.delta = delta
  794. def forward(self, input: Tensor, target: Tensor) -> Tensor:
  795. return F.huber_loss(input, target, reduction=self.reduction, delta=self.delta)

class SoftMarginLoss(_Loss):
    r"""Creates a criterion that optimizes a two-class classification
    logistic loss between input tensor :math:`x` and target tensor :math:`y`
    (containing 1 or -1).

    .. math::
        \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()}

    Args:
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Target: :math:`(*)`, same shape as the input.
        - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
          shape as input.
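
    Examples (a minimal usage sketch, assuming ``import torch`` and ``import torch.nn as nn``)::

        >>> loss = nn.SoftMarginLoss()
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> # target entries must be 1 or -1
        >>> target = torch.empty(3, 5).random_(2) * 2 - 1
        >>> output = loss(input, target)
        >>> output.backward()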
  823. """
  824. __constants__ = ['reduction']
  825. def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
  826. super().__init__(size_average, reduce, reduction)
  827. def forward(self, input: Tensor, target: Tensor) -> Tensor:
  828. return F.soft_margin_loss(input, target, reduction=self.reduction)


class CrossEntropyLoss(_WeightedLoss):
    r"""This criterion computes the cross entropy loss between input logits
    and target.

    It is useful when training a classification problem with `C` classes.
    If provided, the optional argument :attr:`weight` should be a 1D `Tensor`
    assigning weight to each of the classes.
    This is particularly useful when you have an unbalanced training set.

    The `input` is expected to contain the unnormalized logits for each class (which do `not` need
    to be positive or sum to 1, in general).
    `input` has to be a Tensor of size :math:`(C)` for unbatched input,
    :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` for the
    `K`-dimensional case. The last being useful for higher dimension inputs, such
    as computing cross entropy loss per-pixel for 2D images.

    The `target` that this criterion expects should contain either:

    - Class indices in the range :math:`[0, C)` where :math:`C` is the number of classes; if
      `ignore_index` is specified, this loss also accepts this class index (this index
      may not necessarily be in the class range). The unreduced (i.e. with :attr:`reduction`
      set to ``'none'``) loss for this case can be described as:

      .. math::
          \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
          l_n = - w_{y_n} \log \frac{\exp(x_{n,y_n})}{\sum_{c=1}^C \exp(x_{n,c})}
          \cdot \mathbb{1}\{y_n \not= \text{ignore\_index}\}

      where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight,
      :math:`C` is the number of classes, and :math:`N` spans the minibatch dimension as well as
      :math:`d_1, ..., d_k` for the `K`-dimensional case. If
      :attr:`reduction` is not ``'none'`` (default ``'mean'``), then

      .. math::
          \ell(x, y) = \begin{cases}
              \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n} \cdot \mathbb{1}\{y_n \not= \text{ignore\_index}\}} l_n, &
               \text{if reduction} = \text{`mean';}\\
              \sum_{n=1}^N l_n, &
               \text{if reduction} = \text{`sum'.}
          \end{cases}

      Note that this case is equivalent to the combination of :class:`~torch.nn.LogSoftmax` and
      :class:`~torch.nn.NLLLoss`.

    - Probabilities for each class; useful when labels beyond a single class per minibatch item
      are required, such as for blended labels, label smoothing, etc. The unreduced (i.e. with
      :attr:`reduction` set to ``'none'``) loss for this case can be described as:

      .. math::
          \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
          l_n = - \sum_{c=1}^C w_c \log \frac{\exp(x_{n,c})}{\sum_{i=1}^C \exp(x_{n,i})} y_{n,c}

      where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight,
      :math:`C` is the number of classes, and :math:`N` spans the minibatch dimension as well as
      :math:`d_1, ..., d_k` for the `K`-dimensional case. If
      :attr:`reduction` is not ``'none'`` (default ``'mean'``), then

      .. math::
          \ell(x, y) = \begin{cases}
              \frac{\sum_{n=1}^N l_n}{N}, &
               \text{if reduction} = \text{`mean';}\\
              \sum_{n=1}^N l_n, &
               \text{if reduction} = \text{`sum'.}
          \end{cases}

    .. note::
        The performance of this criterion is generally better when `target` contains class
        indices, as this allows for optimized computation. Consider providing `target` as
        class probabilities only when a single class label per minibatch item is too restrictive.

    Args:
        weight (Tensor, optional): a manual rescaling weight given to each class.
            If given, has to be a Tensor of size `C`
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        ignore_index (int, optional): Specifies a target value that is ignored
            and does not contribute to the input gradient. When :attr:`size_average` is
            ``True``, the loss is averaged over non-ignored targets. Note that
            :attr:`ignore_index` is only applicable when the target contains class indices.
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
            be applied, ``'mean'``: the weighted mean of the output is taken,
            ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in
            the meantime, specifying either of those two args will override
            :attr:`reduction`. Default: ``'mean'``
        label_smoothing (float, optional): A float in [0.0, 1.0]. Specifies the amount
            of smoothing when computing the loss, where 0.0 means no smoothing. The targets
            become a mixture of the original ground truth and a uniform distribution as described in
            `Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.

    Shape:
        - Input: Shape :math:`(C)`, :math:`(N, C)` or :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
          in the case of `K`-dimensional loss.
        - Target: If containing class indices, shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with
          :math:`K \geq 1` in the case of K-dimensional loss where each value should be between :math:`[0, C)`.
          If containing class probabilities, same shape as the input and each value should be between :math:`[0, 1]`.
        - Output: If reduction is 'none', shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
          in the case of K-dimensional loss, depending on the shape of the input. Otherwise, scalar.

        where:

        .. math::
            \begin{aligned}
                C ={} & \text{number of classes} \\
                N ={} & \text{batch size} \\
            \end{aligned}

    Examples::

        >>> # Example of target with class indices
        >>> loss = nn.CrossEntropyLoss()
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.empty(3, dtype=torch.long).random_(5)
        >>> output = loss(input, target)
        >>> output.backward()
        >>>
        >>> # Example of target with class probabilities
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5).softmax(dim=1)
        >>> output = loss(input, target)
        >>> output.backward()
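        >>>
        >>> # Additional illustrative sketch (not part of the original examples):
        >>> # per-class weights and label smoothing; the weight values are arbitrary.
        >>> weight = torch.tensor([1.0, 2.0, 1.0, 1.0, 0.5])
        >>> loss = nn.CrossEntropyLoss(weight=weight, label_smoothing=0.1)
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.empty(3, dtype=torch.long).random_(5)
        >>> output = loss(input, target)
        >>> output.backward()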
  939. """
  940. __constants__ = ['ignore_index', 'reduction', 'label_smoothing']
  941. ignore_index: int
  942. label_smoothing: float
  943. def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
  944. reduce=None, reduction: str = 'mean', label_smoothing: float = 0.0) -> None:
  945. super().__init__(weight, size_average, reduce, reduction)
  946. self.ignore_index = ignore_index
  947. self.label_smoothing = label_smoothing
  948. def forward(self, input: Tensor, target: Tensor) -> Tensor:
  949. return F.cross_entropy(input, target, weight=self.weight,
  950. ignore_index=self.ignore_index, reduction=self.reduction,
  951. label_smoothing=self.label_smoothing)


class MultiLabelSoftMarginLoss(_WeightedLoss):
    r"""Creates a criterion that optimizes a multi-label one-versus-all
    loss based on max-entropy, between input :math:`x` and target :math:`y` of size
    :math:`(N, C)`.

    For each sample in the minibatch:

    .. math::
        loss(x, y) = - \frac{1}{C} * \sum_i y[i] * \log((1 + \exp(-x[i]))^{-1})
                         + (1-y[i]) * \log\left(\frac{\exp(-x[i])}{(1 + \exp(-x[i]))}\right)

    where :math:`i \in \left\{0, \; \cdots , \; \text{x.nElement}() - 1\right\}`,
    :math:`y[i] \in \left\{0, \; 1\right\}`.

    Args:
        weight (Tensor, optional): a manual rescaling weight given to each
            class. If given, it has to be a Tensor of size `C`. Otherwise, it is
            treated as if having all ones.
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input: :math:`(N, C)` where `N` is the batch size and `C` is the number of classes.
        - Target: :math:`(N, C)`, label targets padded by -1 ensuring same shape as the input.
        - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`.
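
    Examples (a minimal usage sketch added for illustration; targets are multi-hot 0/1 labels)::

        >>> loss = nn.MultiLabelSoftMarginLoss()
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randint(0, 2, (3, 5)).float()
        >>> output = loss(input, target)
        >>> output.backward()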
  985. """
  986. __constants__ = ['reduction']
  987. def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean') -> None:
  988. super().__init__(weight, size_average, reduce, reduction)
  989. def forward(self, input: Tensor, target: Tensor) -> Tensor:
  990. return F.multilabel_soft_margin_loss(input, target, weight=self.weight, reduction=self.reduction)


class CosineEmbeddingLoss(_Loss):
    r"""Creates a criterion that measures the loss given input tensors
    :math:`x_1`, :math:`x_2` and a `Tensor` label :math:`y` with values 1 or -1.
    This is used for measuring whether two inputs are similar or dissimilar,
    using the cosine similarity, and is typically used for learning nonlinear
    embeddings or semi-supervised learning.

    The loss function for each sample is:

    .. math::
        \text{loss}(x, y) =
        \begin{cases}
        1 - \cos(x_1, x_2), & \text{if } y = 1 \\
        \max(0, \cos(x_1, x_2) - \text{margin}), & \text{if } y = -1
        \end{cases}

    Args:
        margin (float, optional): Should be a number from :math:`-1` to :math:`1`,
            :math:`0` to :math:`0.5` is suggested. If :attr:`margin` is missing, the
            default value is :math:`0`.
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input1: :math:`(N, D)` or :math:`(D)`, where `N` is the batch size and `D` is the embedding dimension.
        - Input2: :math:`(N, D)` or :math:`(D)`, same shape as Input1.
        - Target: :math:`(N)` or :math:`()`.
        - Output: If :attr:`reduction` is ``'none'``, then :math:`(N)`, otherwise scalar.
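
    Examples (a minimal usage sketch added for illustration; shapes and labels are arbitrary)::

        >>> loss = nn.CosineEmbeddingLoss(margin=0.2)
        >>> input1 = torch.randn(3, 128, requires_grad=True)
        >>> input2 = torch.randn(3, 128, requires_grad=True)
        >>> target = torch.tensor([1, -1, 1])
        >>> output = loss(input1, input2, target)
        >>> output.backward()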
  1028. """
  1029. __constants__ = ['margin', 'reduction']
  1030. margin: float
  1031. def __init__(self, margin: float = 0., size_average=None, reduce=None, reduction: str = 'mean') -> None:
  1032. super().__init__(size_average, reduce, reduction)
  1033. self.margin = margin
  1034. def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor:
  1035. return F.cosine_embedding_loss(input1, input2, target, margin=self.margin, reduction=self.reduction)


class MarginRankingLoss(_Loss):
    r"""Creates a criterion that measures the loss given
    inputs :math:`x1`, :math:`x2`, two 1D mini-batch or 0D `Tensors`,
    and a label 1D mini-batch or 0D `Tensor` :math:`y` (containing 1 or -1).

    If :math:`y = 1` then it is assumed the first input should be ranked higher
    (have a larger value) than the second input, and vice-versa for :math:`y = -1`.

    The loss function for each pair of samples in the mini-batch is:

    .. math::
        \text{loss}(x1, x2, y) = \max(0, -y * (x1 - x2) + \text{margin})

    Args:
        margin (float, optional): Has a default value of :math:`0`.
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input1: :math:`(N)` or :math:`()` where `N` is the batch size.
        - Input2: :math:`(N)` or :math:`()`, same shape as the Input1.
        - Target: :math:`(N)` or :math:`()`, same shape as the inputs.
        - Output: scalar. If :attr:`reduction` is ``'none'`` and Input size is not :math:`()`, then :math:`(N)`.

    Examples::

        >>> loss = nn.MarginRankingLoss()
        >>> input1 = torch.randn(3, requires_grad=True)
        >>> input2 = torch.randn(3, requires_grad=True)
        >>> target = torch.randn(3).sign()
        >>> output = loss(input1, input2, target)
        >>> output.backward()
    """
    __constants__ = ['margin', 'reduction']
    margin: float

    def __init__(self, margin: float = 0., size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super().__init__(size_average, reduce, reduction)
        self.margin = margin

    def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor:
        return F.margin_ranking_loss(input1, input2, target, margin=self.margin, reduction=self.reduction)


class MultiMarginLoss(_WeightedLoss):
    r"""Creates a criterion that optimizes a multi-class classification hinge
    loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) and
    output :math:`y` (which is a 1D tensor of target class indices,
    :math:`0 \leq y \leq \text{x.size}(1)-1`):

    For each mini-batch sample, the loss in terms of the 1D input :math:`x` and scalar
    output :math:`y` is:

    .. math::
        \text{loss}(x, y) = \frac{\sum_i \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)}

    where :math:`i \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`
    and :math:`i \neq y`.

    Optionally, you can give non-equal weighting on the classes by passing
    a 1D :attr:`weight` tensor into the constructor.

    The loss function then becomes:

    .. math::
        \text{loss}(x, y) = \frac{\sum_i \max(0, w[y] * (\text{margin} - x[y] + x[i]))^p}{\text{x.size}(0)}

    Args:
        p (int, optional): Has a default value of :math:`1`. :math:`1` and :math:`2`
            are the only supported values.
        margin (float, optional): Has a default value of :math:`1`.
        weight (Tensor, optional): a manual rescaling weight given to each
            class. If given, it has to be a Tensor of size `C`. Otherwise, it is
            treated as if having all ones.
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input: :math:`(N, C)` or :math:`(C)`, where :math:`N` is the batch size and :math:`C` is the number of classes.
        - Target: :math:`(N)` or :math:`()`, where each value is :math:`0 \leq \text{targets}[i] \leq C-1`.
        - Output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the target.

    Examples::

        >>> loss = nn.MultiMarginLoss()
        >>> x = torch.tensor([[0.1, 0.2, 0.4, 0.8]])
        >>> y = torch.tensor([3])
        >>> # 0.25 * ((1-(0.8-0.1)) + (1-(0.8-0.2)) + (1-(0.8-0.4)))
        >>> loss(x, y)
        tensor(0.32...)
    """
    __constants__ = ['p', 'margin', 'reduction']
    margin: float
    p: int

    def __init__(self, p: int = 1, margin: float = 1., weight: Optional[Tensor] = None, size_average=None,
                 reduce=None, reduction: str = 'mean') -> None:
        super().__init__(weight, size_average, reduce, reduction)
        if p != 1 and p != 2:
            raise ValueError("only p == 1 and p == 2 supported")
        assert weight is None or weight.dim() == 1
        self.p = p
        self.margin = margin

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.multi_margin_loss(input, target, p=self.p, margin=self.margin,
                                   weight=self.weight, reduction=self.reduction)


class TripletMarginLoss(_Loss):
    r"""Creates a criterion that measures the triplet loss given input
    tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`.
    This is used for measuring a relative similarity between samples. A triplet
    is composed by `a`, `p` and `n` (i.e., `anchor`, `positive examples` and `negative
    examples` respectively). The shapes of all input tensors should be
    :math:`(N, D)`.

    The distance swap is described in detail in the paper `Learning shallow
    convolutional feature descriptors with triplet losses`_ by
    V. Balntas, E. Riba et al.

    The loss function for each sample in the mini-batch is:

    .. math::
        L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}

    where

    .. math::
        d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p

    See also :class:`~torch.nn.TripletMarginWithDistanceLoss`, which computes the
    triplet margin loss for input tensors using a custom distance function.

    Args:
        margin (float, optional): Default: :math:`1`.
        p (int, optional): The norm degree for pairwise distance. Default: :math:`2`.
        swap (bool, optional): The distance swap is described in detail in the paper
            `Learning shallow convolutional feature descriptors with triplet losses` by
            V. Balntas, E. Riba et al. Default: ``False``.
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``

    Shape:
        - Input: :math:`(N, D)` or :math:`(D)` where :math:`D` is the vector dimension.
        - Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'`` and
          input shape is :math:`(N, D)`; a scalar otherwise.

    Examples::

        >>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
        >>> anchor = torch.randn(100, 128, requires_grad=True)
        >>> positive = torch.randn(100, 128, requires_grad=True)
        >>> negative = torch.randn(100, 128, requires_grad=True)
        >>> output = triplet_loss(anchor, positive, negative)
        >>> output.backward()

    .. _Learning shallow convolutional feature descriptors with triplet losses:
        http://www.bmva.org/bmvc/2016/papers/paper119/index.html
    """
    __constants__ = ['margin', 'p', 'eps', 'swap', 'reduction']
    margin: float
    p: float
    eps: float
    swap: bool

    def __init__(self, margin: float = 1.0, p: float = 2., eps: float = 1e-6, swap: bool = False, size_average=None,
                 reduce=None, reduction: str = 'mean'):
        super().__init__(size_average, reduce, reduction)
        self.margin = margin
        self.p = p
        self.eps = eps
        self.swap = swap

    def forward(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor:
        return F.triplet_margin_loss(anchor, positive, negative, margin=self.margin, p=self.p,
                                     eps=self.eps, swap=self.swap, reduction=self.reduction)


class TripletMarginWithDistanceLoss(_Loss):
    r"""Creates a criterion that measures the triplet loss given input
    tensors :math:`a`, :math:`p`, and :math:`n` (representing anchor,
    positive, and negative examples, respectively), and a nonnegative,
    real-valued function ("distance function") used to compute the relationship
    between the anchor and positive example ("positive distance") and the
    anchor and negative example ("negative distance").

    The unreduced loss (i.e., with :attr:`reduction` set to ``'none'``)
    can be described as:

    .. math::
        \ell(a, p, n) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_i = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}

    where :math:`N` is the batch size; :math:`d` is a nonnegative, real-valued function
    quantifying the closeness of two tensors, referred to as the :attr:`distance_function`;
    and :math:`margin` is a nonnegative margin representing the minimum difference
    between the positive and negative distances that is required for the loss to
    be 0. The input tensors have :math:`N` elements each and can be of any shape
    that the distance function can handle.

    If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    See also :class:`~torch.nn.TripletMarginLoss`, which computes the triplet
    loss for input tensors using the :math:`l_p` distance as the distance function.

    Args:
        distance_function (Callable, optional): A nonnegative, real-valued function that
            quantifies the closeness of two tensors. If not specified,
            `nn.PairwiseDistance` will be used. Default: ``None``
        margin (float, optional): A nonnegative margin representing the minimum difference
            between the positive and negative distances required for the loss to be 0. Larger
            margins penalize cases where the negative examples are not distant enough from the
            anchors, relative to the positives. Default: :math:`1`.
        swap (bool, optional): Whether to use the distance swap described in the paper
            `Learning shallow convolutional feature descriptors with triplet losses` by
            V. Balntas, E. Riba et al. If True, and if the positive example is closer to the
            negative example than the anchor is, swaps the positive example and the anchor in
            the loss computation. Default: ``False``.
        reduction (str, optional): Specifies the (optional) reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``

    Shape:
        - Input: :math:`(N, *)` where :math:`*` represents any number of additional dimensions
          as supported by the distance function.
        - Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'``, or a scalar
          otherwise.

    Examples::

        >>> # Initialize embeddings
        >>> embedding = nn.Embedding(1000, 128)
        >>> anchor_ids = torch.randint(0, 1000, (1,))
        >>> positive_ids = torch.randint(0, 1000, (1,))
        >>> negative_ids = torch.randint(0, 1000, (1,))
        >>> anchor = embedding(anchor_ids)
        >>> positive = embedding(positive_ids)
        >>> negative = embedding(negative_ids)
        >>>
        >>> # Built-in Distance Function
        >>> triplet_loss = \
        >>>     nn.TripletMarginWithDistanceLoss(distance_function=nn.PairwiseDistance())
        >>> output = triplet_loss(anchor, positive, negative)
        >>> output.backward()
        >>>
        >>> # Custom Distance Function
        >>> def l_infinity(x1, x2):
        >>>     return torch.max(torch.abs(x1 - x2), dim=1).values
        >>>
        >>> # xdoctest: +SKIP("FIXME: Would call backwards a second time")
        >>> triplet_loss = (
        >>>     nn.TripletMarginWithDistanceLoss(distance_function=l_infinity, margin=1.5))
        >>> output = triplet_loss(anchor, positive, negative)
        >>> output.backward()
        >>>
        >>> # Custom Distance Function (Lambda)
        >>> triplet_loss = (
        >>>     nn.TripletMarginWithDistanceLoss(
        >>>         distance_function=lambda x, y: 1.0 - F.cosine_similarity(x, y)))
        >>> output = triplet_loss(anchor, positive, negative)
        >>> output.backward()

    Reference:
        V. Balntas, et al.: Learning shallow convolutional feature descriptors with triplet losses:
        http://www.bmva.org/bmvc/2016/papers/paper119/index.html
    """
    __constants__ = ['margin', 'swap', 'reduction']
    margin: float
    swap: bool

    def __init__(self, *, distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
                 margin: float = 1.0, swap: bool = False, reduction: str = 'mean'):
        super().__init__(size_average=None, reduce=None, reduction=reduction)
        self.distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = \
            distance_function if distance_function is not None else PairwiseDistance()
        self.margin = margin
        self.swap = swap

    def forward(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor:
        return F.triplet_margin_with_distance_loss(anchor, positive, negative,
                                                   distance_function=self.distance_function,
                                                   margin=self.margin, swap=self.swap, reduction=self.reduction)


class CTCLoss(_Loss):
    r"""The Connectionist Temporal Classification loss.

    Calculates loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the
    probability of possible alignments of input to target, producing a loss value which is differentiable
    with respect to each input node. The alignment of input to target is assumed to be "many-to-one", which
    limits the length of the target sequence such that it must be :math:`\leq` the input length.

    Args:
        blank (int, optional): blank label. Default :math:`0`.
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the output losses will be divided by the target lengths and
            then the mean over the batch is taken, ``'sum'``: the output losses will
            be summed. Default: ``'mean'``
        zero_infinity (bool, optional):
            Whether to zero infinite losses and the associated gradients.
            Default: ``False``
            Infinite losses mainly occur when the inputs are too short
            to be aligned to the targets.

    Shape:
        - Log_probs: Tensor of size :math:`(T, N, C)` or :math:`(T, C)`,
          where :math:`T = \text{input length}`,
          :math:`N = \text{batch size}`, and
          :math:`C = \text{number of classes (including blank)}`.
          The logarithmized probabilities of the outputs (e.g. obtained with
          :func:`torch.nn.functional.log_softmax`).
        - Targets: Tensor of size :math:`(N, S)` or
          :math:`(\operatorname{sum}(\text{target\_lengths}))`,
          where :math:`N = \text{batch size}` and
          :math:`S = \text{max target length, if shape is } (N, S)`.
          It represents the target sequences. Each element in the target
          sequence is a class index. And the target index cannot be blank (default=0).
          In the :math:`(N, S)` form, targets are padded to the
          length of the longest sequence, and stacked.
          In the :math:`(\operatorname{sum}(\text{target\_lengths}))` form,
          the targets are assumed to be un-padded and
          concatenated within 1 dimension.
        - Input_lengths: Tuple or tensor of size :math:`(N)` or :math:`()`,
          where :math:`N = \text{batch size}`. It represents the lengths of the
          inputs (must each be :math:`\leq T`). And the lengths are specified
          for each sequence to achieve masking under the assumption that sequences
          are padded to equal lengths.
        - Target_lengths: Tuple or tensor of size :math:`(N)` or :math:`()`,
          where :math:`N = \text{batch size}`. It represents the lengths of the targets.
          Lengths are specified for each sequence to achieve masking under the
          assumption that sequences are padded to equal lengths. If target shape is
          :math:`(N,S)`, target_lengths are effectively the stop index
          :math:`s_n` for each target sequence, such that ``target_n = targets[n,0:s_n]`` for
          each target in a batch. Lengths must each be :math:`\leq S`.
          If the targets are given as a 1d tensor that is the concatenation of individual
          targets, the target_lengths must add up to the total length of the tensor.
        - Output: scalar. If :attr:`reduction` is ``'none'``, then
          :math:`(N)` if input is batched or :math:`()` if input is unbatched, where :math:`N = \text{batch size}`.

    Examples::

        >>> # Targets are to be padded
        >>> T = 50      # Input sequence length
        >>> C = 20      # Number of classes (including blank)
        >>> N = 16      # Batch size
        >>> S = 30      # Target sequence length of longest target in batch (padding length)
        >>> S_min = 10  # Minimum target length, for demonstration purposes
        >>>
        >>> # Initialize random batch of input vectors, for *size = (T,N,C)
        >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
        >>>
        >>> # Initialize random batch of targets (0 = blank, 1:C = classes)
        >>> target = torch.randint(low=1, high=C, size=(N, S), dtype=torch.long)
        >>>
        >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long)
        >>> target_lengths = torch.randint(low=S_min, high=S, size=(N,), dtype=torch.long)
        >>> ctc_loss = nn.CTCLoss()
        >>> loss = ctc_loss(input, target, input_lengths, target_lengths)
        >>> loss.backward()
        >>>
        >>>
        >>> # Targets are to be un-padded
        >>> T = 50      # Input sequence length
        >>> C = 20      # Number of classes (including blank)
        >>> N = 16      # Batch size
        >>>
        >>> # Initialize random batch of input vectors, for *size = (T,N,C)
        >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
        >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long)
        >>>
        >>> # Initialize random batch of targets (0 = blank, 1:C = classes)
        >>> target_lengths = torch.randint(low=1, high=T, size=(N,), dtype=torch.long)
        >>> target = torch.randint(low=1, high=C, size=(sum(target_lengths),), dtype=torch.long)
        >>> ctc_loss = nn.CTCLoss()
        >>> loss = ctc_loss(input, target, input_lengths, target_lengths)
        >>> loss.backward()
        >>>
        >>>
        >>> # Targets are to be un-padded and unbatched (effectively N=1)
        >>> T = 50      # Input sequence length
        >>> C = 20      # Number of classes (including blank)
        >>>
        >>> # Initialize random batch of input vectors, for *size = (T,C)
        >>> # xdoctest: +SKIP("FIXME: error in doctest")
        >>> input = torch.randn(T, C).log_softmax(1).detach().requires_grad_()
        >>> input_lengths = torch.tensor(T, dtype=torch.long)
        >>>
        >>> # Initialize random batch of targets (0 = blank, 1:C = classes)
        >>> target_lengths = torch.randint(low=1, high=T, size=(), dtype=torch.long)
        >>> target = torch.randint(low=1, high=C, size=(target_lengths,), dtype=torch.long)
        >>> ctc_loss = nn.CTCLoss()
        >>> loss = ctc_loss(input, target, input_lengths, target_lengths)
        >>> loss.backward()

    Reference:
        A. Graves et al.: Connectionist Temporal Classification:
        Labelling Unsegmented Sequence Data with Recurrent Neural Networks:
        https://www.cs.toronto.edu/~graves/icml_2006.pdf

    Note:
        In order to use CuDNN, the following must be satisfied: :attr:`targets` must be
        in concatenated format, all :attr:`input_lengths` must be `T`. :math:`blank=0`,
        :attr:`target_lengths` :math:`\leq 256`, the integer arguments must be of
        dtype :attr:`torch.int32`.

        The regular implementation uses the (more common in PyTorch) `torch.long` dtype.

    Note:
        In some circumstances when using the CUDA backend with CuDNN, this operator
        may select a nondeterministic algorithm to increase performance. If this is
        undesirable, you can try to make the operation deterministic (potentially at
        a performance cost) by setting ``torch.backends.cudnn.deterministic =
        True``.
        Please see the notes on :doc:`/notes/randomness` for background.
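
    Examples (an illustrative sketch of the CuDNN-eligible setup described in the first note
    above: concatenated targets, ``torch.int32`` length tensors, and ``blank=0``; the sizes
    chosen here are arbitrary)::

        >>> T, C, N = 50, 20, 16
        >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
        >>> input_lengths = torch.full((N,), T, dtype=torch.int32)
        >>> target_lengths = torch.randint(low=1, high=30, size=(N,), dtype=torch.int32)
        >>> target = torch.randint(low=1, high=C, size=(int(target_lengths.sum()),), dtype=torch.int32)
        >>> ctc_loss = nn.CTCLoss(blank=0)
        >>> loss = ctc_loss(input, target, input_lengths, target_lengths)
        >>> loss.backward()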
  1435. """
  1436. __constants__ = ['blank', 'reduction']
  1437. blank: int
  1438. zero_infinity: bool
  1439. def __init__(self, blank: int = 0, reduction: str = 'mean', zero_infinity: bool = False):
  1440. super().__init__(reduction=reduction)
  1441. self.blank = blank
  1442. self.zero_infinity = zero_infinity
  1443. def forward(self, log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor) -> Tensor:
  1444. return F.ctc_loss(log_probs, targets, input_lengths, target_lengths, self.blank, self.reduction,
  1445. self.zero_infinity)


# TODO: L1HingeEmbeddingCriterion
# TODO: MSECriterion weight
# TODO: ClassSimplexCriterion