categorical.py

import torch
from torch import nan
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import probs_to_logits, logits_to_probs, lazy_property

__all__ = ['Categorical']


class Categorical(Distribution):
    r"""
    Creates a categorical distribution parameterized by either :attr:`probs` or
    :attr:`logits` (but not both).

    .. note::
        It is equivalent to the distribution that :func:`torch.multinomial`
        samples from.

    Samples are integers from :math:`\{0, \ldots, K-1\}` where `K` is ``probs.size(-1)``.

    If `probs` is 1-dimensional with length-`K`, each element is the relative probability
    of sampling the class at that index.

    If `probs` is N-dimensional, the first N-1 dimensions are treated as a batch of
    relative probability vectors.

    .. note:: The `probs` argument must be non-negative, finite and have a non-zero sum,
              and it will be normalized to sum to 1 along the last dimension. :attr:`probs`
              will return this normalized value.
              The `logits` argument will be interpreted as unnormalized log probabilities
              and can therefore be any real number. It will likewise be normalized so that
              the resulting probabilities sum to 1 along the last dimension. :attr:`logits`
              will return this normalized value.

    See also: :func:`torch.multinomial`

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Categorical(torch.tensor([0.25, 0.25, 0.25, 0.25]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
        tensor(3)
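        >>> # An equivalent parameterization via unnormalized logits: all-zero
        >>> # logits also give a uniform distribution (illustrative; the
        >>> # sampled value will vary):
        >>> m = Categorical(logits=torch.zeros(4))
        >>> m.sample()
        tensor(1)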

    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities (unnormalized)
    """
    arg_constraints = {'probs': constraints.simplex,
                       'logits': constraints.real_vector}
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            if probs.dim() < 1:
                raise ValueError("`probs` parameter must be at least one-dimensional.")
            self.probs = probs / probs.sum(-1, keepdim=True)
        else:
            if logits.dim() < 1:
                raise ValueError("`logits` parameter must be at least one-dimensional.")
            # Normalize so that logsumexp over the last dimension is 0
            self.logits = logits - logits.logsumexp(dim=-1, keepdim=True)
        self._param = self.probs if probs is not None else self.logits
        self._num_events = self._param.size()[-1]
        batch_shape = self._param.size()[:-1] if self._param.ndimension() > 1 else torch.Size()
        super().__init__(batch_shape, validate_args=validate_args)
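
    # Illustrative note on the normalization above (example values are
    # arbitrary): both of the following construct the same distribution,
    #
    #   Categorical(probs=torch.tensor([1.0, 3.0]))      # stored probs: [0.25, 0.75]
    #   Categorical(logits=torch.tensor([0.0, 1.0986]))  # 1.0986 ~= log(3)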

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Categorical, _instance)
        batch_shape = torch.Size(batch_shape)
        param_shape = batch_shape + torch.Size((self._num_events,))
        if 'probs' in self.__dict__:
            new.probs = self.probs.expand(param_shape)
            new._param = new.probs
        if 'logits' in self.__dict__:
            new.logits = self.logits.expand(param_shape)
            new._param = new.logits
        new._num_events = self._num_events
        super(Categorical, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._param.new(*args, **kwargs)

    @constraints.dependent_property(is_discrete=True, event_dim=0)
    def support(self):
        return constraints.integer_interval(0, self._num_events - 1)

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits)

    @property
    def param_shape(self):
        return self._param.size()

    @property
    def mean(self):
        return torch.full(self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device)

    @property
    def mode(self):
        return self.probs.argmax(dim=-1)

    @property
    def variance(self):
        return torch.full(self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device)
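
    # Note: mean and variance are nan by design; the support {0, ..., K-1}
    # is a set of unordered category labels, so moments are not meaningful.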

    def sample(self, sample_shape=torch.Size()):
        if not isinstance(sample_shape, torch.Size):
            sample_shape = torch.Size(sample_shape)
        probs_2d = self.probs.reshape(-1, self._num_events)
        samples_2d = torch.multinomial(probs_2d, sample_shape.numel(), True).T
        return samples_2d.reshape(self._extended_shape(sample_shape))
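
    # Shape walk-through for sample() (illustrative): with probs of shape
    # (2, 3) and sample_shape=(4,), probs_2d has shape (2, 3), multinomial
    # returns (2, 4), the transpose gives (4, 2), and the final reshape
    # yields the extended shape (4, 2) == sample_shape + batch_shape.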

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        value = value.long().unsqueeze(-1)
        value, log_pmf = torch.broadcast_tensors(value, self.logits)
        value = value[..., :1]
        return log_pmf.gather(-1, value).squeeze(-1)
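
    # Worked example for log_prob() (illustrative): with logits of shape (3,)
    # and value = tensor(2), the value is unsqueezed to shape (1,), broadcast
    # against the logits to (3,), truncated back to its first entry, and
    # gather then selects logits[2]; the final squeeze returns a scalar.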

    def entropy(self):
        min_real = torch.finfo(self.logits.dtype).min
        logits = torch.clamp(self.logits, min=min_real)
        p_log_p = logits * self.probs
        return -p_log_p.sum(-1)
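
    # Note on the clamp in entropy(): a zero-probability category has logit
    # -inf, and 0 * -inf would be nan; clamping to the dtype's most negative
    # finite value makes that term contribute 0, as it should.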

    def enumerate_support(self, expand=True):
        num_events = self._num_events
        values = torch.arange(num_events, dtype=torch.long, device=self._param.device)
        values = values.view((-1,) + (1,) * len(self._batch_shape))
        if expand:
            values = values.expand((-1,) + self._batch_shape)
        return values
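

# Minimal usage sketch (illustrative; the tensor values are arbitrary):
if __name__ == "__main__":
    m = Categorical(probs=torch.tensor([0.1, 0.2, 0.3, 0.4]))
    s = m.sample((5,))            # five draws from {0, 1, 2, 3}
    print(s)
    print(m.log_prob(s))          # log-probability of each draw
    print(m.entropy())            # entropy of the distribution
    print(m.enumerate_support())  # tensor([0, 1, 2, 3])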