# dirichlet.py

import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily

__all__ = ['Dirichlet']


# This helper is exposed for testing.
def _Dirichlet_backward(x, concentration, grad_output):
    total = concentration.sum(-1, True).expand_as(concentration)
    grad = torch._dirichlet_grad(x, concentration, total)
    return grad * (grad_output - (x * grad_output).sum(-1, True))
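
# For reference: torch._dirichlet_grad(x, concentration, total) is an internal
# ATen helper that computes the derivative of the sample x with respect to the
# concentration (holding the underlying randomness fixed), and the trailing
# factor subtracts the x-weighted mean of grad_output so the incoming gradient
# respects the constraint x.sum(-1) == 1. A minimal sketch of exercising the
# helper directly (values illustrative):
#
#     conc = torch.tensor([1.5, 2.0, 0.5])
#     x = torch._sample_dirichlet(conc)
#     g = _Dirichlet_backward(x, conc, torch.randn_like(x))  # same shape as conc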


class _Dirichlet(Function):
    @staticmethod
    def forward(ctx, concentration):
        x = torch._sample_dirichlet(concentration)
        ctx.save_for_backward(x, concentration)
        return x

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        x, concentration = ctx.saved_tensors
        return _Dirichlet_backward(x, concentration, grad_output)
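
# Note: @once_differentiable runs backward with gradient tracking disabled, so
# attempting a second-order derivative through _Dirichlet.apply raises an
# error instead of silently returning wrong gradients.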


class Dirichlet(ExponentialFamily):
    r"""
    Creates a Dirichlet distribution parameterized by concentration :attr:`concentration`.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Dirichlet(torch.tensor([0.5, 0.5]))
        >>> m.sample()  # Dirichlet distributed with concentration [0.5, 0.5]
        tensor([ 0.1046,  0.8954])

    Args:
        concentration (Tensor): concentration parameter of the distribution
            (often referred to as alpha)
    """
    arg_constraints = {'concentration': constraints.independent(constraints.positive, 1)}
    support = constraints.simplex
    has_rsample = True

    def __init__(self, concentration, validate_args=None):
        if concentration.dim() < 1:
            raise ValueError("`concentration` parameter must be at least one-dimensional.")
        self.concentration = concentration
        batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:]
        super().__init__(batch_shape, event_shape, validate_args=validate_args)
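
    # Shape conventions: the trailing dimension of `concentration` is the
    # event dimension (the simplex); any leading dimensions are batch. A sketch:
    #
    #     d = Dirichlet(torch.ones(5, 3))
    #     d.batch_shape     # torch.Size([5])
    #     d.event_shape     # torch.Size([3])
    #     d.sample().shape  # torch.Size([5, 3])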

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Dirichlet, _instance)
        batch_shape = torch.Size(batch_shape)
        new.concentration = self.concentration.expand(batch_shape + self.event_shape)
        super(Dirichlet, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def rsample(self, sample_shape=()):
        shape = self._extended_shape(sample_shape)
        concentration = self.concentration.expand(shape)
        return _Dirichlet.apply(concentration)
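
    # A minimal usage sketch of the pathwise derivative: gradients flow from
    # the sample back to `concentration` through _Dirichlet.apply.
    #
    #     conc = torch.tensor([0.5, 0.5], requires_grad=True)
    #     loss = Dirichlet(conc).rsample().pow(2).sum()
    #     loss.backward()  # conc.grad is now populated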

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
                torch.lgamma(self.concentration.sum(-1)) -
                torch.lgamma(self.concentration).sum(-1))
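
    # The expression above is the Dirichlet log density,
    #     log p(x) = sum_i (alpha_i - 1) * log(x_i)
    #                + lgamma(sum_i alpha_i) - sum_i lgamma(alpha_i),
    # reduced over the event dimension, giving one scalar per batch element.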

    @property
    def mean(self):
        return self.concentration / self.concentration.sum(-1, True)

    @property
    def mode(self):
        concentrationm1 = (self.concentration - 1).clamp(min=0.)
        mode = concentrationm1 / concentrationm1.sum(-1, True)
        mask = (self.concentration < 1).all(dim=-1)
        mode[mask] = torch.nn.functional.one_hot(
            mode[mask].argmax(dim=-1), concentrationm1.shape[-1]).to(mode)
        return mode
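
    # Mode notes: when every alpha_i > 1 the mode is
    # (alpha_i - 1) / (sum_j alpha_j - k). Clamping zeroes out entries with
    # alpha_i < 1; if *all* entries are below 1 the clamped numerator is all
    # zeros (0/0), and the density instead peaks at a vertex of the simplex,
    # so the code substitutes a one-hot at the largest concentration by
    # convention.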

    @property
    def variance(self):
        con0 = self.concentration.sum(-1, True)
        return self.concentration * (con0 - self.concentration) / (con0.pow(2) * (con0 + 1))

    def entropy(self):
        k = self.concentration.size(-1)
        a0 = self.concentration.sum(-1)
        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
                (k - a0) * torch.digamma(a0) -
                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
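
    # Closed form used above (psi is the digamma function):
    #     H = sum_i lgamma(alpha_i) - lgamma(a0)
    #         - (k - a0) * psi(a0)
    #         - sum_i (alpha_i - 1) * psi(alpha_i),
    # with a0 = sum_i alpha_i and k the event size.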

    @property
    def _natural_params(self):
        return (self.concentration, )

    def _log_normalizer(self, x):
        return x.lgamma().sum(-1) - torch.lgamma(x.sum(-1))
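
    # Exponential-family view: with natural parameter alpha and sufficient
    # statistic log(x), the log-normalizer is
    #     A(alpha) = sum_i lgamma(alpha_i) - lgamma(sum_i alpha_i),
    # which is what _log_normalizer returns; the ExponentialFamily base class
    # can derive quantities such as entropy and KL divergence from Bregman
    # divergences of this function.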