log_normal.py

from torch.distributions import constraints
from torch.distributions.transforms import ExpTransform
from torch.distributions.normal import Normal
from torch.distributions.transformed_distribution import TransformedDistribution

__all__ = ['LogNormal']


class LogNormal(TransformedDistribution):
    r"""
    Creates a log-normal distribution parameterized by
    :attr:`loc` and :attr:`scale` where::

        X ~ Normal(loc, scale)
        Y = exp(X) ~ LogNormal(loc, scale)

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = LogNormal(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample()  # log-normal distributed, with underlying Normal mean=0 and stddev=1
        tensor([ 0.1046])

    Args:
        loc (float or Tensor): mean of log of the distribution
        scale (float or Tensor): standard deviation of log of the distribution
    """

    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.positive
    has_rsample = True

    def __init__(self, loc, scale, validate_args=None):
        base_dist = Normal(loc, scale, validate_args=validate_args)
        super().__init__(base_dist, ExpTransform(), validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(LogNormal, _instance)
        return super().expand(batch_shape, _instance=new)

    @property
    def loc(self):
        return self.base_dist.loc

    @property
    def scale(self):
        return self.base_dist.scale

    @property
    def mean(self):
        # E[Y] = exp(loc + scale^2 / 2)
        return (self.loc + self.scale.pow(2) / 2).exp()

    @property
    def mode(self):
        # mode(Y) = exp(loc - scale^2)
        return (self.loc - self.scale.square()).exp()

    @property
    def variance(self):
        # Var[Y] = (exp(scale^2) - 1) * exp(2 * loc + scale^2)
        scale_sq = self.scale.pow(2)
        return scale_sq.expm1() * (2 * self.loc + scale_sq).exp()

    def entropy(self):
        # H[Y] = H[X] + E[X] = H[X] + loc, since the exp transform's log-|Jacobian| is x
        return self.base_dist.entropy() + self.loc
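

# Illustrative usage sketch: a minimal check, assuming torch is installed, that
# the closed-form `mean` / `variance` properties above agree with Monte Carlo
# estimates from `sample()` up to sampling noise, and that `log_prob` follows the
# change of variables for Y = exp(X): log p_Y(y) = log p_X(log y) - log y.
if __name__ == "__main__":
    import torch

    m = LogNormal(torch.tensor([0.0]), torch.tensor([1.0]))
    samples = m.sample((100_000,))  # shape (100000, 1); support is positive

    # exp(0 + 1/2) ~= 1.6487 and (e - 1) * e ~= 4.6708 vs. empirical estimates
    print(m.mean, samples.mean(0))
    print(m.variance, samples.var(0))

    # log p_Y(1) = Normal(0, 1).log_prob(log 1) - log 1 = -0.5 * log(2 * pi) ~= -0.9189
    print(m.log_prob(torch.tensor([1.0])))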