"""
This closely follows the implementation in NumPyro (https://github.com/pyro-ppl/numpyro).

Original copyright notice:

# Copyright: Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
import math

import torch
from torch.distributions import constraints, Beta
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all

__all__ = ['LKJCholesky']


class LKJCholesky(Distribution):
    r"""
    LKJ distribution for lower Cholesky factor of correlation matrices.

    The distribution is controlled by ``concentration`` parameter :math:`\eta`
    to make the probability of the correlation matrix :math:`M` generated from
    a Cholesky factor proportional to :math:`\det(M)^{\eta - 1}`. Because of that,
    when ``concentration == 1``, we have a uniform distribution over Cholesky
    factors of correlation matrices::

        L ~ LKJCholesky(dim, concentration)
        X = L @ L' ~ LKJCorr(dim, concentration)

    Note that this distribution samples the Cholesky factor of correlation
    matrices and not the correlation matrices themselves, and thereby differs
    slightly from the derivations in [1] for the `LKJCorr` distribution. For
    sampling, this uses the Onion method from [1] Section 3.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> l = LKJCholesky(3, 0.5)
        >>> l.sample()  # l @ l.T is a sample of a 3x3 correlation matrix
        tensor([[ 1.0000,  0.0000,  0.0000],
                [ 0.3516,  0.9361,  0.0000],
                [-0.1899,  0.4748,  0.8593]])

    Args:
        dim (int): dimension of the matrices
        concentration (float or Tensor): concentration/shape parameter of the
            distribution (often referred to as eta)

    **References**

    [1] `Generating random correlation matrices based on vines and extended onion method` (2009),
    Daniel Lewandowski, Dorota Kurowicka, Harry Joe.
    Journal of Multivariate Analysis. 100. 10.1016/j.jmva.2009.04.008
    """
    arg_constraints = {'concentration': constraints.positive}
    support = constraints.corr_cholesky

    def __init__(self, dim, concentration=1., validate_args=None):
        if dim < 2:
            raise ValueError(
                f'Expected dim to be an integer greater than or equal to 2. Found dim={dim}.'
            )
        self.dim = dim
        self.concentration, = broadcast_all(concentration)
        batch_shape = self.concentration.size()
        event_shape = torch.Size((dim, dim))
        # This is used to draw vectorized samples from the beta distribution in Sec. 3.2 of [1].
        marginal_conc = self.concentration + 0.5 * (self.dim - 2)
        offset = torch.arange(self.dim - 1, dtype=self.concentration.dtype, device=self.concentration.device)
        offset = torch.cat([offset.new_zeros((1,)), offset])
        beta_conc1 = offset + 0.5
        beta_conc0 = marginal_conc.unsqueeze(-1) - 0.5 * offset
        self._beta = Beta(beta_conc1, beta_conc0)
        super().__init__(batch_shape, event_shape, validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(LKJCholesky, _instance)
        batch_shape = torch.Size(batch_shape)
        new.dim = self.dim
        new.concentration = self.concentration.expand(batch_shape)
        new._beta = self._beta.expand(batch_shape + (self.dim,))
        super(LKJCholesky, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def sample(self, sample_shape=torch.Size()):
        # This uses the Onion method, but there are a few differences from [1] Sec. 3.2:
        # - This vectorizes the for loop and also works for heterogeneous eta.
        # - Same algorithm generalizes to n=1.
        # - The procedure is simplified since we are sampling the Cholesky factor of
        #   the correlation matrix instead of the correlation matrix itself. As such,
        #   we only need to generate `w`.
        y = self._beta.sample(sample_shape).unsqueeze(-1)
        u_normal = torch.randn(self._extended_shape(sample_shape),
                               dtype=y.dtype,
                               device=y.device).tril(-1)
        u_hypersphere = u_normal / u_normal.norm(dim=-1, keepdim=True)
        # Replace NaNs in first row
        u_hypersphere[..., 0, :].fill_(0.)
        w = torch.sqrt(y) * u_hypersphere
        # Fill diagonal elements; clamp for numerical stability
        eps = torch.finfo(w.dtype).tiny
        diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt()
        w += torch.diag_embed(diag_elems)
        return w

    def log_prob(self, value):
        # See: https://mc-stan.org/docs/2_25/functions-reference/cholesky-lkj-correlation-distribution.html
        # The probability of a correlation matrix is proportional to
        #   determinant ** (concentration - 1) = prod(L_ii ^ 2(concentration - 1))
        # Additionally, the Jacobian of the transformation from Cholesky factor to
        # correlation matrix is:
        #   prod(L_ii ^ (D - i))
        # So the probability of a Cholesky factor is proportional to
        #   prod(L_ii ^ (2 * concentration - 2 + D - i)) = prod(L_ii ^ order_i)
        # with order_i = 2 * concentration - 2 + D - i
        if self._validate_args:
            self._validate_sample(value)
        diag_elems = value.diagonal(dim1=-1, dim2=-2)[..., 1:]
        order = torch.arange(2, self.dim + 1, device=self.concentration.device)
        order = 2 * (self.concentration - 1).unsqueeze(-1) + self.dim - order
        unnormalized_log_pdf = torch.sum(order * diag_elems.log(), dim=-1)
        # Compute normalization constant (page 1999 of [1])
        dm1 = self.dim - 1
        alpha = self.concentration + 0.5 * dm1
        denominator = torch.lgamma(alpha) * dm1
        numerator = torch.mvlgamma(alpha - 0.5, dm1)
        # pi_constant in [1] is D * (D - 1) / 4 * log(pi)
        # pi_constant in multigammaln is (D - 1) * (D - 2) / 4 * log(pi)
        # hence, we need to add a pi_constant = (D - 1) * log(pi) / 2
        pi_constant = 0.5 * dm1 * math.log(math.pi)
        normalize_term = pi_constant + numerator - denominator
        return unnormalized_log_pdf - normalize_term
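

# ---------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the original module): a
# minimal, hedged example of drawing a sample and evaluating log_prob. It
# relies only on the public API defined above and standard PyTorch calls;
# seed and dimensions are arbitrary illustration values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    torch.manual_seed(0)
    lkj = LKJCholesky(dim=3, concentration=0.5)
    L = lkj.sample()                    # lower-triangular Cholesky factor
    corr = L @ L.transpose(-1, -2)      # corresponding correlation matrix (unit diagonal)
    print('Cholesky factor:\n', L)
    print('Correlation matrix:\n', corr)
    print('log_prob of the draw:', lkj.log_prob(L).item())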