import math
import warnings
from functools import total_ordering
from typing import Type, Dict, Callable, Tuple

import torch
from torch import inf

from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exponential import Exponential
from .exp_family import ExponentialFamily
from .gamma import Gamma
from .geometric import Geometric
from .gumbel import Gumbel
from .half_normal import HalfNormal
from .independent import Independent
from .laplace import Laplace
from .lowrank_multivariate_normal import (LowRankMultivariateNormal, _batch_lowrank_logdet,
                                          _batch_lowrank_mahalanobis)
from .multivariate_normal import (MultivariateNormal, _batch_mahalanobis)
from .normal import Normal
from .one_hot_categorical import OneHotCategorical
from .pareto import Pareto
from .poisson import Poisson
from .transformed_distribution import TransformedDistribution
from .uniform import Uniform
from .utils import _sum_rightmost, euler_constant as _euler_gamma

_KL_REGISTRY = {}  # Source of truth mapping a few general (type, type) pairs to functions.
_KL_MEMOIZE: Dict[Tuple[Type, Type], Callable] = {}  # Memoized version mapping many specific (type, type) pairs to functions.

__all__ = ["register_kl", "kl_divergence"]


def register_kl(type_p, type_q):
    """
    Decorator to register a pairwise function with :meth:`kl_divergence`.
    Usage::

        @register_kl(Normal, Normal)
        def kl_normal_normal(p, q):
            # insert implementation here

    Lookup returns the most specific (type, type) match ordered by subclass. If
    the match is ambiguous, a `RuntimeWarning` is raised. For example to
    resolve the ambiguous situation::

        @register_kl(BaseP, DerivedQ)
        def kl_version1(p, q): ...
        @register_kl(DerivedP, BaseQ)
        def kl_version2(p, q): ...

    you should register a third most-specific implementation, e.g.::

        register_kl(DerivedP, DerivedQ)(kl_version1)  # Break the tie.

    Args:
        type_p (type): A subclass of :class:`~torch.distributions.Distribution`.
        type_q (type): A subclass of :class:`~torch.distributions.Distribution`.
    """
    if not isinstance(type_p, type) or not issubclass(type_p, Distribution):
        raise TypeError('Expected type_p to be a Distribution subclass but got {}'.format(type_p))
    if not isinstance(type_q, type) or not issubclass(type_q, Distribution):
        raise TypeError('Expected type_q to be a Distribution subclass but got {}'.format(type_q))

    def decorator(fun):
        _KL_REGISTRY[type_p, type_q] = fun
        _KL_MEMOIZE.clear()  # reset since lookup order may have changed
        return fun
    return decorator
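

# The sketch below is illustrative only (not part of the upstream registry): it shows
# that a subclass of an already-registered type needs no extra registration, because
# kl_divergence() dispatches to the most specific registered (type, type) pair.
def _example_subclass_dispatch():
    """Illustrative sketch; ``_LocalNormal`` is a hypothetical subclass used only here."""
    class _LocalNormal(Normal):
        pass

    p = _LocalNormal(torch.tensor(0.0), torch.tensor(1.0))
    q = Normal(torch.tensor(1.0), torch.tensor(2.0))
    # Falls back to the (Normal, Normal) registration defined later in this module.
    return kl_divergence(p, q)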


@total_ordering
class _Match:
    __slots__ = ['types']

    def __init__(self, *types):
        self.types = types

    def __eq__(self, other):
        return self.types == other.types

    def __le__(self, other):
        for x, y in zip(self.types, other.types):
            if not issubclass(x, y):
                return False
            if x is not y:
                break
        return True


def _dispatch_kl(type_p, type_q):
    """
    Find the most specific approximate match, assuming single inheritance.
    """
    matches = [(super_p, super_q) for super_p, super_q in _KL_REGISTRY
               if issubclass(type_p, super_p) and issubclass(type_q, super_q)]
    if not matches:
        return NotImplemented
    # Check that the left- and right- lexicographic orders agree.
    # mypy isn't smart enough to know that _Match implements __lt__
    # see: https://github.com/python/typing/issues/760#issuecomment-710670503
    left_p, left_q = min(_Match(*m) for m in matches).types  # type: ignore[type-var]
    right_q, right_p = min(_Match(*reversed(m)) for m in matches).types  # type: ignore[type-var]
    left_fun = _KL_REGISTRY[left_p, left_q]
    right_fun = _KL_REGISTRY[right_p, right_q]
    if left_fun is not right_fun:
        warnings.warn('Ambiguous kl_divergence({}, {}). Please register_kl({}, {})'.format(
            type_p.__name__, type_q.__name__, left_p.__name__, right_q.__name__),
            RuntimeWarning)
    return left_fun


def _infinite_like(tensor):
    """
    Helper function for obtaining an all-infinite KL Divergence tensor matching the input.
    """
    return torch.full_like(tensor, inf)


def _x_log_x(tensor):
    """
    Utility function for calculating x log x.
    """
    return tensor * tensor.log()


def _batch_trace_XXT(bmat):
    """
    Utility function for calculating the trace of XX^{T} with X having arbitrary leading batch dimensions.
    """
    n = bmat.size(-1)
    m = bmat.size(-2)
    flat_trace = bmat.reshape(-1, m * n).pow(2).sum(-1)
    return flat_trace.reshape(bmat.shape[:-2])
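

# Illustrative sketch (not part of the upstream module): a quick consistency check that
# _batch_trace_XXT(X) matches an explicit per-batch computation of trace(X @ X.T),
# which is simply the sum of squared entries of X.
def _example_batch_trace_xxt_check():
    X = torch.randn(5, 3, 4)  # a batch of five 3x4 matrices
    explicit = torch.einsum('bij,bij->b', X, X)  # trace(X X^T) per batch element
    return torch.allclose(_batch_trace_XXT(X), explicit)  # expected: True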


def kl_divergence(p: Distribution, q: Distribution) -> torch.Tensor:
    r"""
    Compute Kullback-Leibler divergence :math:`KL(p \| q)` between two distributions.

    .. math::

        KL(p \| q) = \int p(x) \log\frac {p(x)} {q(x)} \,dx

    Args:
        p (Distribution): A :class:`~torch.distributions.Distribution` object.
        q (Distribution): A :class:`~torch.distributions.Distribution` object.

    Returns:
        Tensor: A batch of KL divergences of shape `batch_shape`.

    Raises:
        NotImplementedError: If the distribution types have not been registered via
            :meth:`register_kl`.
    """
    try:
        fun = _KL_MEMOIZE[type(p), type(q)]
    except KeyError:
        fun = _dispatch_kl(type(p), type(q))
        _KL_MEMOIZE[type(p), type(q)] = fun
    if fun is NotImplemented:
        raise NotImplementedError("No KL(p || q) is implemented for p type {} and q type {}"
                                  .format(p.__class__.__name__, q.__class__.__name__))
    return fun(p, q)
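

# Illustrative usage sketch (not part of the upstream module): computing a batched KL
# between two Normal distributions; the result has the broadcasted batch_shape of p and q.
def _example_kl_divergence_usage():
    p = Normal(torch.zeros(3), torch.ones(3))
    q = Normal(torch.ones(3), 2 * torch.ones(3))
    return kl_divergence(p, q)  # tensor of shape (3,), one KL value per batch element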


################################################################################
# KL Divergence Implementations
################################################################################

# Same distributions


@register_kl(Bernoulli, Bernoulli)
def _kl_bernoulli_bernoulli(p, q):
    t1 = p.probs * (torch.nn.functional.softplus(-q.logits) - torch.nn.functional.softplus(-p.logits))
    t1[q.probs == 0] = inf
    t1[p.probs == 0] = 0
    t2 = (1 - p.probs) * (torch.nn.functional.softplus(q.logits) - torch.nn.functional.softplus(p.logits))
    t2[q.probs == 1] = inf
    t2[p.probs == 1] = 0
    return t1 + t2


@register_kl(Beta, Beta)
def _kl_beta_beta(p, q):
    sum_params_p = p.concentration1 + p.concentration0
    sum_params_q = q.concentration1 + q.concentration0
    t1 = q.concentration1.lgamma() + q.concentration0.lgamma() + (sum_params_p).lgamma()
    t2 = p.concentration1.lgamma() + p.concentration0.lgamma() + (sum_params_q).lgamma()
    t3 = (p.concentration1 - q.concentration1) * torch.digamma(p.concentration1)
    t4 = (p.concentration0 - q.concentration0) * torch.digamma(p.concentration0)
    t5 = (sum_params_q - sum_params_p) * torch.digamma(sum_params_p)
    return t1 - t2 + t3 + t4 + t5


@register_kl(Binomial, Binomial)
def _kl_binomial_binomial(p, q):
    # from https://math.stackexchange.com/questions/2214993/
    # kullback-leibler-divergence-for-binomial-distributions-p-and-q
    if (p.total_count < q.total_count).any():
        raise NotImplementedError('KL between Binomials where q.total_count > p.total_count is not implemented')
    kl = p.total_count * (p.probs * (p.logits - q.logits) + (-p.probs).log1p() - (-q.probs).log1p())
    inf_idxs = p.total_count > q.total_count
    kl[inf_idxs] = _infinite_like(kl[inf_idxs])
    return kl


@register_kl(Categorical, Categorical)
def _kl_categorical_categorical(p, q):
    t = p.probs * (p.logits - q.logits)
    t[(q.probs == 0).expand_as(t)] = inf
    t[(p.probs == 0).expand_as(t)] = 0
    return t.sum(-1)


@register_kl(ContinuousBernoulli, ContinuousBernoulli)
def _kl_continuous_bernoulli_continuous_bernoulli(p, q):
    t1 = p.mean * (p.logits - q.logits)
    t2 = p._cont_bern_log_norm() + torch.log1p(-p.probs)
    t3 = - q._cont_bern_log_norm() - torch.log1p(-q.probs)
    return t1 + t2 + t3


@register_kl(Dirichlet, Dirichlet)
def _kl_dirichlet_dirichlet(p, q):
    # From http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/
    sum_p_concentration = p.concentration.sum(-1)
    sum_q_concentration = q.concentration.sum(-1)
    t1 = sum_p_concentration.lgamma() - sum_q_concentration.lgamma()
    t2 = (p.concentration.lgamma() - q.concentration.lgamma()).sum(-1)
    t3 = p.concentration - q.concentration
    t4 = p.concentration.digamma() - sum_p_concentration.digamma().unsqueeze(-1)
    return t1 - t2 + (t3 * t4).sum(-1)


@register_kl(Exponential, Exponential)
def _kl_exponential_exponential(p, q):
    rate_ratio = q.rate / p.rate
    t1 = -rate_ratio.log()
    return t1 + rate_ratio - 1


@register_kl(ExponentialFamily, ExponentialFamily)
def _kl_expfamily_expfamily(p, q):
    if not type(p) == type(q):
        raise NotImplementedError("The cross KL-divergence between different exponential families cannot "
                                  "be computed using Bregman divergences")
    p_nparams = [np.detach().requires_grad_() for np in p._natural_params]
    q_nparams = q._natural_params
    lg_normal = p._log_normalizer(*p_nparams)
    gradients = torch.autograd.grad(lg_normal.sum(), p_nparams, create_graph=True)
    result = q._log_normalizer(*q_nparams) - lg_normal
    for pnp, qnp, g in zip(p_nparams, q_nparams, gradients):
        term = (qnp - pnp) * g
        result -= _sum_rightmost(term, len(q.event_shape))
    return result
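

# Illustrative sketch (not part of the upstream module): for two members of the same
# exponential family with natural parameters eta and log-normalizer A, the function above
# evaluates the Bregman divergence KL(p || q) = A(eta_q) - A(eta_p) - <eta_q - eta_p, grad A(eta_p)>.
# For two Gammas this should agree with the closed form _kl_gamma_gamma defined below.
def _example_expfamily_bregman_check():
    p = Gamma(torch.tensor(2.0), torch.tensor(1.5))
    q = Gamma(torch.tensor(3.0), torch.tensor(0.5))
    return torch.allclose(_kl_expfamily_expfamily(p, q), _kl_gamma_gamma(p, q))  # expected: True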


@register_kl(Gamma, Gamma)
def _kl_gamma_gamma(p, q):
    t1 = q.concentration * (p.rate / q.rate).log()
    t2 = torch.lgamma(q.concentration) - torch.lgamma(p.concentration)
    t3 = (p.concentration - q.concentration) * torch.digamma(p.concentration)
    t4 = (q.rate - p.rate) * (p.concentration / p.rate)
    return t1 + t2 + t3 + t4


@register_kl(Gumbel, Gumbel)
def _kl_gumbel_gumbel(p, q):
    ct1 = p.scale / q.scale
    ct2 = q.loc / q.scale
    ct3 = p.loc / q.scale
    t1 = -ct1.log() - ct2 + ct3
    t2 = ct1 * _euler_gamma
    t3 = torch.exp(ct2 + (1 + ct1).lgamma() - ct3)
    return t1 + t2 + t3 - (1 + _euler_gamma)


@register_kl(Geometric, Geometric)
def _kl_geometric_geometric(p, q):
    return -p.entropy() - torch.log1p(-q.probs) / p.probs - q.logits


@register_kl(HalfNormal, HalfNormal)
def _kl_halfnormal_halfnormal(p, q):
    return _kl_normal_normal(p.base_dist, q.base_dist)


@register_kl(Laplace, Laplace)
def _kl_laplace_laplace(p, q):
    # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf
    scale_ratio = p.scale / q.scale
    loc_abs_diff = (p.loc - q.loc).abs()
    t1 = -scale_ratio.log()
    t2 = loc_abs_diff / q.scale
    t3 = scale_ratio * torch.exp(-loc_abs_diff / p.scale)
    return t1 + t2 + t3 - 1


@register_kl(LowRankMultivariateNormal, LowRankMultivariateNormal)
def _kl_lowrankmultivariatenormal_lowrankmultivariatenormal(p, q):
    if p.event_shape != q.event_shape:
        raise ValueError("KL-divergence between two Low Rank Multivariate Normals with "
                         "different event shapes cannot be computed")
    term1 = (_batch_lowrank_logdet(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
                                   q._capacitance_tril) -
             _batch_lowrank_logdet(p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag,
                                   p._capacitance_tril))
    term3 = _batch_lowrank_mahalanobis(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
                                       q.loc - p.loc,
                                       q._capacitance_tril)
    # Expands term2 according to
    # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ (pW @ pW.T + pD)
    #                  = [inv(qD) - A.T @ A] @ (pD + pW @ pW.T)
    qWt_qDinv = (q._unbroadcasted_cov_factor.mT /
                 q._unbroadcasted_cov_diag.unsqueeze(-2))
    A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False)
    term21 = (p._unbroadcasted_cov_diag / q._unbroadcasted_cov_diag).sum(-1)
    term22 = _batch_trace_XXT(p._unbroadcasted_cov_factor *
                              q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1))
    term23 = _batch_trace_XXT(A * p._unbroadcasted_cov_diag.sqrt().unsqueeze(-2))
    term24 = _batch_trace_XXT(A.matmul(p._unbroadcasted_cov_factor))
    term2 = term21 + term22 - term23 - term24
    return 0.5 * (term1 + term2 + term3 - p.event_shape[0])


@register_kl(MultivariateNormal, LowRankMultivariateNormal)
def _kl_multivariatenormal_lowrankmultivariatenormal(p, q):
    if p.event_shape != q.event_shape:
        raise ValueError("KL-divergence between two (Low Rank) Multivariate Normals with "
                         "different event shapes cannot be computed")
    term1 = (_batch_lowrank_logdet(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
                                   q._capacitance_tril) -
             2 * p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1))
    term3 = _batch_lowrank_mahalanobis(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
                                       q.loc - p.loc,
                                       q._capacitance_tril)
    # Expands term2 according to
    # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ p_tril @ p_tril.T
    #                  = [inv(qD) - A.T @ A] @ p_tril @ p_tril.T
    qWt_qDinv = (q._unbroadcasted_cov_factor.mT /
                 q._unbroadcasted_cov_diag.unsqueeze(-2))
    A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False)
    term21 = _batch_trace_XXT(p._unbroadcasted_scale_tril *
                              q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1))
    term22 = _batch_trace_XXT(A.matmul(p._unbroadcasted_scale_tril))
    term2 = term21 - term22
    return 0.5 * (term1 + term2 + term3 - p.event_shape[0])


@register_kl(LowRankMultivariateNormal, MultivariateNormal)
def _kl_lowrankmultivariatenormal_multivariatenormal(p, q):
    if p.event_shape != q.event_shape:
        raise ValueError("KL-divergence between two (Low Rank) Multivariate Normals with "
                         "different event shapes cannot be computed")
    term1 = (2 * q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) -
             _batch_lowrank_logdet(p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag,
                                   p._capacitance_tril))
    term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc))
    # Expands term2 according to
    # inv(qcov) @ pcov = inv(q_tril @ q_tril.T) @ (pW @ pW.T + pD)
    combined_batch_shape = torch._C._infer_size(q._unbroadcasted_scale_tril.shape[:-2],
                                                p._unbroadcasted_cov_factor.shape[:-2])
    n = p.event_shape[0]
    q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
    p_cov_factor = p._unbroadcasted_cov_factor.expand(combined_batch_shape +
                                                      (n, p.cov_factor.size(-1)))
    p_cov_diag = (torch.diag_embed(p._unbroadcasted_cov_diag.sqrt())
                  .expand(combined_batch_shape + (n, n)))
    term21 = _batch_trace_XXT(torch.linalg.solve_triangular(q_scale_tril, p_cov_factor, upper=False))
    term22 = _batch_trace_XXT(torch.linalg.solve_triangular(q_scale_tril, p_cov_diag, upper=False))
    term2 = term21 + term22
    return 0.5 * (term1 + term2 + term3 - p.event_shape[0])


@register_kl(MultivariateNormal, MultivariateNormal)
def _kl_multivariatenormal_multivariatenormal(p, q):
    # From https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Kullback%E2%80%93Leibler_divergence
    if p.event_shape != q.event_shape:
        raise ValueError("KL-divergence between two Multivariate Normals with "
                         "different event shapes cannot be computed")
    half_term1 = (q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) -
                  p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1))
    combined_batch_shape = torch._C._infer_size(q._unbroadcasted_scale_tril.shape[:-2],
                                                p._unbroadcasted_scale_tril.shape[:-2])
    n = p.event_shape[0]
    q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
    p_scale_tril = p._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
    term2 = _batch_trace_XXT(torch.linalg.solve_triangular(q_scale_tril, p_scale_tril, upper=False))
    term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc))
    return half_term1 + 0.5 * (term2 + term3 - n)


@register_kl(Normal, Normal)
def _kl_normal_normal(p, q):
    var_ratio = (p.scale / q.scale).pow(2)
    t1 = ((p.loc - q.loc) / q.scale).pow(2)
    return 0.5 * (var_ratio + t1 - 1 - var_ratio.log())
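

# Illustrative sketch (not part of the upstream module): the closed form above can be
# sanity-checked against a Monte Carlo estimate of E_p[log p(x) - log q(x)] using samples from p.
def _example_normal_normal_monte_carlo_check():
    p = Normal(torch.tensor(0.0), torch.tensor(1.0))
    q = Normal(torch.tensor(1.0), torch.tensor(2.0))
    x = p.sample((100000,))
    mc_estimate = (p.log_prob(x) - q.log_prob(x)).mean()
    return _kl_normal_normal(p, q), mc_estimate  # the two values should be close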


@register_kl(OneHotCategorical, OneHotCategorical)
def _kl_onehotcategorical_onehotcategorical(p, q):
    return _kl_categorical_categorical(p._categorical, q._categorical)


@register_kl(Pareto, Pareto)
def _kl_pareto_pareto(p, q):
    # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf
    scale_ratio = p.scale / q.scale
    alpha_ratio = q.alpha / p.alpha
    t1 = q.alpha * scale_ratio.log()
    t2 = -alpha_ratio.log()
    result = t1 + t2 + alpha_ratio - 1
    result[p.support.lower_bound < q.support.lower_bound] = inf
    return result


@register_kl(Poisson, Poisson)
def _kl_poisson_poisson(p, q):
    return p.rate * (p.rate.log() - q.rate.log()) - (p.rate - q.rate)


@register_kl(TransformedDistribution, TransformedDistribution)
def _kl_transformed_transformed(p, q):
    if p.transforms != q.transforms:
        raise NotImplementedError
    if p.event_shape != q.event_shape:
        raise NotImplementedError
    return kl_divergence(p.base_dist, q.base_dist)
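

# Illustrative sketch (not part of the upstream module): when two TransformedDistribution
# objects share the same transforms, the log-det-Jacobian terms cancel and the KL above
# reduces to the KL between the base distributions. ExpTransform is used as an example.
def _example_transformed_kl():
    from .transforms import ExpTransform
    t = ExpTransform()  # reuse the same transform object so p.transforms == q.transforms
    base_p = Normal(torch.tensor(0.0), torch.tensor(1.0))
    base_q = Normal(torch.tensor(0.5), torch.tensor(1.5))
    p = TransformedDistribution(base_p, [t])  # a log-normal
    q = TransformedDistribution(base_q, [t])
    return torch.allclose(kl_divergence(p, q), kl_divergence(base_p, base_q))  # expected: True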


@register_kl(Uniform, Uniform)
def _kl_uniform_uniform(p, q):
    result = ((q.high - q.low) / (p.high - p.low)).log()
    result[(q.low > p.low) | (q.high < p.high)] = inf
    return result


# Different distributions


@register_kl(Bernoulli, Poisson)
def _kl_bernoulli_poisson(p, q):
    return -p.entropy() - (p.probs * q.rate.log() - q.rate)


@register_kl(Beta, ContinuousBernoulli)
def _kl_beta_continuous_bernoulli(p, q):
    return -p.entropy() - p.mean * q.logits - torch.log1p(-q.probs) - q._cont_bern_log_norm()


@register_kl(Beta, Pareto)
def _kl_beta_infinity(p, q):
    return _infinite_like(p.concentration1)


@register_kl(Beta, Exponential)
def _kl_beta_exponential(p, q):
    return -p.entropy() - q.rate.log() + q.rate * (p.concentration1 / (p.concentration1 + p.concentration0))


@register_kl(Beta, Gamma)
def _kl_beta_gamma(p, q):
    t1 = -p.entropy()
    t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
    t3 = (q.concentration - 1) * (p.concentration1.digamma() - (p.concentration1 + p.concentration0).digamma())
    t4 = q.rate * p.concentration1 / (p.concentration1 + p.concentration0)
    return t1 + t2 - t3 + t4


# TODO: Add Beta-Laplace KL Divergence

@register_kl(Beta, Normal)
def _kl_beta_normal(p, q):
    E_beta = p.concentration1 / (p.concentration1 + p.concentration0)
    var_normal = q.scale.pow(2)
    t1 = -p.entropy()
    t2 = 0.5 * (var_normal * 2 * math.pi).log()
    t3 = (E_beta * (1 - E_beta) / (p.concentration1 + p.concentration0 + 1) + E_beta.pow(2)) * 0.5
    t4 = q.loc * E_beta
    t5 = q.loc.pow(2) * 0.5
    return t1 + t2 + (t3 - t4 + t5) / var_normal


@register_kl(Beta, Uniform)
def _kl_beta_uniform(p, q):
    result = -p.entropy() + (q.high - q.low).log()
    result[(q.low > p.support.lower_bound) | (q.high < p.support.upper_bound)] = inf
    return result


# Note that the KL between a ContinuousBernoulli and Beta has no closed form

@register_kl(ContinuousBernoulli, Pareto)
def _kl_continuous_bernoulli_infinity(p, q):
    return _infinite_like(p.probs)


@register_kl(ContinuousBernoulli, Exponential)
def _kl_continuous_bernoulli_exponential(p, q):
    return -p.entropy() - torch.log(q.rate) + q.rate * p.mean


# Note that the KL between a ContinuousBernoulli and Gamma has no closed form
# TODO: Add ContinuousBernoulli-Laplace KL Divergence

@register_kl(ContinuousBernoulli, Normal)
def _kl_continuous_bernoulli_normal(p, q):
    t1 = -p.entropy()
    t2 = 0.5 * (math.log(2. * math.pi) + torch.square(q.loc / q.scale)) + torch.log(q.scale)
    t3 = (p.variance + torch.square(p.mean) - 2. * q.loc * p.mean) / (2.0 * torch.square(q.scale))
    return t1 + t2 + t3


@register_kl(ContinuousBernoulli, Uniform)
def _kl_continuous_bernoulli_uniform(p, q):
    result = -p.entropy() + (q.high - q.low).log()
    return torch.where(torch.max(torch.ge(q.low, p.support.lower_bound),
                                 torch.le(q.high, p.support.upper_bound)),
                       torch.ones_like(result) * inf, result)


@register_kl(Exponential, Beta)
@register_kl(Exponential, ContinuousBernoulli)
@register_kl(Exponential, Pareto)
@register_kl(Exponential, Uniform)
def _kl_exponential_infinity(p, q):
    return _infinite_like(p.rate)


@register_kl(Exponential, Gamma)
def _kl_exponential_gamma(p, q):
    ratio = q.rate / p.rate
    t1 = -q.concentration * torch.log(ratio)
    return t1 + ratio + q.concentration.lgamma() + q.concentration * _euler_gamma - (1 + _euler_gamma)


@register_kl(Exponential, Gumbel)
def _kl_exponential_gumbel(p, q):
    scale_rate_prod = p.rate * q.scale
    loc_scale_ratio = q.loc / q.scale
    t1 = scale_rate_prod.log() - 1
    t2 = torch.exp(loc_scale_ratio) * scale_rate_prod / (scale_rate_prod + 1)
    t3 = scale_rate_prod.reciprocal()
    return t1 - loc_scale_ratio + t2 + t3


# TODO: Add Exponential-Laplace KL Divergence

@register_kl(Exponential, Normal)
def _kl_exponential_normal(p, q):
    var_normal = q.scale.pow(2)
    rate_sqr = p.rate.pow(2)
    t1 = 0.5 * torch.log(rate_sqr * var_normal * 2 * math.pi)
    t2 = rate_sqr.reciprocal()
    t3 = q.loc / p.rate
    t4 = q.loc.pow(2) * 0.5
    return t1 - 1 + (t2 - t3 + t4) / var_normal


@register_kl(Gamma, Beta)
@register_kl(Gamma, ContinuousBernoulli)
@register_kl(Gamma, Pareto)
@register_kl(Gamma, Uniform)
def _kl_gamma_infinity(p, q):
    return _infinite_like(p.concentration)


@register_kl(Gamma, Exponential)
def _kl_gamma_exponential(p, q):
    return -p.entropy() - q.rate.log() + q.rate * p.concentration / p.rate


@register_kl(Gamma, Gumbel)
def _kl_gamma_gumbel(p, q):
    beta_scale_prod = p.rate * q.scale
    loc_scale_ratio = q.loc / q.scale
    t1 = (p.concentration - 1) * p.concentration.digamma() - p.concentration.lgamma() - p.concentration
    t2 = beta_scale_prod.log() + p.concentration / beta_scale_prod
    t3 = torch.exp(loc_scale_ratio) * (1 + beta_scale_prod.reciprocal()).pow(-p.concentration) - loc_scale_ratio
    return t1 + t2 + t3


# TODO: Add Gamma-Laplace KL Divergence

@register_kl(Gamma, Normal)
def _kl_gamma_normal(p, q):
    var_normal = q.scale.pow(2)
    beta_sqr = p.rate.pow(2)
    t1 = 0.5 * torch.log(beta_sqr * var_normal * 2 * math.pi) - p.concentration - p.concentration.lgamma()
    t2 = 0.5 * (p.concentration.pow(2) + p.concentration) / beta_sqr
    t3 = q.loc * p.concentration / p.rate
    t4 = 0.5 * q.loc.pow(2)
    return t1 + (p.concentration - 1) * p.concentration.digamma() + (t2 - t3 + t4) / var_normal


@register_kl(Gumbel, Beta)
@register_kl(Gumbel, ContinuousBernoulli)
@register_kl(Gumbel, Exponential)
@register_kl(Gumbel, Gamma)
@register_kl(Gumbel, Pareto)
@register_kl(Gumbel, Uniform)
def _kl_gumbel_infinity(p, q):
    return _infinite_like(p.loc)


# TODO: Add Gumbel-Laplace KL Divergence

@register_kl(Gumbel, Normal)
def _kl_gumbel_normal(p, q):
    param_ratio = p.scale / q.scale
    t1 = (param_ratio / math.sqrt(2 * math.pi)).log()
    t2 = (math.pi * param_ratio * 0.5).pow(2) / 3
    t3 = ((p.loc + p.scale * _euler_gamma - q.loc) / q.scale).pow(2) * 0.5
    return -t1 + t2 + t3 - (_euler_gamma + 1)


@register_kl(Laplace, Beta)
@register_kl(Laplace, ContinuousBernoulli)
@register_kl(Laplace, Exponential)
@register_kl(Laplace, Gamma)
@register_kl(Laplace, Pareto)
@register_kl(Laplace, Uniform)
def _kl_laplace_infinity(p, q):
    return _infinite_like(p.loc)


@register_kl(Laplace, Normal)
def _kl_laplace_normal(p, q):
    var_normal = q.scale.pow(2)
    scale_sqr_var_ratio = p.scale.pow(2) / var_normal
    t1 = 0.5 * torch.log(2 * scale_sqr_var_ratio / math.pi)
    t2 = 0.5 * p.loc.pow(2)
    t3 = p.loc * q.loc
    t4 = 0.5 * q.loc.pow(2)
    return -t1 + scale_sqr_var_ratio + (t2 - t3 + t4) / var_normal - 1


@register_kl(Normal, Beta)
@register_kl(Normal, ContinuousBernoulli)
@register_kl(Normal, Exponential)
@register_kl(Normal, Gamma)
@register_kl(Normal, Pareto)
@register_kl(Normal, Uniform)
def _kl_normal_infinity(p, q):
    return _infinite_like(p.loc)


@register_kl(Normal, Gumbel)
def _kl_normal_gumbel(p, q):
    mean_scale_ratio = p.loc / q.scale
    var_scale_sqr_ratio = (p.scale / q.scale).pow(2)
    loc_scale_ratio = q.loc / q.scale
    t1 = var_scale_sqr_ratio.log() * 0.5
    t2 = mean_scale_ratio - loc_scale_ratio
    t3 = torch.exp(-mean_scale_ratio + 0.5 * var_scale_sqr_ratio + loc_scale_ratio)
    return -t1 + t2 + t3 - (0.5 * (1 + math.log(2 * math.pi)))


@register_kl(Normal, Laplace)
def _kl_normal_laplace(p, q):
    loc_diff = p.loc - q.loc
    scale_ratio = p.scale / q.scale
    loc_diff_scale_ratio = loc_diff / p.scale
    t1 = torch.log(scale_ratio)
    t2 = math.sqrt(2 / math.pi) * p.scale * torch.exp(-0.5 * loc_diff_scale_ratio.pow(2))
    t3 = loc_diff * torch.erf(math.sqrt(0.5) * loc_diff_scale_ratio)
    return -t1 + (t2 + t3) / q.scale - (0.5 * (1 + math.log(0.5 * math.pi)))


@register_kl(Pareto, Beta)
@register_kl(Pareto, ContinuousBernoulli)
@register_kl(Pareto, Uniform)
def _kl_pareto_infinity(p, q):
    return _infinite_like(p.scale)


@register_kl(Pareto, Exponential)
def _kl_pareto_exponential(p, q):
    scale_rate_prod = p.scale * q.rate
    t1 = (p.alpha / scale_rate_prod).log()
    t2 = p.alpha.reciprocal()
    t3 = p.alpha * scale_rate_prod / (p.alpha - 1)
    result = t1 - t2 + t3 - 1
    result[p.alpha <= 1] = inf
    return result


@register_kl(Pareto, Gamma)
def _kl_pareto_gamma(p, q):
    common_term = p.scale.log() + p.alpha.reciprocal()
    t1 = p.alpha.log() - common_term
    t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
    t3 = (1 - q.concentration) * common_term
    t4 = q.rate * p.alpha * p.scale / (p.alpha - 1)
    result = t1 + t2 + t3 + t4 - 1
    result[p.alpha <= 1] = inf
    return result


# TODO: Add Pareto-Laplace KL Divergence

@register_kl(Pareto, Normal)
def _kl_pareto_normal(p, q):
    var_normal = 2 * q.scale.pow(2)
    common_term = p.scale / (p.alpha - 1)
    t1 = (math.sqrt(2 * math.pi) * q.scale * p.alpha / p.scale).log()
    t2 = p.alpha.reciprocal()
    t3 = p.alpha * common_term.pow(2) / (p.alpha - 2)
    t4 = (p.alpha * common_term - q.loc).pow(2)
    result = t1 - t2 + (t3 + t4) / var_normal - 1
    result[p.alpha <= 2] = inf
    return result


@register_kl(Poisson, Bernoulli)
@register_kl(Poisson, Binomial)
def _kl_poisson_infinity(p, q):
    return _infinite_like(p.rate)


@register_kl(Uniform, Beta)
def _kl_uniform_beta(p, q):
    common_term = p.high - p.low
    t1 = torch.log(common_term)
    t2 = (q.concentration1 - 1) * (_x_log_x(p.high) - _x_log_x(p.low) - common_term) / common_term
    t3 = (q.concentration0 - 1) * (_x_log_x((1 - p.high)) - _x_log_x((1 - p.low)) + common_term) / common_term
    t4 = q.concentration1.lgamma() + q.concentration0.lgamma() - (q.concentration1 + q.concentration0).lgamma()
    result = t3 + t4 - t1 - t2
    result[(p.high > q.support.upper_bound) | (p.low < q.support.lower_bound)] = inf
    return result


@register_kl(Uniform, ContinuousBernoulli)
def _kl_uniform_continuous_bernoulli(p, q):
    result = -p.entropy() - p.mean * q.logits - torch.log1p(-q.probs) - q._cont_bern_log_norm()
    return torch.where(torch.max(torch.ge(p.high, q.support.upper_bound),
                                 torch.le(p.low, q.support.lower_bound)),
                       torch.ones_like(result) * inf, result)


@register_kl(Uniform, Exponential)
def _kl_uniform_exponential(p, q):
    result = q.rate * (p.high + p.low) / 2 - ((p.high - p.low) * q.rate).log()
    result[p.low < q.support.lower_bound] = inf
    return result


@register_kl(Uniform, Gamma)
def _kl_uniform_gamma(p, q):
    common_term = p.high - p.low
    t1 = common_term.log()
    t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
    t3 = (1 - q.concentration) * (_x_log_x(p.high) - _x_log_x(p.low) - common_term) / common_term
    t4 = q.rate * (p.high + p.low) / 2
    result = -t1 + t2 + t3 + t4
    result[p.low < q.support.lower_bound] = inf
    return result


@register_kl(Uniform, Gumbel)
def _kl_uniform_gumbel(p, q):
    common_term = q.scale / (p.high - p.low)
    high_loc_diff = (p.high - q.loc) / q.scale
    low_loc_diff = (p.low - q.loc) / q.scale
    t1 = common_term.log() + 0.5 * (high_loc_diff + low_loc_diff)
    t2 = common_term * (torch.exp(-high_loc_diff) - torch.exp(-low_loc_diff))
    return t1 - t2


# TODO: Uniform-Laplace KL Divergence

@register_kl(Uniform, Normal)
def _kl_uniform_normal(p, q):
    common_term = p.high - p.low
    t1 = (math.sqrt(math.pi * 2) * q.scale / common_term).log()
    t2 = (common_term).pow(2) / 12
    t3 = ((p.high + p.low - 2 * q.loc) / 2).pow(2)
    return t1 + 0.5 * (t2 + t3) / q.scale.pow(2)


@register_kl(Uniform, Pareto)
def _kl_uniform_pareto(p, q):
    support_uniform = p.high - p.low
    t1 = (q.alpha * q.scale.pow(q.alpha) * (support_uniform)).log()
    t2 = (_x_log_x(p.high) - _x_log_x(p.low) - support_uniform) / support_uniform
    result = t2 * (q.alpha + 1) - t1
    result[p.low < q.support.lower_bound] = inf
    return result


@register_kl(Independent, Independent)
def _kl_independent_independent(p, q):
    if p.reinterpreted_batch_ndims != q.reinterpreted_batch_ndims:
        raise NotImplementedError
    result = kl_divergence(p.base_dist, q.base_dist)
    return _sum_rightmost(result, p.reinterpreted_batch_ndims)
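

# Illustrative sketch (not part of the upstream module): wrapping factorized distributions in
# Independent sums the per-dimension KLs over the reinterpreted event dimensions.
def _example_independent_kl():
    p = Independent(Normal(torch.zeros(4), torch.ones(4)), 1)
    q = Independent(Normal(torch.ones(4), torch.ones(4)), 1)
    per_dim = kl_divergence(Normal(torch.zeros(4), torch.ones(4)),
                            Normal(torch.ones(4), torch.ones(4)))
    return torch.allclose(kl_divergence(p, q), per_dim.sum())  # expected: True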


@register_kl(Cauchy, Cauchy)
def _kl_cauchy_cauchy(p, q):
    # From https://arxiv.org/abs/1905.10965
    t1 = ((p.scale + q.scale).pow(2) + (p.loc - q.loc).pow(2)).log()
    t2 = (4 * p.scale * q.scale).log()
    return t1 - t2


def _add_kl_info():
    """Appends a list of implemented KL functions to the doc for kl_divergence."""
    rows = ["KL divergence is currently implemented for the following distribution pairs:"]
    for p, q in sorted(_KL_REGISTRY,
                       key=lambda p_q: (p_q[0].__name__, p_q[1].__name__)):
        rows.append("* :class:`~torch.distributions.{}` and :class:`~torch.distributions.{}`"
                    .format(p.__name__, q.__name__))
    kl_info = '\n\t'.join(rows)
    if kl_divergence.__doc__:
        kl_divergence.__doc__ += kl_info  # type: ignore[operator]