# parametrizations.py

from enum import Enum, auto

import torch
from torch import Tensor

from ..utils import parametrize
from ..modules import Module
from .. import functional as F

from typing import Optional

__all__ = ['orthogonal', 'spectral_norm']

def _is_orthogonal(Q, eps=None):
    n, k = Q.size(-2), Q.size(-1)
    Id = torch.eye(k, dtype=Q.dtype, device=Q.device)
    # A reasonable eps, but not too large
    eps = 10. * n * torch.finfo(Q.dtype).eps
    return torch.allclose(Q.mH @ Q, Id, atol=eps)

def _make_orthogonal(A):
    """Assume that A is a tall matrix.

    Compute the Q factor s.t. A = QR (A may be complex) and diag(R) is real and non-negative.
    """
    X, tau = torch.geqrf(A)
    Q = torch.linalg.householder_product(X, tau)
    # The diagonal of X is the diagonal of R (which is always real) so we normalise by its signs
    Q *= X.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
    return Q

class _OrthMaps(Enum):
    matrix_exp = auto()
    cayley = auto()
    householder = auto()

class _Orthogonal(Module):
    base: Tensor

    def __init__(self,
                 weight,
                 orthogonal_map: _OrthMaps,
                 *,
                 use_trivialization=True) -> None:
        super().__init__()

        # Note [Householder complex]
        # For complex tensors, it is not possible to compute the tensor `tau` necessary for
        # linalg.householder_product from the reflectors.
        # To see this, note that the reflectors have a shape like:
        # 0 0 0
        # * 0 0
        # * * 0
        # which, for complex matrices, give n(n-1) (real) parameters. Now, you need n^2 parameters
        # to parametrize the unitary matrices. Saving tau on its own does not work either, because
        # not every combination of `(A, tau)` gives a unitary matrix, meaning that if we optimise
        # them as independent tensors we would not maintain the constraint.
        # An equivalent reasoning holds for rectangular matrices.
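        # Illustrative count: for n = 3 the strictly lower-triangular reflectors hold 3 complex
        # entries, i.e. n(n-1) = 6 real parameters, while the unitary group U(3) has real
        # dimension n^2 = 9, so the reflectors alone cannot cover it.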
        if weight.is_complex() and orthogonal_map == _OrthMaps.householder:
            raise ValueError("The householder parametrization does not support complex tensors.")

        self.shape = weight.shape
        self.orthogonal_map = orthogonal_map
        if use_trivialization:
            self.register_buffer("base", None)
    def forward(self, X: torch.Tensor) -> torch.Tensor:
        n, k = X.size(-2), X.size(-1)
        transposed = n < k
        if transposed:
            X = X.mT
            n, k = k, n
        # Here n >= k and X is a tall matrix
        if self.orthogonal_map == _OrthMaps.matrix_exp or self.orthogonal_map == _OrthMaps.cayley:
            # We just need n x k - k(k-1)/2 parameters
            X = X.tril()
            if n != k:
                # Embed into a square matrix
                X = torch.cat([X, X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1)
            A = X - X.mH
            # A is skew-symmetric (or skew-hermitian)
            if self.orthogonal_map == _OrthMaps.matrix_exp:
                Q = torch.matrix_exp(A)
            elif self.orthogonal_map == _OrthMaps.cayley:
                # Computes the Cayley retraction (I+A/2)(I-A/2)^{-1}
                Id = torch.eye(n, dtype=A.dtype, device=A.device)
                Q = torch.linalg.solve(torch.add(Id, A, alpha=-0.5), torch.add(Id, A, alpha=0.5))
            # Q is now orthogonal (or unitary) of size (..., n, n)
            if n != k:
                Q = Q[..., :k]
            # Q is now the size of X (albeit perhaps transposed)
        else:
            # X is real here, as we do not support householder with complex numbers
            A = X.tril(diagonal=-1)
            tau = 2. / (1. + (A * A).sum(dim=-2))
            Q = torch.linalg.householder_product(A, tau)
            # The diagonal of X is 1's and -1's
            # We do not want to differentiate through this or update the diagonal of X, hence the casting
            Q = Q * X.diagonal(dim1=-2, dim2=-1).int().unsqueeze(-2)

        if hasattr(self, "base"):
            Q = self.base @ Q
        if transposed:
            Q = Q.mT
        return Q
    @torch.autograd.no_grad()
    def right_inverse(self, Q: torch.Tensor) -> torch.Tensor:
        if Q.shape != self.shape:
            raise ValueError(f"Expected a matrix or batch of matrices of shape {self.shape}. "
                             f"Got a tensor of shape {Q.shape}.")

        Q_init = Q
        n, k = Q.size(-2), Q.size(-1)
        transpose = n < k
        if transpose:
            Q = Q.mT
            n, k = k, n

        # We make sure to always copy Q in every path
        if not hasattr(self, "base"):
            # Note [right_inverse expm cayley]
            # If we do not have use_trivialization=True, we just implement the inverse of the forward
            # map for the Householder. To see why, think that for the Cayley map,
            # we would need to find the matrix X \in R^{n x k} such that:
            # Y = torch.cat([X.tril(), X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1)
            # A = Y - Y.mH
            # cayley(A)[:, :k]
            # gives the original tensor. It is not clear how to do this.
            # Perhaps via some algebraic manipulation involving the QR like that of
            # Corollary 2.2 in Edelman, Arias and Smith?
            if self.orthogonal_map == _OrthMaps.cayley or self.orthogonal_map == _OrthMaps.matrix_exp:
                raise NotImplementedError("It is not possible to assign to the matrix exponential "
                                          "or the Cayley parametrizations when use_trivialization=False.")

            # If parametrization == _OrthMaps.householder, make Q orthogonal via the QR decomposition.
            # Here Q is always real because we do not support householder with complex matrices.
            # See note [Householder complex]
            A, tau = torch.geqrf(Q)
            # We want to have a decomposition X = QR with diag(R) > 0, as otherwise we could
            # decompose an orthogonal matrix Q as Q = (-Q)@(-Id), which is a valid QR decomposition
            # The diagonal of A is the diagonal of R from the QR decomposition
            A.diagonal(dim1=-2, dim2=-1).sign_()
            # Equality with zero is ok because LAPACK returns exactly zero when it does not want
            # to use a particular reflection
            A.diagonal(dim1=-2, dim2=-1)[tau == 0.] *= -1
            return A.mT if transpose else A
        else:
            if n == k:
                # We check whether Q is orthogonal
                if not _is_orthogonal(Q):
                    Q = _make_orthogonal(Q)
                else:  # Is orthogonal
                    Q = Q.clone()
            else:
                # Complete Q into a full n x n orthogonal matrix
                N = torch.randn(*(Q.size()[:-2] + (n, n - k)), dtype=Q.dtype, device=Q.device)
                Q = torch.cat([Q, N], dim=-1)
                Q = _make_orthogonal(Q)
            self.base = Q

            # It is necessary to return the -Id, as we use the diagonal for the
            # Householder parametrization. Using -Id makes:
            # householder(torch.zeros(m,n)) == torch.eye(m,n)
            # Poor man's version of eye_like
            neg_Id = torch.zeros_like(Q_init)
            neg_Id.diagonal(dim1=-2, dim2=-1).fill_(-1.)
            return neg_Id

def orthogonal(module: Module,
               name: str = 'weight',
               orthogonal_map: Optional[str] = None,
               *,
               use_trivialization: bool = True) -> Module:
    r"""Applies an orthogonal or unitary parametrization to a matrix or a batch of matrices.

    Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, the parametrized
    matrix :math:`Q \in \mathbb{K}^{m \times n}` is **orthogonal** as

    .. math::

        \begin{align*}
            Q^{\text{H}}Q &= \mathrm{I}_n \mathrlap{\qquad \text{if }m \geq n}\\
            QQ^{\text{H}} &= \mathrm{I}_m \mathrlap{\qquad \text{if }m < n}
        \end{align*}

    where :math:`Q^{\text{H}}` is the conjugate transpose when :math:`Q` is complex
    and the transpose when :math:`Q` is real-valued, and
    :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
    In plain words, :math:`Q` will have orthonormal columns whenever :math:`m \geq n`
    and orthonormal rows otherwise.

    If the tensor has more than two dimensions, we consider it as a batch of matrices of shape `(..., m, n)`.
    The matrix :math:`Q` may be parametrized via three different ``orthogonal_map`` options in terms of the original tensor:

    - ``"matrix_exp"``/``"cayley"``:
      the :func:`~torch.matrix_exp` :math:`Q = \exp(A)` and the `Cayley map`_
      :math:`Q = (\mathrm{I}_n + A/2)(\mathrm{I}_n - A/2)^{-1}` are applied to a skew-symmetric
      :math:`A` to give an orthogonal matrix.
    - ``"householder"``: computes a product of Householder reflectors
      (:func:`~torch.linalg.householder_product`).

    ``"matrix_exp"``/``"cayley"`` often make the parametrized weight converge faster than
    ``"householder"``, but they are slower to compute for very thin or very wide matrices.

    If ``use_trivialization=True`` (default), the parametrization implements the "Dynamic Trivialization Framework",
    where an extra matrix :math:`B \in \mathbb{K}^{n \times n}` is stored under
    ``module.parametrizations.weight[0].base``. This helps the
    convergence of the parametrized layer at the expense of some extra memory use.
    See `Trivializations for Gradient-Based Optimization on Manifolds`_ .

    Initial value of :math:`Q`:
    If the original tensor is not parametrized and ``use_trivialization=True`` (default), the initial value
    of :math:`Q` is that of the original tensor if it is orthogonal (or unitary in the complex case)
    and it is orthogonalized via the QR decomposition otherwise (see :func:`torch.linalg.qr`).
    The same happens when the tensor is not parametrized and ``orthogonal_map="householder"``, even when ``use_trivialization=False``.
    Otherwise, the initial value is the result of the composition of all the registered
    parametrizations applied to the original tensor.
    .. note::
        This function is implemented using the parametrization functionality
        in :func:`~torch.nn.utils.parametrize.register_parametrization`.

    .. _`Cayley map`: https://en.wikipedia.org/wiki/Cayley_transform#Matrix_map
    .. _`Trivializations for Gradient-Based Optimization on Manifolds`: https://arxiv.org/abs/1909.09501

    Args:
        module (nn.Module): module on which to register the parametrization.
        name (str, optional): name of the tensor to make orthogonal. Default: ``"weight"``.
        orthogonal_map (str, optional): One of the following: ``"matrix_exp"``, ``"cayley"``, ``"householder"``.
            Default: ``"matrix_exp"`` if the matrix is square or complex, ``"householder"`` otherwise.
        use_trivialization (bool, optional): whether to use the dynamic trivialization framework.
            Default: ``True``.

    Returns:
        The original module with an orthogonal parametrization registered to the specified
        weight
    Example::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
        >>> orth_linear = orthogonal(nn.Linear(20, 40))
        >>> orth_linear
        ParametrizedLinear(
          in_features=20, out_features=40, bias=True
          (parametrizations): ModuleDict(
            (weight): ParametrizationList(
              (0): _Orthogonal()
            )
          )
        )
        >>> # xdoctest: +IGNORE_WANT
        >>> Q = orth_linear.weight
        >>> torch.dist(Q.T @ Q, torch.eye(20))
        tensor(4.9332e-07)
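
    A further sketch (illustrative, not a recorded run): the map can be chosen explicitly through
    ``orthogonal_map``, and with the default ``use_trivialization=True`` the extra matrix
    :math:`B` described above is reachable through the registered parametrization::

        >>> cayley_linear = orthogonal(nn.Linear(20, 40), orthogonal_map="cayley")
        >>> base = cayley_linear.parametrizations.weight[0].base  # the trivialization matrix B
        >>> base.shape
        torch.Size([40, 40])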
    """
    weight = getattr(module, name, None)
    if not isinstance(weight, Tensor):
        raise ValueError(
            "Module '{}' has no parameter or buffer with name '{}'".format(module, name)
        )

    # We could implement this for 1-dim tensors as the maps on the sphere
    # but I believe it'd bite more people than it'd help
    if weight.ndim < 2:
        raise ValueError("Expected a matrix or batch of matrices. "
                         f"Got a tensor of {weight.ndim} dimensions.")

    if orthogonal_map is None:
        orthogonal_map = "matrix_exp" if weight.size(-2) == weight.size(-1) or weight.is_complex() else "householder"

    orth_enum = getattr(_OrthMaps, orthogonal_map, None)
    if orth_enum is None:
        raise ValueError('orthogonal_map has to be one of "matrix_exp", "cayley", "householder". '
                         f'Got: {orthogonal_map}')
    orth = _Orthogonal(weight,
                       orth_enum,
                       use_trivialization=use_trivialization)
    parametrize.register_parametrization(module, name, orth, unsafe=True)
    return module

class _SpectralNorm(Module):
    def __init__(
        self,
        weight: torch.Tensor,
        n_power_iterations: int = 1,
        dim: int = 0,
        eps: float = 1e-12
    ) -> None:
        super().__init__()
        ndim = weight.ndim
        if dim >= ndim or dim < -ndim:
            raise IndexError("Dimension out of range (expected to be in range of "
                             f"[-{ndim}, {ndim - 1}] but got {dim})")

        if n_power_iterations <= 0:
            raise ValueError('Expected n_power_iterations to be positive, but '
                             'got n_power_iterations={}'.format(n_power_iterations))
        self.dim = dim if dim >= 0 else dim + ndim
        self.eps = eps
        if ndim > 1:
            # For ndim == 1 we do not need to approximate anything (see _SpectralNorm.forward)
            self.n_power_iterations = n_power_iterations
            weight_mat = self._reshape_weight_to_matrix(weight)
            h, w = weight_mat.size()

            u = weight_mat.new_empty(h).normal_(0, 1)
            v = weight_mat.new_empty(w).normal_(0, 1)
            self.register_buffer('_u', F.normalize(u, dim=0, eps=self.eps))
            self.register_buffer('_v', F.normalize(v, dim=0, eps=self.eps))

            # Start with u, v initialized to some reasonable values by performing a number
            # of iterations of the power method
            self._power_method(weight_mat, 15)
    def _reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor:
        # Precondition
        assert weight.ndim > 1

        if self.dim != 0:
            # permute dim to front
            weight = weight.permute(self.dim, *(d for d in range(weight.dim()) if d != self.dim))

        return weight.flatten(1)
    @torch.autograd.no_grad()
    def _power_method(self, weight_mat: torch.Tensor, n_power_iterations: int) -> None:
        # See original note at torch/nn/utils/spectral_norm.py
        # NB: If `do_power_iteration` is set, the `u` and `v` vectors are
        #     updated in power iteration **in-place**. This is very important
        #     because in `DataParallel` forward, the vectors (being buffers) are
        #     broadcast from the parallelized module to each module replica,
        #     which is a new module object created on the fly. And each replica
        #     runs its own spectral norm power iteration. So simply assigning
        #     the updated vectors to the module this function runs on will cause
        #     the update to be lost forever. And the next time the parallelized
        #     module is replicated, the same randomly initialized vectors are
        #     broadcast and used!
        #
        #     Therefore, to make the change propagate back, we rely on two
        #     important behaviors (also enforced via tests):
        #     1. `DataParallel` doesn't clone storage if the broadcast tensor
        #        is already on the correct device; and it makes sure that the
        #        parallelized module is already on `device[0]`.
        #     2. If the out tensor in the `out=` kwarg has the correct shape, it will
        #        just fill in the values.
        #     Therefore, since the same power iteration is performed on all
        #     devices, simply updating the tensors in-place will make sure that
        #     the module replica on `device[0]` will update the _u vector on the
        #     parallelized module (by shared storage).
        #
        #     However, after we update `u` and `v` in-place, we need to **clone**
        #     them before using them to normalize the weight. This is to support
        #     backpropagating through two forward passes, e.g., the common pattern in
        #     GAN training: loss = D(real) - D(fake). Otherwise, the engine will
        #     complain that variables needed to do backward for the first forward
        #     (i.e., the `u` and `v` vectors) are changed in the second forward.

        # Precondition
        assert weight_mat.ndim > 1

        for _ in range(n_power_iterations):
            # The spectral norm of the weight equals `u^T W v`, where `u` and `v`
            # are the first left and right singular vectors.
            # This power iteration produces approximations of `u` and `v`.
            self._u = F.normalize(torch.mv(weight_mat, self._v),     # type: ignore[has-type]
                                  dim=0, eps=self.eps, out=self._u)  # type: ignore[has-type]
            self._v = F.normalize(torch.mv(weight_mat.t(), self._u),
                                  dim=0, eps=self.eps, out=self._v)  # type: ignore[has-type]
    def forward(self, weight: torch.Tensor) -> torch.Tensor:
        if weight.ndim == 1:
            # Faster and more exact path, no need to approximate anything
            return F.normalize(weight, dim=0, eps=self.eps)
        else:
            weight_mat = self._reshape_weight_to_matrix(weight)
            if self.training:
                self._power_method(weight_mat, self.n_power_iterations)
            # See above on why we need to clone
            u = self._u.clone(memory_format=torch.contiguous_format)
            v = self._v.clone(memory_format=torch.contiguous_format)
            # The proper way of computing this should be through F.bilinear, but
            # it seems to have some efficiency issues:
            # https://github.com/pytorch/pytorch/issues/58093
            sigma = torch.dot(u, torch.mv(weight_mat, v))
            return weight / sigma

    def right_inverse(self, value: torch.Tensor) -> torch.Tensor:
        # we may want to assert here that the passed value already
        # satisfies constraints
        return value

def spectral_norm(module: Module,
                  name: str = 'weight',
                  n_power_iterations: int = 1,
                  eps: float = 1e-12,
                  dim: Optional[int] = None) -> Module:
    r"""Applies spectral normalization to a parameter in the given module.

    .. math::
        \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})},
        \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}

    When applied on a vector, it simplifies to

    .. math::
        \mathbf{x}_{SN} = \dfrac{\mathbf{x}}{\|\mathbf{x}\|_2}

    Spectral normalization stabilizes the training of discriminators (critics)
    in Generative Adversarial Networks (GANs) by reducing the Lipschitz constant
    of the model. :math:`\sigma` is approximated by performing one iteration of the
    `power method`_ every time the weight is accessed. If the weight tensor has
    more than two dimensions, it is reshaped to 2D in the power iteration
    to compute the spectral norm.

    See `Spectral Normalization for Generative Adversarial Networks`_ .

    .. _`power method`: https://en.wikipedia.org/wiki/Power_iteration
    .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957
    .. note::
        This function is implemented using the parametrization functionality
        in :func:`~torch.nn.utils.parametrize.register_parametrization`. It is a
        reimplementation of :func:`torch.nn.utils.spectral_norm`.

    .. note::
        When this constraint is registered, the singular vectors associated with the largest
        singular value are estimated rather than sampled at random. These are then updated
        by performing :attr:`n_power_iterations` of the `power method`_ whenever the tensor
        is accessed with the module on `training` mode.

    .. note::
        If the `_SpectralNorm` module, i.e., `module.parametrizations.weight[idx]`,
        is in training mode on removal, it will perform another power iteration.
        If you'd like to avoid this iteration, set the module to eval mode
        before its removal, as sketched at the end of the example below.

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter. Default: ``"weight"``.
        n_power_iterations (int, optional): number of power iterations to
            calculate spectral norm. Default: ``1``.
        eps (float, optional): epsilon for numerical stability in
            calculating norms. Default: ``1e-12``.
        dim (int, optional): dimension corresponding to number of outputs.
            Default: ``0``, except for modules that are instances of
            ConvTranspose{1,2,3}d, when it is ``1``.

    Returns:
        The original module with a new parametrization registered to the specified
        weight
    Example::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> snm = spectral_norm(nn.Linear(20, 40))
        >>> snm
        ParametrizedLinear(
          in_features=20, out_features=40, bias=True
          (parametrizations): ModuleDict(
            (weight): ParametrizationList(
              (0): _SpectralNorm()
            )
          )
        )
        >>> torch.linalg.matrix_norm(snm.weight, 2)
        tensor(1.0081, grad_fn=<AmaxBackward0>)
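
    A minimal removal sketch (illustrative, not a recorded run): switching to eval mode first
    avoids the extra power iteration mentioned in the note above, and
    :func:`torch.nn.utils.parametrize.remove_parametrizations` then bakes the normalized weight
    into the module::

        >>> snm = snm.eval()  # no power iteration is run on access in eval mode
        >>> snm = torch.nn.utils.parametrize.remove_parametrizations(snm, "weight")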
    """
    weight = getattr(module, name, None)
    if not isinstance(weight, Tensor):
        raise ValueError(
            "Module '{}' has no parameter or buffer with name '{}'".format(module, name)
        )

    if dim is None:
        if isinstance(module, (torch.nn.ConvTranspose1d,
                               torch.nn.ConvTranspose2d,
                               torch.nn.ConvTranspose3d)):
            dim = 1
        else:
            dim = 0
    parametrize.register_parametrization(module, name, _SpectralNorm(weight, n_power_iterations, dim, eps))
    return module