clip_grad.py

import warnings
from typing import Union, Iterable, List, Dict, Tuple, Optional

import torch
from torch import Tensor, inf
from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype, _has_foreach_support

_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]]

__all__ = ['clip_grad_norm_', 'clip_grad_norm', 'clip_grad_value_']


def clip_grad_norm_(
        parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0,
        error_if_nonfinite: bool = False, foreach: Optional[bool] = None) -> torch.Tensor:
    r"""Clips gradient norm of an iterable of parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. Gradients are modified in-place.

    Args:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor that will have gradients normalized
        max_norm (float): max norm of the gradients
        norm_type (float): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.
        error_if_nonfinite (bool): if True, an error is thrown if the total
            norm of the gradients from :attr:`parameters` is ``nan``,
            ``inf``, or ``-inf``. Default: False (will switch to True in the future)
        foreach (bool): use the faster foreach-based implementation.
            If ``None``, use the foreach implementation for CUDA and CPU native tensors and silently
            fall back to the slow implementation for other device types.
            Default: ``None``

    Returns:
        Total norm of the parameter gradients (viewed as a single vector).
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad for p in parameters if p.grad is not None]
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if len(grads) == 0:
        return torch.tensor(0.)
    first_device = grads[0].device
    grouped_grads: Dict[Tuple[torch.device, torch.dtype], List[List[Tensor]]] \
        = _group_tensors_by_device_and_dtype([[g.detach() for g in grads]])  # type: ignore[assignment]
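
    # Compute one norm per gradient tensor and then reduce them to a single total norm. The infinity
    # norm takes a per-tensor abs().max(); any other p-norm uses the fused torch._foreach_norm kernel
    # per (device, dtype) group when available, falling back to per-tensor torch.norm calls.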
    if norm_type == inf:
        norms = [g.detach().abs().max().to(first_device) for g in grads]
        total_norm = norms[0] if len(norms) == 1 else torch.max(torch.stack(norms))
    else:
        norms = []
        for ((device, _), [grads]) in grouped_grads.items():
            if (foreach is None or foreach) and _has_foreach_support(grads, device=device):
                norms.extend(torch._foreach_norm(grads, norm_type))
            elif foreach:
                raise RuntimeError(f'foreach=True was passed, but can\'t use the foreach API on {device.type} tensors')
            else:
                norms.extend([torch.norm(g, norm_type) for g in grads])
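
        # The p-norm of all gradients viewed as one concatenated vector equals the p-norm of the
        # vector of per-tensor p-norms, so the per-group results can be reduced in a second pass.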
        total_norm = torch.norm(torch.stack([norm.to(first_device) for norm in norms]), norm_type)

    if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()):
        raise RuntimeError(
            f'The total norm of order {norm_type} for gradients from '
            '`parameters` is non-finite, so it cannot be clipped. To disable '
            'this error and scale the gradients by the non-finite norm anyway, '
            'set `error_if_nonfinite=False`')
    clip_coef = max_norm / (total_norm + 1e-6)
    # Note: multiplying by the clamped coef is redundant when the coef is clamped to 1, but doing so
    # avoids a `if clip_coef < 1:` conditional which can require a CPU <=> device synchronization
    # when the gradients do not reside in CPU memory.
    clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
    for ((device, _), [grads]) in grouped_grads.items():
        if (foreach is None or foreach) and _has_foreach_support(grads, device=device):
            torch._foreach_mul_(grads, clip_coef_clamped.to(device))  # type: ignore[call-overload]
        elif foreach:
            raise RuntimeError(f'foreach=True was passed, but can\'t use the foreach API on {device.type} tensors')
        else:
            clip_coef_clamped_device = clip_coef_clamped.to(device)
            for g in grads:
                g.detach().mul_(clip_coef_clamped_device)
    return total_norm
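

# Usage sketch for clip_grad_norm_ (illustrative; `model`, `optimizer` and `loss` are placeholder
# names, not defined in this module). Clipping goes after backward() and before optimizer.step():
#
#     loss.backward()
#     total_norm = clip_grad_norm_(model.parameters(), max_norm=1.0)
#     optimizer.step()
#
# The returned value is the gradient norm measured *before* clipping, which is useful for logging
# or for detecting exploding gradients.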


def clip_grad_norm(
        parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.,
        error_if_nonfinite: bool = False, foreach: Optional[bool] = None) -> torch.Tensor:
    r"""Clips gradient norm of an iterable of parameters.

    .. warning::
        This method is now deprecated in favor of
        :func:`torch.nn.utils.clip_grad_norm_`.
    """
    warnings.warn("torch.nn.utils.clip_grad_norm is now deprecated in favor "
                  "of torch.nn.utils.clip_grad_norm_.", stacklevel=2)
    return clip_grad_norm_(parameters, max_norm, norm_type, error_if_nonfinite, foreach)


def clip_grad_value_(parameters: _tensor_or_tensors, clip_value: float, foreach: Optional[bool] = None) -> None:
    r"""Clips gradient of an iterable of parameters at specified value.

    Gradients are modified in-place.

    Args:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor that will have gradients clipped
        clip_value (float): maximum allowed value of the gradients.
            The gradients are clipped in the range
            :math:`\left[\text{-clip\_value}, \text{clip\_value}\right]`
        foreach (bool): use the faster foreach-based implementation.
            If ``None``, use the foreach implementation for CUDA and CPU native tensors and
            silently fall back to the slow implementation for other device types.
            Default: ``None``
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    clip_value = float(clip_value)
    grads = [p.grad for p in parameters if p.grad is not None]
    grouped_grads: Dict[Tuple[torch.device, torch.dtype], List[List[Tensor]]] \
        = _group_tensors_by_device_and_dtype([grads])  # type: ignore[assignment]
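
    # Clamp every gradient element into [-clip_value, clip_value], using the fused foreach clamp
    # kernels per (device, dtype) group when available and per-tensor clamp_ calls otherwise.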
    for ((device, _), [grads]) in grouped_grads.items():
        if (foreach is None or foreach) and _has_foreach_support(grads, device=device):
            torch._foreach_clamp_min_(grads, -clip_value)
            torch._foreach_clamp_max_(grads, clip_value)
        elif foreach:
            raise RuntimeError(f'foreach=True was passed, but can\'t use the foreach API on {device.type} tensors')
        else:
            for grad in grads:
                grad.data.clamp_(min=-clip_value, max=clip_value)
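

if __name__ == "__main__":
    # Minimal usage sketch: exercise both clipping helpers on a tiny linear model. This demo block
    # is illustrative only; `model`, `inputs` and `target` are placeholder names local to it.
    import torch.nn as nn

    torch.manual_seed(0)
    model = nn.Linear(4, 2)
    inputs = torch.randn(8, 4)
    target = torch.randn(8, 2)

    loss = nn.functional.mse_loss(model(inputs), target)
    loss.backward()

    # Norm-based clipping rescales all gradients together so their combined 2-norm is at most max_norm.
    total_norm = clip_grad_norm_(model.parameters(), max_norm=0.5)
    print("total grad norm before clipping:", float(total_norm))

    # Value-based clipping clamps each gradient element into [-clip_value, clip_value].
    clip_grad_value_(model.parameters(), clip_value=0.1)
    assert all(p.grad.abs().max() <= 0.1 for p in model.parameters())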