sparse_adam.py

import torch
from . import _functional as F
from .optimizer import Optimizer, _maximize_doc

__all__ = ['SparseAdam']


class SparseAdam(Optimizer):
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, maximize: bool = False):
        if not 0.0 < lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 < eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))

        params = list(params)

        sparse_params = []
        for index, param in enumerate(params):
            if isinstance(param, dict):
                # given param group, convert given params to a list first before iterating
                param['params'] = list(param.get("params", []))
                for d_index, d_param in enumerate(param['params']):
                    if d_param.is_sparse:
                        sparse_params.append([index, d_index])
            elif param.is_sparse:
                sparse_params.append(index)
        if sparse_params:
            raise ValueError(
                f"Sparse params at indices {sparse_params}: SparseAdam requires dense parameter tensors"
            )

        defaults = dict(lr=lr, betas=betas, eps=eps, maximize=maximize)
        super().__init__(params, defaults)
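
    # Illustrative note (not part of the upstream source): the check above
    # rejects parameters that are themselves sparse tensors. For example, the
    # following hypothetical construction would raise the ValueError, because
    # SparseAdam expects dense parameters whose *gradients* are sparse:
    #
    #     sparse_w = torch.randn(4, 4).to_sparse().requires_grad_(True)
    #     SparseAdam([sparse_w])  # ValueError: Sparse params at indices [0]: ...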

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            state_steps = []
            eps = group['eps']
            lr = group['lr']
            beta1, beta2 = group['betas']
            maximize = group.get('maximize', False)

            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)
                    if not p.grad.is_sparse:
                        raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead')
                    grads.append(p.grad)

                    state = self.state[p]

                    # State initialization
                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

                    # update the steps for each param group update
                    state['step'] += 1
                    # record the step after step update
                    state_steps.append(state['step'])

            F.sparse_adam(params_with_grad,
                          grads,
                          exp_avgs,
                          exp_avg_sqs,
                          state_steps,
                          beta1=beta1,
                          beta2=beta2,
                          lr=group['lr'],
                          eps=group['eps'],
                          maximize=maximize)

        return loss
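
# Illustrative sketch (not part of the upstream source) of the lazy update the
# sibling _functional module is expected to apply: for each nonzero index i of
# a parameter's sparse gradient g (negated first when maximize=True), with
# per-parameter step count t, the usual Adam recurrence runs only at that
# index; entries absent from the gradient keep their stale moments and values.
# This is the expected math, not a verbatim copy of F.sparse_adam:
#
#     m[i] = beta1 * m[i] + (1 - beta1) * g[i]             # exp_avg
#     v[i] = beta2 * v[i] + (1 - beta2) * g[i] ** 2        # exp_avg_sq
#     step_size = lr * sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
#     p[i] -= step_size * m[i] / (sqrt(v[i]) + eps)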


SparseAdam.__doc__ = r"""Implements a lazy version of the Adam algorithm, suitable for sparse tensors.

    In this variant, only the moments that show up in the gradient get updated, and
    only those portions of the gradient get applied to the parameters.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of the gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        {maximize}

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980

    """.format(maximize=_maximize_doc)