import torch
from torch import Tensor
from .optimizer import (Optimizer, required, _use_grad_for_differentiable, _default_to_fused_or_foreach,
                        _differentiable_doc, _foreach_doc, _maximize_doc)
from typing import List, Optional
from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype

__all__ = ['SGD', 'sgd']

class SGD(Optimizer):
    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, *, maximize: bool = False, foreach: Optional[bool] = None,
                 differentiable: bool = False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov,
                        maximize=maximize, foreach=foreach,
                        differentiable=differentiable)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
            group.setdefault('maximize', False)
            group.setdefault('foreach', None)
            group.setdefault('differentiable', False)

    def _init_group(self, group, params_with_grad, d_p_list, momentum_buffer_list):
        has_sparse_grad = False

        for p in group['params']:
            if p.grad is not None:
                params_with_grad.append(p)
                d_p_list.append(p.grad)
                if p.grad.is_sparse:
                    has_sparse_grad = True

                state = self.state[p]
                if 'momentum_buffer' not in state:
                    momentum_buffer_list.append(None)
                else:
                    momentum_buffer_list.append(state['momentum_buffer'])

        return has_sparse_grad

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            d_p_list = []
            momentum_buffer_list = []

            has_sparse_grad = self._init_group(group, params_with_grad, d_p_list, momentum_buffer_list)

            sgd(params_with_grad,
                d_p_list,
                momentum_buffer_list,
                weight_decay=group['weight_decay'],
                momentum=group['momentum'],
                lr=group['lr'],
                dampening=group['dampening'],
                nesterov=group['nesterov'],
                maximize=group['maximize'],
                has_sparse_grad=has_sparse_grad,
                foreach=group['foreach'])

            # update momentum_buffers in state
            for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
                state = self.state[p]
                state['momentum_buffer'] = momentum_buffer

        return loss
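
    # A minimal sketch of the closure form of ``step`` documented above
    # (illustrative only; ``model``, ``loss_fn`` and ``dataset`` are assumed to
    # exist in user code and are not part of this module):
    #
    #     optimizer = SGD(model.parameters(), lr=0.1, momentum=0.9)
    #     for input, target in dataset:
    #         def closure():
    #             optimizer.zero_grad()
    #             loss = loss_fn(model(input), target)
    #             loss.backward()
    #             return loss
    #         optimizer.step(closure)
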
SGD.__doc__ = r"""\
    Implements stochastic gradient descent (optionally with momentum).

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                          \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta)
                \text{ (objective)}, \: \lambda \text{ (weight decay)},                   \\
            &\hspace{13mm} \:\mu \text{ (momentum)}, \:\tau \text{ (dampening)},
            \:\textit{ nesterov,}\:\textit{ maximize}                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                  \\
            &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1})                \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                      \\
            &\hspace{5mm}\textbf{if} \: \mu \neq 0                                        \\
            &\hspace{10mm}\textbf{if} \: t > 1                                            \\
            &\hspace{15mm} \textbf{b}_t \leftarrow \mu \textbf{b}_{t-1} + (1-\tau) g_t    \\
            &\hspace{10mm}\textbf{else}                                                   \\
            &\hspace{15mm} \textbf{b}_t \leftarrow g_t                                    \\
            &\hspace{10mm}\textbf{if} \: \textit{nesterov}                                \\
            &\hspace{15mm} g_t \leftarrow g_{t} + \mu \textbf{b}_t                        \\
            &\hspace{10mm}\textbf{else}                                                   \\[-1.ex]
            &\hspace{15mm} g_t \leftarrow \textbf{b}_t                                    \\
            &\hspace{5mm}\textbf{if} \: \textit{maximize}                                 \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} + \gamma g_t                   \\[-1.ex]
            &\hspace{5mm}\textbf{else}                                                    \\[-1.ex]
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma g_t                   \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \: \theta_t                                                      \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.
    """ + r"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
        {maximize}
        {foreach}
        {differentiable}
    """.format(maximize=_maximize_doc, foreach=_foreach_doc, differentiable=_differentiable_doc) + r"""

    Example:
        >>> # xdoctest: +SKIP
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf

    .. note::
        The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et al. and implementations in some other frameworks.

        Considering the specific case of Momentum, the update can be written as

        .. math::
            \begin{aligned}
                v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
                p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
            \end{aligned}

        where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
        parameters, gradient, velocity, and momentum respectively.

        This is in contrast to Sutskever et al. and
        other frameworks which employ an update of the form

        .. math::
            \begin{aligned}
                v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
                p_{t+1} & = p_{t} - v_{t+1}.
            \end{aligned}

        The Nesterov version is analogously modified.

        Moreover, the initial value of the momentum buffer is set to the
        gradient value at the first step. This is in contrast to some other
        frameworks that initialize it to all zeros.
    """
def sgd(params: List[Tensor],
        d_p_list: List[Tensor],
        momentum_buffer_list: List[Optional[Tensor]],
        # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
        # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
        has_sparse_grad: bool = None,
        foreach: Optional[bool] = None,
        *,
        weight_decay: float,
        momentum: float,
        lr: float,
        dampening: float,
        nesterov: bool,
        maximize: bool):
    r"""Functional API that performs SGD algorithm computation.

    See :class:`~torch.optim.SGD` for details.
    """

    if foreach is None:
        # why must we be explicit about an if statement for torch.jit.is_scripting here?
        # because JIT can't handle Optionals nor fancy conditionals when scripting
        if not torch.jit.is_scripting():
            _, foreach = _default_to_fused_or_foreach(params, differentiable=False, use_fused=False)
        else:
            foreach = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_sgd
    else:
        func = _single_tensor_sgd

    func(params,
         d_p_list,
         momentum_buffer_list,
         weight_decay=weight_decay,
         momentum=momentum,
         lr=lr,
         dampening=dampening,
         nesterov=nesterov,
         has_sparse_grad=has_sparse_grad,
         maximize=maximize)
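
# A minimal sketch of calling the functional API above directly (illustrative
# only; the tensor values are made up and ``has_sparse_grad``/``foreach`` are
# left at their defaults):
#
#     param = torch.zeros(3)
#     grad = torch.ones(3)
#     momentum_buffers: List[Optional[Tensor]] = [None]
#     sgd([param], [grad], momentum_buffers,
#         weight_decay=0.0, momentum=0.9, lr=0.1,
#         dampening=0.0, nesterov=False, maximize=False)
#     # param is now -0.1 everywhere and momentum_buffers[0] holds the new buffer
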
def _single_tensor_sgd(params: List[Tensor],
                       d_p_list: List[Tensor],
                       momentum_buffer_list: List[Optional[Tensor]],
                       *,
                       weight_decay: float,
                       momentum: float,
                       lr: float,
                       dampening: float,
                       nesterov: bool,
                       maximize: bool,
                       has_sparse_grad: bool):

    for i, param in enumerate(params):
        d_p = d_p_list[i] if not maximize else -d_p_list[i]

        if weight_decay != 0:
            # L2 penalty: fold the decay term into the gradient (out-of-place,
            # so the original .grad tensor is left untouched)
            d_p = d_p.add(param, alpha=weight_decay)

        if momentum != 0:
            buf = momentum_buffer_list[i]

            if buf is None:
                # lazily initialize the buffer with the first gradient
                # (including weight decay, if any)
                buf = torch.clone(d_p).detach()
                momentum_buffer_list[i] = buf
            else:
                buf.mul_(momentum).add_(d_p, alpha=1 - dampening)

            if nesterov:
                d_p = d_p.add(buf, alpha=momentum)
            else:
                d_p = buf

        param.add_(d_p, alpha=-lr)

def _multi_tensor_sgd(params: List[Tensor],
                      grads: List[Tensor],
                      momentum_buffer_list: List[Optional[Tensor]],
                      *,
                      weight_decay: float,
                      momentum: float,
                      lr: float,
                      dampening: float,
                      nesterov: bool,
                      maximize: bool,
                      has_sparse_grad: bool):

    if len(params) == 0:
        return

    # bucket the tensors by device and dtype so each bucket can be updated with
    # batched _foreach_* calls
    grouped_tensors = _group_tensors_by_device_and_dtype([params, grads, momentum_buffer_list], with_indices=True)
    for device_params, device_grads, device_momentum_buffer_list, indices in grouped_tensors.values():
        device_has_sparse_grad = any(grad.is_sparse for grad in device_grads)

        if maximize:
            device_grads = torch._foreach_neg(tuple(device_grads))  # type: ignore[assignment]

        if weight_decay != 0:
            device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)

        if momentum != 0:
            bufs = []

            all_states_with_momentum_buffer = True
            for i in range(len(device_momentum_buffer_list)):
                if device_momentum_buffer_list[i] is None:
                    all_states_with_momentum_buffer = False
                    break
                else:
                    bufs.append(device_momentum_buffer_list[i])

            if all_states_with_momentum_buffer:
                torch._foreach_mul_(bufs, momentum)
                torch._foreach_add_(bufs, device_grads, alpha=1 - dampening)
            else:
                bufs = []
                for i in range(len(device_momentum_buffer_list)):
                    if device_momentum_buffer_list[i] is None:
                        buf = device_momentum_buffer_list[i] = momentum_buffer_list[indices[i]] = \
                            torch.clone(device_grads[i]).detach()
                    else:
                        buf = device_momentum_buffer_list[i]
                        buf.mul_(momentum).add_(device_grads[i], alpha=1 - dampening)

                    bufs.append(buf)

            if nesterov:
                torch._foreach_add_(device_grads, bufs, alpha=momentum)
            else:
                device_grads = bufs

        if not device_has_sparse_grad:
            torch._foreach_add_(device_params, device_grads, alpha=-lr)
        else:
            # foreach APIs don't support sparse
            for i in range(len(device_params)):
                device_params[i].add_(device_grads[i], alpha=-lr)
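
# A minimal equivalence sketch (illustrative only; the tensor values are made
# up): pushing the same update through both code paths should leave the
# parameters in the same state, which is a handy sanity check when touching
# either implementation.
#
#     p1, p2 = torch.zeros(4), torch.zeros(4)
#     g = torch.full((4,), 2.0)
#     kwargs = dict(weight_decay=0.0, momentum=0.9, lr=0.1, dampening=0.0,
#                   nesterov=False, maximize=False, has_sparse_grad=False)
#     _single_tensor_sgd([p1], [g.clone()], [None], **kwargs)
#     _multi_tensor_sgd([p2], [g.clone()], [None], **kwargs)
#     assert torch.allclose(p1, p2)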