import itertools
import math
from copy import deepcopy
import warnings

import torch
from torch.nn import Module
from torch.optim.lr_scheduler import LRScheduler

__all__ = ['AveragedModel', 'update_bn', 'SWALR']

class AveragedModel(Module):
    r"""Implements averaged model for Stochastic Weight Averaging (SWA).

    Stochastic Weight Averaging was proposed in `Averaging Weights Leads to
    Wider Optima and Better Generalization`_ by Pavel Izmailov, Dmitrii
    Podoprikhin, Timur Garipov, Dmitry Vetrov and Andrew Gordon Wilson
    (UAI 2018).

    The :class:`AveragedModel` class creates a copy of the provided module
    :attr:`model` on the device :attr:`device` and allows computing running
    averages of the parameters of the :attr:`model`.

    Args:
        model (torch.nn.Module): model to use with SWA
        device (torch.device, optional): if provided, the averaged model will be
            stored on the :attr:`device`
        avg_fn (function, optional): the averaging function used to update
            parameters; the function must take in the current value of the
            :class:`AveragedModel` parameter, the current value of the
            :attr:`model` parameter and the number of models already averaged;
            if ``None``, an equally weighted average is used (default: None)
        use_buffers (bool): if ``True``, it will compute running averages for
            both the parameters and the buffers of the model. (default: ``False``)

    Example:
        >>> # xdoctest: +SKIP("undefined variables")
        >>> loader, optimizer, model, loss_fn = ...
        >>> swa_model = torch.optim.swa_utils.AveragedModel(model)
        >>> scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
        >>>                                     T_max=300)
        >>> swa_start = 160
        >>> swa_scheduler = SWALR(optimizer, swa_lr=0.05)
        >>> for i in range(300):
        >>>     for input, target in loader:
        >>>         optimizer.zero_grad()
        >>>         loss_fn(model(input), target).backward()
        >>>         optimizer.step()
        >>>     if i > swa_start:
        >>>         swa_model.update_parameters(model)
        >>>         swa_scheduler.step()
        >>>     else:
        >>>         scheduler.step()
        >>>
        >>> # Update bn statistics for the swa_model at the end
        >>> torch.optim.swa_utils.update_bn(loader, swa_model)

    You can also use custom averaging functions with the `avg_fn` parameter.
    If no averaging function is provided, the default is to compute an
    equally weighted average of the weights.

    Example:
        >>> # xdoctest: +SKIP("undefined variables")
        >>> # Compute exponential moving averages of the weights and buffers
        >>> ema_avg = lambda averaged_model_parameter, model_parameter, num_averaged: (
        ...     0.1 * averaged_model_parameter + 0.9 * model_parameter)
        >>> swa_model = torch.optim.swa_utils.AveragedModel(model, avg_fn=ema_avg, use_buffers=True)

    .. note::
        When using SWA with models containing Batch Normalization you may
        need to update the activation statistics for Batch Normalization.
        This can be done either by using :meth:`torch.optim.swa_utils.update_bn`
        or by setting :attr:`use_buffers` to ``True``. The first approach updates the
        statistics in a post-training step by passing data through the model. The
        second does it during the parameter update phase by averaging all buffers.
        Empirical evidence has shown that updating the statistics in normalization
        layers increases accuracy, but you may wish to empirically test which
        approach yields the best results in your problem.

    .. note::
        :attr:`avg_fn` is not saved in the :meth:`state_dict` of the model.

    .. note::
        When :meth:`update_parameters` is called for the first time (i.e.
        :attr:`n_averaged` is `0`) the parameters of `model` are copied
        to the parameters of :class:`AveragedModel`. For every subsequent
        call of :meth:`update_parameters` the function `avg_fn` is used
        to update the parameters.

    .. _Averaging Weights Leads to Wider Optima and Better Generalization:
        https://arxiv.org/abs/1803.05407
    .. _There Are Many Consistent Explanations of Unlabeled Data: Why You Should
        Average:
        https://arxiv.org/abs/1806.05594
    .. _SWALP: Stochastic Weight Averaging in Low-Precision Training:
        https://arxiv.org/abs/1904.11943
    .. _Stochastic Weight Averaging in Parallel: Large-Batch Training That
        Generalizes Well:
        https://arxiv.org/abs/2001.02312
    """
    def __init__(self, model, device=None, avg_fn=None, use_buffers=False):
        super().__init__()
        self.module = deepcopy(model)
        if device is not None:
            self.module = self.module.to(device)
        self.register_buffer('n_averaged',
                             torch.tensor(0, dtype=torch.long, device=device))
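        # Default averaging function: an incremental equal-weight mean,
        # avg_{n+1} = avg_n + (p - avg_n) / (n + 1), which after n updates
        # equals the plain mean of all n model snapshots seen so far.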
        if avg_fn is None:
            def avg_fn(averaged_model_parameter, model_parameter, num_averaged):
                return averaged_model_parameter + \
                    (model_parameter - averaged_model_parameter) / (num_averaged + 1)
        self.avg_fn = avg_fn
        self.use_buffers = use_buffers

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)

    def update_parameters(self, model):
        self_param = (
            itertools.chain(self.module.parameters(), self.module.buffers())
            if self.use_buffers else self.parameters()
        )
        model_param = (
            itertools.chain(model.parameters(), model.buffers())
            if self.use_buffers else model.parameters()
        )
        for p_swa, p_model in zip(self_param, model_param):
            device = p_swa.device
            p_model_ = p_model.detach().to(device)
            if self.n_averaged == 0:
                p_swa.detach().copy_(p_model_)
            else:
                p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_,
                                                 self.n_averaged.to(device)))

        if not self.use_buffers:
            # If not applying running averages to the buffers, keep the
            # buffers in sync with the source model.
            for b_swa, b_model in zip(self.module.buffers(), model.buffers()):
                b_swa.detach().copy_(b_model.detach().to(device))
        self.n_averaged += 1


@torch.no_grad()
def update_bn(loader, model, device=None):
    r"""Updates BatchNorm running_mean, running_var buffers in the model.

    It performs one pass over data in `loader` to estimate the activation
    statistics for BatchNorm layers in the model.

    Args:
        loader (torch.utils.data.DataLoader): dataset loader to compute the
            activation statistics on. Each data batch should be either a
            tensor, or a list/tuple whose first element is a tensor
            containing data.
        model (torch.nn.Module): model for which we seek to update BatchNorm
            statistics.
        device (torch.device, optional): If set, data will be transferred to
            :attr:`device` before being passed into :attr:`model`.

    Example:
        >>> # xdoctest: +SKIP("Undefined variables")
        >>> loader, model = ...
        >>> torch.optim.swa_utils.update_bn(loader, model)

    .. note::
        The `update_bn` utility assumes that each data batch in :attr:`loader`
        is either a tensor or a list or tuple of tensors; in the latter case it
        is assumed that :meth:`model.forward()` should be called on the first
        element of the list or tuple corresponding to the data batch.
    """
    momenta = {}
    for module in model.modules():
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module.running_mean = torch.zeros_like(module.running_mean)
            module.running_var = torch.ones_like(module.running_var)
            momenta[module] = module.momentum

    if not momenta:
        return

    was_training = model.training
    model.train()
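    # With momentum set to None, BatchNorm accumulates a cumulative
    # (equal-weight) moving average over the whole pass instead of an
    # exponential one; num_batches_tracked is reset so the average starts fresh.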
    for module in momenta.keys():
        module.momentum = None
        module.num_batches_tracked *= 0

    for input in loader:
        if isinstance(input, (list, tuple)):
            input = input[0]
        if device is not None:
            input = input.to(device)

        model(input)

    for bn_module in momenta.keys():
        bn_module.momentum = momenta[bn_module]
    model.train(was_training)


class SWALR(LRScheduler):
    r"""Anneals the learning rate in each parameter group to a fixed value.

    This learning rate scheduler is meant to be used with the Stochastic Weight
    Averaging (SWA) method (see `torch.optim.swa_utils.AveragedModel`).

    Args:
        optimizer (torch.optim.Optimizer): wrapped optimizer
        swa_lr (float or list): the learning rate value for all param groups
            together or separately for each group.
        anneal_epochs (int): number of epochs in the annealing phase
            (default: 10)
        anneal_strategy (str): "cos" or "linear"; specifies the annealing
            strategy: "cos" for cosine annealing, "linear" for linear annealing
            (default: "cos")
        last_epoch (int): the index of the last epoch (default: -1)

    The :class:`SWALR` scheduler can be used together with other
    schedulers to switch to a constant learning rate late in the training
    as in the example below.

    Example:
        >>> # xdoctest: +SKIP("Undefined variables")
        >>> loader, optimizer, model = ...
        >>> lr_lambda = lambda epoch: 0.9
        >>> scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer,
        >>>        lr_lambda=lr_lambda)
        >>> swa_scheduler = torch.optim.swa_utils.SWALR(optimizer,
        >>>        anneal_strategy="linear", anneal_epochs=20, swa_lr=0.05)
        >>> swa_start = 160
        >>> for i in range(300):
        >>>     for input, target in loader:
        >>>         optimizer.zero_grad()
        >>>         loss_fn(model(input), target).backward()
        >>>         optimizer.step()
        >>>     if i > swa_start:
        >>>         swa_scheduler.step()
        >>>     else:
        >>>         scheduler.step()

    .. _Averaging Weights Leads to Wider Optima and Better Generalization:
        https://arxiv.org/abs/1803.05407
    """
    def __init__(self, optimizer, swa_lr, anneal_epochs=10, anneal_strategy='cos', last_epoch=-1):
        swa_lrs = self._format_param(optimizer, swa_lr)
        for swa_lr, group in zip(swa_lrs, optimizer.param_groups):
            group['swa_lr'] = swa_lr
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError("anneal_strategy must be one of 'cos' or 'linear', "
                             f"instead got {anneal_strategy}")
        elif anneal_strategy == 'cos':
            self.anneal_func = self._cosine_anneal
        elif anneal_strategy == 'linear':
            self.anneal_func = self._linear_anneal
        if not isinstance(anneal_epochs, int) or anneal_epochs < 0:
            raise ValueError(f"anneal_epochs must be equal to or greater than 0, got {anneal_epochs}")
        self.anneal_epochs = anneal_epochs
        super().__init__(optimizer, last_epoch)

    @staticmethod
    def _format_param(optimizer, swa_lrs):
        if isinstance(swa_lrs, (list, tuple)):
            if len(swa_lrs) != len(optimizer.param_groups):
                raise ValueError("swa_lr must have the same length as "
                                 f"optimizer.param_groups: swa_lr has {len(swa_lrs)}, "
                                 f"optimizer.param_groups has {len(optimizer.param_groups)}")
            return swa_lrs
        else:
            return [swa_lrs] * len(optimizer.param_groups)
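
    # The anneal functions below map annealing progress t in [0, 1] to a
    # mixing weight alpha in [0, 1]; get_lr uses alpha to interpolate each
    # group's learning rate towards its swa_lr.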
    @staticmethod
    def _linear_anneal(t):
        return t

    @staticmethod
    def _cosine_anneal(t):
        return (1 - math.cos(math.pi * t)) / 2

    @staticmethod
    def _get_initial_lr(lr, swa_lr, alpha):
        if alpha == 1:
            return swa_lr
        return (lr - alpha * swa_lr) / (1 - alpha)

    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.", UserWarning)
        step = self._step_count - 1
        if self.anneal_epochs == 0:
            step = max(1, step)
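        # group['lr'] currently holds the value computed at the previous step,
        # i.e. the interpolation at prev_alpha; _get_initial_lr inverts that
        # interpolation to recover the pre-annealing lr, which is then blended
        # towards swa_lr at the new alpha below.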
        prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs)))
        prev_alpha = self.anneal_func(prev_t)
        prev_lrs = [self._get_initial_lr(group['lr'], group['swa_lr'], prev_alpha)
                    for group in self.optimizer.param_groups]
        t = max(0, min(1, step / max(1, self.anneal_epochs)))
        alpha = self.anneal_func(t)
        return [group['swa_lr'] * alpha + lr * (1 - alpha)
                for group, lr in zip(self.optimizer.param_groups, prev_lrs)]
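

if __name__ == "__main__":
    # Minimal end-to-end sketch of the utilities above (illustrative only, not
    # part of the library API): the toy dataset, network and hyperparameters
    # here are arbitrary stand-ins. It trains a small regression net, averages
    # its weights over the tail of training with SWALR, then refreshes
    # BatchNorm statistics with update_bn.
    from torch.utils.data import DataLoader, TensorDataset

    torch.manual_seed(0)
    dataset = TensorDataset(torch.randn(256, 8), torch.randn(256, 1))
    loader = DataLoader(dataset, batch_size=32, shuffle=True)

    model = torch.nn.Sequential(
        torch.nn.Linear(8, 16), torch.nn.BatchNorm1d(16),
        torch.nn.ReLU(), torch.nn.Linear(16, 1))
    loss_fn = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    swa_model = AveragedModel(model)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=30)
    swa_scheduler = SWALR(optimizer, swa_lr=0.01, anneal_epochs=5)
    swa_start = 20

    for epoch in range(30):
        for input, target in loader:
            optimizer.zero_grad()
            loss_fn(model(input), target).backward()
            optimizer.step()
        if epoch > swa_start:
            swa_model.update_parameters(model)
            swa_scheduler.step()
        else:
            scheduler.step()

    # Recompute BatchNorm running statistics for the averaged model.
    update_bn(loader, swa_model)
    print("averaged", int(swa_model.n_averaged), "snapshots")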