grad_scaler.py

from collections import defaultdict, abc
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, cast
import inspect
import warnings

import torch
from .common import amp_definitely_not_available


__all__ = ["OptState", "GradScaler"]


class _MultiDeviceReplicator:
    """
    Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
    """
    def __init__(self, master_tensor: torch.Tensor) -> None:
        assert master_tensor.is_cuda or master_tensor.device.type == 'xla'
        self.master = master_tensor
        self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}

    def get(self, device) -> torch.Tensor:
        retval = self._per_device_tensors.get(device, None)
        if retval is None:
            retval = self.master.to(device=device, non_blocking=True, copy=True)
            self._per_device_tensors[device] = retval
        return retval
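

# Example (illustrative sketch, assuming at least two CUDA devices are available):
# how the replicator lazily caches per-device copies of its master tensor.
#
#     scale = torch.full((1,), 65536.0, device="cuda:0")
#     rep = _MultiDeviceReplicator(scale)
#     rep.get(torch.device("cuda:1"))  # first call copies the tensor to cuda:1
#     rep.get(torch.device("cuda:1"))  # second call returns the cached copy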


# Defines default_factory for GradScaler's _per_optimizer_states defaultdict,
# as well as associated "enum" values. Prefers defining these at top level because
# - Lambdas can't be pickled, so we don't want to supply a lambda as the factory.
# - Defining READY, UNSCALED, STEPPED and _refresh_per_optimizer_state within GradScaler
#   causes a circular reference, which we'd rather avoid.
class OptState(Enum):
    READY = 0
    UNSCALED = 1
    STEPPED = 2


def _refresh_per_optimizer_state():
    return {"stage": OptState.READY, "found_inf_per_device": {}}


class GradScaler:
    _scale: Optional[torch.Tensor]
    _growth_tracker: Optional[torch.Tensor]
    _per_optimizer_states: Dict[int, Dict[str, Any]]
    """
    An instance ``scaler`` of :class:`GradScaler` helps perform the steps of gradient scaling
    conveniently.

    * ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor.
    * ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``.
    * ``scaler.update()`` updates ``scaler``'s scale factor.

    Example::

        # Creates a GradScaler once at the beginning of training.
        scaler = GradScaler()

        for epoch in epochs:
            for input, target in data:
                optimizer.zero_grad()
                output = model(input)
                loss = loss_fn(output, target)

                # Scales loss. Calls backward() on scaled loss to create scaled gradients.
                scaler.scale(loss).backward()

                # scaler.step() first unscales gradients of the optimizer's params.
                # If gradients don't contain infs/NaNs, optimizer.step() is then called,
                # otherwise, optimizer.step() is skipped.
                scaler.step(optimizer)

                # Updates the scale for next iteration.
                scaler.update()

    See the :ref:`Automatic Mixed Precision examples<amp-examples>` for usage
    (along with autocasting) in more complex cases like gradient clipping, gradient accumulation,
    gradient penalty, and multiple losses/optimizers.

    ``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow,
    a large scale factor should be used. However, ``float16`` values can "overflow" (become inf or NaN) if
    the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used
    without incurring inf or NaN gradient values.
    ``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every
    ``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`).

    * If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params
      themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``.

    * If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual.
      If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by
      ``growth_factor``.

    The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its
    value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these
    iterations. After that, step skipping should occur rarely (once every few hundred or thousand iterations).

    Args:
        init_scale (float, optional, default=2.**16):  Initial scale factor.
        growth_factor (float, optional, default=2.0):  Factor by which the scale is multiplied during
            :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
        backoff_factor (float, optional, default=0.5):  Factor by which the scale is multiplied during
            :meth:`update` if inf/NaN gradients occur in an iteration.
        growth_interval (int, optional, default=2000):  Number of consecutive iterations without inf/NaN gradients
            that must occur for the scale to be multiplied by ``growth_factor``.
        enabled (bool, optional):  If ``False``, disables gradient scaling. :meth:`step` simply
            invokes the underlying ``optimizer.step()``, and other methods become no-ops.
            Default: ``True``
    """

    def __init__(self,
                 init_scale=2.**16,
                 growth_factor=2.0,
                 backoff_factor=0.5,
                 growth_interval=2000,
                 enabled=True):
        if enabled and amp_definitely_not_available():
            warnings.warn("torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling.")
            self._enabled = False
        else:
            self._enabled = enabled

        if self._enabled:
            assert growth_factor > 1.0, "The growth factor must be > 1.0."
            assert backoff_factor < 1.0, "The backoff factor must be < 1.0."

            self._init_scale = init_scale
            # self._scale will be lazily initialized during the first call to scale()
            self._scale = None
            self._growth_factor = growth_factor
            self._backoff_factor = backoff_factor
            self._growth_interval = growth_interval
            self._init_growth_tracker = 0
            # self._growth_tracker will be lazily initialized during the first call to scale()
            self._growth_tracker = None
            self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)

    def _check_scale_growth_tracker(self, funcname) -> Tuple[torch.Tensor, torch.Tensor]:
        fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration."
        assert self._scale is not None, "Attempted {} but _scale is None. ".format(funcname) + fix
        assert self._growth_tracker is not None, "Attempted {} but _growth_tracker is None. ".format(funcname) + fix
        return (self._scale, self._growth_tracker)

    def _lazy_init_scale_growth_tracker(self, dev):
        assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
        self._scale = torch.full((1,), self._init_scale, dtype=torch.float32, device=dev)
        self._growth_tracker = torch.full((1,), self._init_growth_tracker, dtype=torch.int32, device=dev)

    def scale(self, outputs):
        """
        Multiplies ('scales') a tensor or list of tensors by the scale factor.

        Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
        unmodified.

        Args:
            outputs (Tensor or iterable of Tensors):  Outputs to scale.
        """
        if not self._enabled:
            return outputs

        # Short-circuit for the common case.
        if isinstance(outputs, torch.Tensor):
            assert outputs.is_cuda or outputs.device.type == 'xla'
            if self._scale is None:
                self._lazy_init_scale_growth_tracker(outputs.device)
            assert self._scale is not None
            return outputs * self._scale.to(device=outputs.device, non_blocking=True)

        # Invoke the more complex machinery only if we're treating multiple outputs.
        stash: List[_MultiDeviceReplicator] = []  # holds a reference that can be overwritten by apply_scale

        def apply_scale(val):
            if isinstance(val, torch.Tensor):
                assert val.is_cuda or val.device.type == 'xla'
                if len(stash) == 0:
                    if self._scale is None:
                        self._lazy_init_scale_growth_tracker(val.device)
                    assert self._scale is not None
                    stash.append(_MultiDeviceReplicator(self._scale))
                return val * stash[0].get(val.device)
            elif isinstance(val, abc.Iterable):
                iterable = map(apply_scale, val)
                if isinstance(val, (list, tuple)):
                    return type(val)(iterable)
                else:
                    return iterable
            else:
                raise ValueError("outputs must be a Tensor or an iterable of Tensors")

        return apply_scale(outputs)
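
    # Example (illustrative sketch): ``scale`` also accepts an iterable of tensors, e.g.
    # two losses driving separate backward passes. ``loss0`` and ``loss1`` are assumed to
    # come from the surrounding training loop; ``retain_graph`` is only needed if the
    # losses share parts of the graph.
    #
    #     scaled_loss0, scaled_loss1 = scaler.scale([loss0, loss1])
    #     scaled_loss0.backward(retain_graph=True)
    #     scaled_loss1.backward()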

    def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16):
        per_device_inv_scale = _MultiDeviceReplicator(inv_scale)
        per_device_found_inf = _MultiDeviceReplicator(found_inf)

        # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
        # There could be hundreds of grads, so we'd like to iterate through them just once.
        # However, we don't know their devices or dtypes in advance.

        # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
        # Google says mypy struggles with defaultdicts type annotations.
        per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))  # type: ignore[var-annotated]
        with torch.no_grad():
            for group in optimizer.param_groups:
                for param in group["params"]:
                    if param.grad is None:
                        continue
                    if (not allow_fp16) and param.grad.dtype == torch.float16:
                        raise ValueError("Attempting to unscale FP16 gradients.")
                    if param.grad.is_sparse:
                        # is_coalesced() == False means the sparse grad has values with duplicate indices.
                        # coalesce() deduplicates indices and adds all values that have the same index.
                        # For scaled fp16 values, there's a good chance coalescing will cause overflow,
                        # so we should check the coalesced _values().
                        if param.grad.dtype is torch.float16:
                            param.grad = param.grad.coalesce()
                        to_unscale = param.grad._values()
                    else:
                        to_unscale = param.grad

                    # TODO: is there a way to split by device and dtype without appending in the inner loop?
                    per_device_and_dtype_grads[to_unscale.device][to_unscale.dtype].append(to_unscale)

            for device, per_dtype_grads in per_device_and_dtype_grads.items():
                for grads in per_dtype_grads.values():
                    torch._amp_foreach_non_finite_check_and_unscale_(grads,
                                                                     per_device_found_inf.get(device),
                                                                     per_device_inv_scale.get(device))

        return per_device_found_inf._per_device_tensors

    def unscale_(self, optimizer):
        """
        Divides ("unscales") the optimizer's gradient tensors by the scale factor.

        :meth:`unscale_` is optional, serving cases where you need to
        :ref:`modify or inspect gradients<working-with-unscaled-gradients>`
        between the backward pass(es) and :meth:`step`.
        If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.

        Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::

            ...
            scaler.scale(loss).backward()
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
            scaler.step(optimizer)
            scaler.update()

        Args:
            optimizer (torch.optim.Optimizer):  Optimizer that owns the gradients to be unscaled.

        .. note::
            :meth:`unscale_` does not incur a CPU-GPU sync.

        .. warning::
            :meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
            and only after all gradients for that optimizer's assigned parameters have been accumulated.
            Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.

        .. warning::
            :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute.
        """
        if not self._enabled:
            return

        self._check_scale_growth_tracker("unscale_")

        optimizer_state = self._per_optimizer_states[id(optimizer)]

        if optimizer_state["stage"] is OptState.UNSCALED:
            raise RuntimeError("unscale_() has already been called on this optimizer since the last update().")
        elif optimizer_state["stage"] is OptState.STEPPED:
            raise RuntimeError("unscale_() is being called after step().")

        # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
        assert self._scale is not None
        inv_scale = self._scale.double().reciprocal().float()
        found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device)

        optimizer_state["found_inf_per_device"] = self._unscale_grads_(optimizer, inv_scale, found_inf, False)
        optimizer_state["stage"] = OptState.UNSCALED

    def _maybe_opt_step(self, optimizer, optimizer_state, *args, **kwargs):
        retval = None
        if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()):
            retval = optimizer.step(*args, **kwargs)
        return retval

    def step(self, optimizer, *args, **kwargs):
        """
        :meth:`step` carries out the following two operations:

        1.  Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer``
            earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs.
        2.  If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled
            gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params.

        ``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``.

        Returns the return value of ``optimizer.step(*args, **kwargs)``.

        Args:
            optimizer (torch.optim.Optimizer):  Optimizer that applies the gradients.
            args:  Any arguments.
            kwargs:  Any keyword arguments.

        .. warning::
            Closure use is not currently supported.
        """
        if (not self._enabled):
            return optimizer.step(*args, **kwargs)

        if "closure" in kwargs:
            raise RuntimeError("Closure use is not currently supported if GradScaler is enabled.")

        self._check_scale_growth_tracker("step")

        optimizer_state = self._per_optimizer_states[id(optimizer)]

        if optimizer_state["stage"] is OptState.STEPPED:
            raise RuntimeError("step() has already been called since the last update().")

        retval = None

        if (hasattr(optimizer, "_step_supports_amp_scaling") and optimizer._step_supports_amp_scaling):
            # This optimizer has customized scale-handling logic, so we can call optimizer.step() directly.
            # The contract with custom optimizers is that their step() should accept an additional,
            # optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information:
            # it can query its own state, invoke unscale_ on itself, etc.
            # The contract above is being deprecated to avoid introducing a `grad_scaler: GradScaler` argument
            # to `Optimizer.step`. The new behavior is going to add two Tensor attributes, `grad_scale`
            # and `found_inf`, to the passed optimizer so that the optimizer can utilize those
            # to skip the parameter updates or unscale gradients before updating parameters in
            # the fused kernel, e.g. `FusedAdamMathFunctor`.
            kwargs_ = kwargs
            has_grad_scaler_kwarg = "grad_scaler" in inspect.signature(optimizer.step).parameters
            if has_grad_scaler_kwarg:
                warnings.warn(
                    "GradScaler is going to stop passing itself as a keyword argument to the passed "
                    "optimizer. In the near future GradScaler registers `grad_scale: Tensor` and "
                    "`found_inf: Tensor` to the passed optimizer and lets the optimizer use them directly.",
                    FutureWarning)
                kwargs_.update({"grad_scaler": self})
            else:
                scaler = self._get_scale_async()
                found_inf = cast(
                    torch.Tensor,
                    sum([
                        t.to(scaler.device, non_blocking=True) for t in self._check_inf_per_device(optimizer).values()
                    ])
                )
                optimizer.grad_scale = None if optimizer_state["stage"] == OptState.UNSCALED else scaler
                optimizer.found_inf = found_inf
            retval = optimizer.step(*args, **kwargs_)
            optimizer_state["stage"] = OptState.STEPPED
            if not has_grad_scaler_kwarg:
                del optimizer.grad_scale
                del optimizer.found_inf
            return retval

        if optimizer_state["stage"] is OptState.READY:
            self.unscale_(optimizer)

        assert len(optimizer_state["found_inf_per_device"]) > 0, "No inf checks were recorded for this optimizer."

        retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs)

        optimizer_state["stage"] = OptState.STEPPED

        return retval
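
    # Illustrative sketch of the ``_step_supports_amp_scaling`` contract described in the
    # comments above: an optimizer that opts in receives ``grad_scale`` and ``found_inf``
    # tensor attributes from ``step`` and handles unscaling/inf-skipping itself. The class
    # below is hypothetical and not part of this file.
    #
    #     class MyFusedOptimizer(torch.optim.Optimizer):
    #         _step_supports_amp_scaling = True
    #
    #         def step(self, closure=None):
    #             grad_scale = getattr(self, "grad_scale", None)  # set by GradScaler.step
    #             found_inf = getattr(self, "found_inf", None)    # nonzero => skip the update
    #             ...  # a fused kernel would consume grad_scale / found_inf directly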

    def update(self, new_scale=None):
        """
        Updates the scale factor.

        If any optimizer steps were skipped, the scale is multiplied by ``backoff_factor``
        to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
        the scale is multiplied by ``growth_factor`` to increase it.

        Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
        used directly; it's used to fill GradScaler's internal scale tensor. So if
        ``new_scale`` was a tensor, later in-place changes to that tensor will not further
        affect the scale GradScaler uses internally.)

        Args:
            new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None):  New scale factor.

        .. warning::
            :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
            been invoked for all optimizers used this iteration.
        """
        if not self._enabled:
            return

        _scale, _growth_tracker = self._check_scale_growth_tracker("update")

        if new_scale is not None:
            # Accept a new user-defined scale.
            if isinstance(new_scale, float):
                self._scale.fill_(new_scale)  # type: ignore[union-attr]
            else:
                reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False."
                assert isinstance(new_scale, torch.cuda.FloatTensor), reason  # type: ignore[attr-defined]
                assert new_scale.numel() == 1, reason
                assert new_scale.requires_grad is False, reason
                self._scale.copy_(new_scale)  # type: ignore[union-attr]
        else:
            # Consume shared inf/nan data collected from optimizers to update the scale.
            # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
            found_infs = [found_inf.to(device=_scale.device, non_blocking=True)
                          for state in self._per_optimizer_states.values()
                          for found_inf in state["found_inf_per_device"].values()]

            assert len(found_infs) > 0, "No inf checks were recorded prior to update."

            found_inf_combined = found_infs[0]
            if len(found_infs) > 1:
                for i in range(1, len(found_infs)):
                    found_inf_combined += found_infs[i]

            torch._amp_update_scale_(_scale,
                                     _growth_tracker,
                                     found_inf_combined,
                                     self._growth_factor,
                                     self._backoff_factor,
                                     self._growth_interval)

        # To prepare for next iteration, clear the data collected from optimizers this iteration.
        self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
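
    # Illustrative sketch: besides the automatic adjustment at the end of each iteration,
    # ``update`` accepts a manual override. The value 2.**12 below is an arbitrary example.
    #
    #     scaler.update()                    # usual dynamic update
    #     scaler.update(new_scale=2.**12)    # force the scale to a specific value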

    def _get_scale_async(self):
        return self._scale

    def get_scale(self):
        """
        Returns a Python float containing the current scale, or 1.0 if scaling is disabled.

        .. warning::
            :meth:`get_scale` incurs a CPU-GPU sync.
        """
        if self._enabled:
            return self._init_scale if self._scale is None else self._get_scale_async().item()
        else:
            return 1.0

    def get_growth_factor(self):
        r"""
        Returns a Python float containing the scale growth factor.
        """
        return self._growth_factor

    def set_growth_factor(self, new_factor):
        r"""
        Args:
            new_factor (float):  Value to use as the new scale growth factor.
  372. """
  373. self._growth_factor = new_factor
  374. def get_backoff_factor(self):
  375. r"""
  376. Returns a Python float containing the scale backoff factor.
  377. """
  378. return self._backoff_factor
  379. def set_backoff_factor(self, new_factor):
  380. r"""
  381. Args:
            new_factor (float):  Value to use as the new scale backoff factor.
  383. """
  384. self._backoff_factor = new_factor
  385. def get_growth_interval(self):
  386. r"""
  387. Returns a Python int containing the growth interval.
  388. """
  389. return self._growth_interval
  390. def set_growth_interval(self, new_interval):
  391. r"""
  392. Args:
  393. new_interval (int): Value to use as the new growth interval.
  394. """
  395. self._growth_interval = new_interval
  396. def _get_growth_tracker(self):
  397. if self._enabled:
  398. return self._init_growth_tracker if self._growth_tracker is None else self._growth_tracker.item()
  399. else:
  400. return 0

    def is_enabled(self):
        r"""
        Returns a bool indicating whether this instance is enabled.
        """
        return self._enabled

    def state_dict(self):
        r"""
        Returns the state of the scaler as a :class:`dict`. It contains five entries:

        * ``"scale"`` - a Python float containing the current scale
        * ``"growth_factor"`` - a Python float containing the current growth factor
        * ``"backoff_factor"`` - a Python float containing the current backoff factor
        * ``"growth_interval"`` - a Python int containing the current growth interval
        * ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps.

        If this instance is not enabled, returns an empty dict.

        .. note::
           If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict`
           should be called after :meth:`update`.
        """
        return {"scale": self.get_scale(),
                "growth_factor": self._growth_factor,
                "backoff_factor": self._backoff_factor,
                "growth_interval": self._growth_interval,
                "_growth_tracker": self._get_growth_tracker()} if self._enabled else {}

    def load_state_dict(self, state_dict):
        r"""
        Loads the scaler state. If this instance is disabled, :meth:`load_state_dict` is a no-op.

        Args:
           state_dict (dict): scaler state. Should be an object returned from a call to :meth:`state_dict`.
        """
        if not self._enabled:
            return

        if len(state_dict) == 0:
            raise RuntimeError("The source state dict is empty, possibly because it was saved "
                               "from a disabled instance of GradScaler.")

        self._init_scale = state_dict["scale"]
        if self._scale is not None:
            self._scale.fill_(state_dict["scale"])
        self._growth_factor = state_dict["growth_factor"]
        self._backoff_factor = state_dict["backoff_factor"]
        self._growth_interval = state_dict["growth_interval"]
        self._init_growth_tracker = state_dict["_growth_tracker"]
        if self._growth_tracker is not None:
            self._growth_tracker.fill_(state_dict["_growth_tracker"])
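
    # Illustrative sketch: checkpointing a scaler together with the model and optimizer.
    # ``model``, ``optimizer``, and ``PATH`` are assumed to exist in the training script,
    # and state_dict() is called after update(), per the note above.
    #
    #     torch.save({"model": model.state_dict(),
    #                 "optimizer": optimizer.state_dict(),
    #                 "scaler": scaler.state_dict()}, PATH)
    #
    #     checkpoint = torch.load(PATH)
    #     scaler.load_state_dict(checkpoint["scaler"])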

    def __getstate__(self):
        state = self.__dict__.copy()
        if self._enabled:
            assert len(self._per_optimizer_states) == 0, "A GradScaler instance may only be pickled at the beginning "\
                                                         "of an iteration, or at the end after scaler.update()."
            # Pickling _scale and _growth_tracker Tensors directly triggers
            # "warnings.warn("pickle support for Storage will be removed in 1.5..."
            # so instead, we set the unpickled instance up to reinitialize them lazily.
            state['_init_scale'] = self.get_scale()
            state['_init_growth_tracker'] = self._get_growth_tracker()
            state['_scale'] = None
            state['_growth_tracker'] = None
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
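
    # Illustrative sketch: because __getstate__ drops the scale/growth-tracker tensors,
    # a GradScaler should only be copied or pickled between iterations, e.g. right after update().
    #
    #     import copy
    #     scaler.update()
    #     scaler_copy = copy.deepcopy(scaler)  # tensors are re-created lazily on the next scale() call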

    def _check_inf_per_device(self, optimizer):
        _scale, _ = self._check_scale_growth_tracker("_check_inf_per_device")

        dummy_inv_scale = torch.full((1,), 1.0, dtype=torch.float32, device=_scale.device)
        found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=_scale.device)

        self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] = \
            self._unscale_grads_(optimizer, dummy_inv_scale, found_inf, True)

        return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]

    def _found_inf_per_device(self, optimizer):
        return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]