zero_redundancy_optimizer.py

  1. # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
  2. #
  3. # This source code is licensed under the BSD license found in the
  4. # LICENSE file in the root directory of this source tree.
  5. import collections
  6. import copy
  7. import enum
  8. import inspect
  9. import io
  10. import logging
  11. from itertools import chain
  12. from typing import Any, Callable, Dict, List, Optional, Set, Type, Union
  13. import torch
  14. import torch.distributed as dist
  15. from torch.distributed.algorithms.join import Join, Joinable, JoinHook
  16. from torch.distributed.optim.utils import functional_optim_map
  17. from torch.optim import Optimizer
  18. logger = logging.getLogger(__name__)
  19. __all__ = ["ZeroRedundancyOptimizer"]
  20. # Credits: classy_vision/generic/distributed_util.py
  21. def _recursive_copy_to_device(
  22. value: Any,
  23. non_blocking: bool,
  24. device: torch.device,
  25. ) -> Any:
  26. r"""
  27. Recursively searches lists, tuples, dicts and copies tensors to device if
  28. possible. Non-tensor values are passed as-is in the result.
  29. .. note:: These are all copies, so if two entries reference the same
  30. object, then after this call there will be two distinct copies of that
  31. object on the device.
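Example (illustrative sketch; ``state`` is a hypothetical nested value, not
taken from this module)::
>>> # xdoctest: +SKIP
>>> state = {"step": torch.tensor(3), "bufs": [torch.randn(2), torch.randn(2)]}
>>> cpu_state = _recursive_copy_to_device(
>>>     state, non_blocking=False, device=torch.device("cpu")
>>> )
>>> cpu_state["bufs"][0].device
device(type='cpu')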
  32. """
  33. if isinstance(value, torch.Tensor):
  34. return value.to(device, non_blocking=non_blocking)
  35. if isinstance(value, (list, tuple)):
  36. values = [
  37. _recursive_copy_to_device(val, non_blocking=non_blocking, device=device)
  38. for val in value
  39. ]
  40. return values if isinstance(value, list) else tuple(values)
  41. if isinstance(value, collections.abc.Mapping):
  42. return {
  43. key: _recursive_copy_to_device(
  44. val, non_blocking=non_blocking, device=device
  45. )
  46. for key, val in value.items()
  47. }
  48. return value
  49. def _is_trainable(param: torch.Tensor) -> bool:
  50. r"""
  51. Returns whether a parameter is trainable, where trainability is equivalent to
  52. requiring a gradient.
  53. """
  54. return param.requires_grad
  55. def _broadcast_object(
  56. obj: Any,
  57. src_rank: int,
  58. group: object = dist.group.WORLD,
  59. device: torch.device = torch.device("cpu"),
  60. ) -> Any:
  61. r"""
  62. Broadcasts an object to the given group, sending the object if called from
  63. the source rank and receiving the object otherwise.
  64. Arguments:
  65. obj: object to broadcast; only used if called on the source rank.
  66. src_rank (int): source rank.
  67. group (``ProcessGroup``, optional): group used for the broadcast
  68. (default: ``dist.group.WORLD``).
  69. device (``torch.device``, optional): device to send from or receive
  70. to (default: ``torch.device("cpu")``).
  71. Returns:
  72. The broadcasted object.
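Example (illustrative sketch; assumes the default process group has already
been initialized via ``dist.init_process_group``)::
>>> # xdoctest: +SKIP
>>> # Only rank 0's `obj` is sent; every other rank receives a copy.
>>> obj = {"epoch": 5} if dist.get_rank() == 0 else None
>>> obj = _broadcast_object(obj, src_rank=0, device=torch.device("cpu"))
>>> obj["epoch"]
5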
  73. """
  74. if dist.get_rank() == src_rank:
  75. # Send the object
  76. buffer = io.BytesIO()
  77. torch.save(obj, buffer)
  78. data = bytearray(buffer.getbuffer())
  79. length_tensor = torch.LongTensor([len(data)]).to(device)
  80. data_send_tensor = torch.ByteTensor(data).to(device)
  81. dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False)
  82. dist.broadcast(data_send_tensor, src=src_rank, group=group, async_op=False)
  83. else:
  84. # Receive the object
  85. length_tensor = torch.LongTensor([0]).to(device)
  86. dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False)
  87. data_recv_tensor = torch.empty(
  88. [int(length_tensor.item())], dtype=torch.uint8, device=device
  89. )
  90. dist.broadcast(data_recv_tensor, src=src_rank, group=group, async_op=False)
  91. buffer = io.BytesIO(data_recv_tensor.cpu().numpy())
  92. obj = torch.load(buffer, map_location=device)
  93. return obj
  94. class _ZeROJoinHook(JoinHook):
  95. def __init__(self, zero):
  96. assert isinstance(zero, ZeroRedundancyOptimizer), (
  97. "ZeRO join hook requires passing in a ZeroRedundancyOptimizer "
  98. "instance as the state"
  99. )
  100. self.zero = zero
  101. super().__init__()
  102. def main_hook(self):
  103. """
  104. Performs an optimizer step, which updates the joined process's shard of
  105. the parameters and broadcasts those parameters.
  106. """
  107. self.zero.step()
  108. class _DDPBucketAssignment:
  109. r"""
  110. This represents a :class:`DistributedDataParallel` bucket assignment,
  111. meaning a (possibly non-strict) subset of the parameters corresponding to
  112. a DDP bucket assigned to a rank to update.
  113. Attributes:
  114. bucket_index (int): index of the bucket determined by the DDP gradient
  115. bucket all-reduce order.
  116. parameters (List[torch.Tensor]): model parameters in the bucket
  117. assigned to this rank.
  118. offset (int): offset into the :class:`GradBucket` 's :meth:`parameters`
  119. giving the index of the first element in the passed-in
  120. ``parameters``; this equivalently indexes into the
  121. :class:`GradBucket` 's :meth:`gradients`.
  122. device (torch.device): device on which the parameters are stored.
  123. tensor (torch.Tensor): flattened tensor giving the data of the
  124. parameter subset assigned to the rank.
  125. """
  126. def __init__(
  127. self,
  128. bucket_index: int,
  129. parameters: List[torch.Tensor],
  130. offset: int,
  131. ):
  132. self.bucket_index = bucket_index
  133. self.parameters = parameters
  134. self.offset = offset
  135. if len(self.parameters) == 0:
  136. raise ValueError("Empty bucket assignment")
  137. # DDP guarantees all parameters in the bucket have the same device
  138. self.device: torch.device = self.parameters[0].device
  139. self.tensor: Optional[torch.Tensor] = None
  140. class _OverlapStatus(enum.IntEnum):
  141. r"""
  142. This defines the three possible statuses that
  143. :class:`ZeroRedundancyOptimizer` can be in when overlapping with
  144. :class:`DistributedDataParallel`.
  145. ``UNINITIALIZED``: The ZeRO instance is effectively uninitialized and
  146. is waiting for DDP to finalize its bucketing.
  147. ``DDP_HAS_REBUILT_BUCKETS``: DDP has rebuilt its buckets, meaning that
  148. its bucketing is finalized. The ZeRO instance can now collect the
  149. necessary information about the DDP bucketing.
  150. ``INITIALIZED``: The ZeRO instance is fully initialized and can now
  151. optimize parameters.
  152. """
  153. UNINITIALIZED = 0
  154. DDP_HAS_REBUILT_BUCKETS = 1
  155. INITIALIZED = 2
  156. class _OverlapInfo:
  157. r"""
  158. This contains the information needed by :class:`ZeroRedundancyOptimizer`
  159. to overlap with :class:`DistributedDataParallel`.
  160. Arguments:
  161. world_size (int): world size of the process group being used.
  162. Attributes:
  163. shard_buckets (bool): if ``True``, then the assignment of each
  164. :class:`DistributedDataParallel` bucket is partitioned across
  165. possibly multiple :class:`ZeroRedundancyOptimizer` instances (i.e.
  166. across possibly multiple ranks) to approximate uniformity following
  167. a threshold given by the total parameter size divided by the world
  168. size; if ``False``, then each bucket is wholly assigned to a single
  169. :class:`ZeroRedundancyOptimizer` instance (i.e. to a single rank);
  170. this should be set to the value passed into the hook constructor.
  171. status (_OverlapStatus): current status; see :class:`_OverlapStatus`
  172. for more information.
  173. params_per_bucket (List[List[torch.Tensor]]): ``params_per_bucket[i]``
  174. gives the model parameters in the ``i``th bucket.
  175. params_per_rank (List[List[torch.Tensor]]): ``params_per_rank[i]``
  176. gives the model parameters assigned to the ``i``th rank, where the
  177. parameters are grouped by increasing bucket indices.
  178. offsets (Dict[int, int]): maps from bucket index to the offset in
  179. ``self.params_per_rank[rank]`` giving the index of the first
  180. parameter in that bucket, where ``rank`` is this process's own
  181. rank; the keys of this :class:`dict` are the bucket indices
  182. assigned to this rank.
  183. num_bucket_assignments (int): total number of bucket assignments across
  184. all ranks; this is equal to the number of
  185. :class:`DistributedDataParallel` gradient buckets if
  186. ``shard_buckets=False`` and possibly greater otherwise.
  187. total_size (int, optional): total size of all buckets (i.e. sum of
  188. ``param.numel()`` for all ``param`` across all buckets) if
  189. ``shard_buckets=True``; otherwise, ``None``.
  190. broadcast_handles (List[Work]): :class:`list` of async work handles for
  191. the parameter broadcasts.
  192. bucket_index_to_future (Dict[int, torch.futures.Future]):
  193. :class:`dict` mapping bucket index to the corresponding all-reduce
  194. future.
  195. bucket_index_to_bucket (Dict[int, dist.GradBucket]): :class:`dict`
  196. mapping bucket index to the corresponding bucket.
  197. bucket_indices_seen (List[int]): :class:`list` of the bucket indices
  198. seen on this iteration.
  199. """
  200. def __init__(self, world_size: int) -> None:
  201. self.status: _OverlapStatus = _OverlapStatus.UNINITIALIZED
  202. self.shard_buckets: bool = False
  203. # Modified per bucket reconstruction
  204. self.params_per_bucket: List[List[torch.Tensor]] = []
  205. self.params_per_rank: List[List[torch.Tensor]] = [[] for _ in range(world_size)]
  206. self.offsets: Dict[int, int] = {}
  207. # Group ranks assigned to each bucket
  208. self.assigned_ranks_per_bucket: List[Set[int]] = []
  209. self.num_bucket_assignments: int = 0
  210. self.total_size: Optional[int] = None
  211. # Modified per iteration
  212. self.broadcast_handles: List[Any] = []
  213. self.bucket_indices_seen: List[int] = []
  214. # Used by `hook_with_zero_step()`
  215. self.bucket_index_to_future: Dict[int, torch.futures.Future] = {}
  216. self.bucket_index_to_bucket: Dict[int, dist.GradBucket] = {}
  217. def wait_for_broadcasts(self) -> None:
  218. r"""
  219. Waits for all parameter broadcasts. This should be called once all
  220. broadcasts have been scheduled, meaning ``self.broadcast_handles`` is
  221. filled. This clears ``self.broadcast_handles`` in preparation for the
  222. next iteration.
  223. """
  224. assert (
  225. len(self.broadcast_handles) == self.num_bucket_assignments
  226. ), f"Missing at least one broadcast handle on rank {dist.get_rank()}"
  227. _ = list(map(lambda x: x.wait(), self.broadcast_handles))
  228. self.broadcast_handles.clear()
  229. def clear_per_iter_info(self) -> None:
  230. r"""
  231. Clears the data structures that are modified per-iteration. This should
  232. be called at the end of an iteration.
  233. """
  234. self.bucket_indices_seen.clear()
  235. self.bucket_index_to_future.clear()
  236. self.bucket_index_to_bucket.clear()
  237. class ZeroRedundancyOptimizer(Optimizer, Joinable):
  238. r"""
  239. This class wraps an arbitrary :class:`optim.Optimizer
  240. <torch.optim.Optimizer>` and shards its states across ranks in the group as
  241. described by ZeRO_. The local optimizer instance in each rank is only
  242. responsible for updating approximately ``1 / world_size`` parameters and
  243. hence only needs to keep ``1 / world_size`` optimizer states. After
  244. parameters are updated locally, each rank will broadcast its parameters to
  245. all other peers to keep all model replicas in the same state.
  246. ``ZeroRedundancyOptimizer`` can be used in conjunction with
  247. :class:`torch.nn.parallel.DistributedDataParallel` to reduce per-rank peak
  248. memory consumption.
  249. ``ZeroRedundancyOptimizer`` uses a sorted-greedy algorithm to pack a number
  250. of parameters at each rank. Each parameter belongs to a single rank and is
  251. not divided among ranks. The partition is arbitrary and might not match
  252. the parameter registration or usage order.
  253. Arguments:
  254. params (``Iterable``): an ``Iterable`` of :class:`torch.Tensor` s
  255. or :class:`dict` s giving all parameters, which will be sharded
  256. across ranks.
  257. Keyword Args:
  258. optimizer_class (:class:`torch.optim.Optimizer`): the class of the local
  259. optimizer.
  260. process_group (``ProcessGroup``, optional): ``torch.distributed``
  261. ``ProcessGroup`` (default: ``dist.group.WORLD`` initialized by
  262. :meth:`torch.distributed.init_process_group`).
  263. parameters_as_bucket_view (bool, optional): if ``True``, parameters are
  264. packed into buckets to speed up communication, and ``param.data``
  265. fields point to bucket views at different offsets; if ``False``,
  266. each individual parameter is communicated separately, and each
  267. ``param.data`` stays intact (default: ``False``).
  268. overlap_with_ddp (bool, optional): if ``True``, :meth:`step` is
  269. overlapped with :class:`DistributedDataParallel` 's gradient
  270. synchronization; this requires (1) either a functional optimizer
  271. for the ``optimizer_class`` argument or one with a functional
  272. equivalent and (2) registering a DDP communication hook
  273. constructed from one of the functions in ``ddp_zero_hook.py``;
  274. parameters are packed into buckets matching those in
  275. :class:`DistributedDataParallel`, meaning that the
  276. ``parameters_as_bucket_view`` argument is ignored.
  277. If ``False``, :meth:`step` runs disjointly after the backward pass
  278. (per normal).
  279. (default: ``False``)
  280. **defaults: any trailing arguments, which are forwarded to the local
  281. optimizer.
  282. Example::
  283. >>> # xdoctest: +SKIP
  284. >>> import torch.nn as nn
  285. >>> from torch.distributed.optim import ZeroRedundancyOptimizer
  286. >>> from torch.nn.parallel import DistributedDataParallel as DDP
  287. >>> model = nn.Sequential(*[nn.Linear(2000, 2000).to(rank) for _ in range(20)])
  288. >>> ddp = DDP(model, device_ids=[rank])
  289. >>> opt = ZeroRedundancyOptimizer(
  290. >>> ddp.parameters(),
  291. >>> optimizer_class=torch.optim.Adam,
  292. >>> lr=0.01
  293. >>> )
  294. >>> ddp(inputs).sum().backward()
  295. >>> opt.step()
  296. .. warning::
  297. Currently, ``ZeroRedundancyOptimizer`` requires that all of the
  298. passed-in parameters are the same dense type.
  299. .. warning::
  300. If you pass ``overlap_with_ddp=True``, be wary of the following: Given
  301. the way that overlapping :class:`DistributedDataParallel` with
  302. :class:`ZeroRedundancyOptimizer` is currently implemented, the first
  303. two or three training iterations do not perform parameter updates in
  304. the optimizer step, depending on whether ``static_graph=False`` or
  305. ``static_graph=True``, respectively. This is because it needs
  306. information about the gradient bucketing strategy used by
  307. :class:`DistributedDataParallel`, which is not finalized until the
  308. second forward pass if ``static_graph=False`` or until the third
  309. forward pass if ``static_graph=True``. To adjust for this, one option
  310. is to prepend dummy inputs.
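Example (illustrative sketch of ``overlap_with_ddp=True``; the hook imports
below are assumptions about the intended wiring with ``ddp_zero_hook.py``
rather than something defined in this class)::
>>> # xdoctest: +SKIP
>>> from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import (
>>>     allreduce_hook,
>>> )
>>> from torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook import (
>>>     hook_with_zero_step,
>>> )
>>> ddp = DDP(model, device_ids=[rank])
>>> opt = ZeroRedundancyOptimizer(
>>>     ddp.parameters(),
>>>     optimizer_class=torch.optim.Adam,
>>>     overlap_with_ddp=True,
>>>     lr=0.01,
>>> )
>>> ddp.register_comm_hook(None, hook_with_zero_step(allreduce_hook, ddp, opt))
>>> # No explicit `opt.step()`: the step runs inside the hook. The first few
>>> # iterations only collect bucketing information (see the warning above),
>>> # so dummy inputs may be prepended.
>>> for input in inputs:
>>>     ddp(input).sum().backward()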
  311. .. warning:: ZeroRedundancyOptimizer is experimental and subject to change.
  312. .. _ZeRO: https://arxiv.org/abs/1910.02054
  313. """
  314. def __init__(
  315. self,
  316. params,
  317. optimizer_class: Type[Optimizer],
  318. process_group: Optional[Any] = None,
  319. parameters_as_bucket_view: bool = False,
  320. overlap_with_ddp: bool = False,
  321. **defaults: Any,
  322. ):
  323. # Perform type and assumption checks on the input parameters
  324. params = self._verify_and_init_params(params)
  325. self._verify_same_dense_param_type()
  326. # NOTE: The parent constructor uses `add_param_group()` which is
  327. # partially overloaded in ZeroRedundancyOptimizer, so we use the
  328. # `initialized` flag to dissociate the behaviour of `add_param_group()`
  329. # between the parent and child.
  330. self.initialized = False
  331. Optimizer.__init__(self, params, defaults)
  332. Joinable.__init__(self)
  333. # Now, all parameters are held in both `self._all_params` and
  334. # `self.param_groups`
  335. # Internal data structures (`_cache` indicates lazily evaluated)
  336. self._param_to_rank_cache: Dict[torch.Tensor, int] = {}
  337. self._param_to_index_cache: Dict[torch.Tensor, int] = {}
  338. self._partition_parameters_cache: List[List[Dict]] = []
  339. self._index_to_param_cache: List[torch.Tensor] = []
  340. self._device_to_params_per_rank_cache: Dict[
  341. torch.device, List[List[torch.Tensor]]
  342. ] = {}
  343. self._bucket_assignments_per_rank_cache: List[
  344. Dict[int, _DDPBucketAssignment]
  345. ] = []
  346. self._is_trainable_mask = self._get_is_trainable_mask()
  347. # Default device for collective communication and buckets
  348. self._default_device = self._all_params[0].device
  349. self.process_group = (
  350. process_group if process_group is not None else dist.group.WORLD
  351. )
  352. self.world_size: int = dist.get_world_size(self.process_group)
  353. self.rank: int = dist.get_rank(self.process_group)
  354. self.global_rank: int = dist.distributed_c10d.get_global_rank(
  355. self.process_group, self.rank
  356. )
  357. self._overlap_with_ddp: bool = overlap_with_ddp
  358. self._optim_defaults = defaults
  359. self._optim_constructor = self._get_optimizer_constructor(optimizer_class)
  360. # If `overlap_with_ddp=True`, local optimizer initialization is delayed
  361. # to run time after the necessary information has been collected
  362. if not overlap_with_ddp:
  363. self._init_local_optimizer()
  364. else:
  365. self._overlap_info: _OverlapInfo = _OverlapInfo(self.world_size)
  366. if parameters_as_bucket_view:
  367. logger.warning(
  368. "`parameters_as_bucket_view=True` will be ignored since "
  369. "`overlap_with_ddp=True`; instead, a different bucketing "
  370. "strategy will be used"
  371. )
  372. # `self._buckets` is used if `parameters_as_bucket_view=True`, in
  373. # which case parameter data is flattened into contiguous bucket tensors
  374. self.parameters_as_bucket_view = parameters_as_bucket_view
  375. self._buckets: List[List[torch.Tensor]] = []
  376. self._build_param_buckets()
  377. # Optional consolidated optimizer state, only populated if this rank
  378. # is the target in `consolidate_state_dict()`
  379. self._all_state_dicts: List[Dict[str, Any]] = []
  380. self.initialized = True
  381. def _clear_cache(self) -> None:
  382. r"""
  383. Clears the cached data structures giving partition information.
  384. """
  385. self._partition_parameters_cache.clear()
  386. self._param_to_rank_cache.clear()
  387. self._index_to_param_cache.clear()
  388. self._param_to_index_cache.clear()
  389. self._device_to_params_per_rank_cache.clear()
  390. self._bucket_assignments_per_rank_cache.clear()
  391. def add_param_group(self, param_group: dict) -> None:
  392. r"""
  393. Add a parameter group to the :class:`Optimizer` 's ``param_groups``.
  394. This can be useful when fine-tuning a pre-trained network, as frozen
  395. layers can be made trainable and added to the :class:`Optimizer` as
  396. training progresses.
  397. Arguments:
  398. param_group (dict): specifies the parameters to be optimized and
  399. group-specific optimization options.
  400. .. warning:: This method handles updating the shards on all partitions
  401. but needs to be called on all ranks. Calling this on a subset of
  402. the ranks will cause the training to hang because communication
  403. primitives are called depending on the managed parameters and
  404. expect all the ranks to participate on the same set of parameters.
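Example (illustrative sketch; ``zero_optim`` and ``new_params`` are
hypothetical, and the call must be made on every rank)::
>>> # xdoctest: +SKIP
>>> zero_optim.add_param_group({"params": new_params, "lr": 1e-4})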
  405. """
  406. if self.initialized and self._overlap_with_ddp:
  407. raise RuntimeError(
  408. "ZeroRedundancyOptimizer with `overlap_with_ddp=True` only "
  409. "supports a single parameter group"
  410. )
  411. super().add_param_group(param_group)
  412. # NOTE: The rest of the method assumes that the call to the parent's
  413. # `add_param_group()` appends the new parameter group and preserves
  414. # the previous parameter-group ordering
  415. if self.initialized:
  416. # Force a re-partitioning of the parameters
  417. self._clear_cache()
  418. param_groups = self._partition_parameters()[self.rank]
  419. # NOTE: All parameters in the old parameter groups should be
  420. # assigned to the same ranks so that the local optimizers do not
  421. # need to be reinitialized
  422. # Add the parameters assigned to this rank from the new parameter
  423. # group to the local optimizer, if any
  424. if len(param_groups) == len(self.optim.param_groups) + 1:
  425. self.optim.add_param_group(param_groups[-1])
  426. # Update the bucketing strategy accordingly
  427. if self.parameters_as_bucket_view:
  428. self._build_param_buckets()
  429. def consolidate_state_dict(self, to: int = 0) -> None:
  430. r"""
  431. Consolidate a list of ``state_dict`` s (one per rank) on the target
  432. rank.
  433. Arguments:
  434. to (int): the rank that receives the optimizer states (default: 0).
  435. Raises:
  436. RuntimeError: if ``overlap_with_ddp=True`` and this method is
  437. called before this :class:`ZeroRedundancyOptimizer` instance
  438. has been fully initialized, which happens once
  439. :class:`DistributedDataParallel` gradient buckets have been
  440. rebuilt.
  441. .. warning:: This needs to be called on all ranks.
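Example (illustrative sketch; ``zero_optim`` is a hypothetical, fully
constructed :class:`ZeroRedundancyOptimizer`, ``zero_ckpt.pt`` is a
hypothetical file name, and the code runs on every rank)::
>>> # xdoctest: +SKIP
>>> zero_optim.consolidate_state_dict(to=0)
>>> if dist.get_rank() == 0:
>>>     torch.save(zero_optim.state_dict(), "zero_ckpt.pt")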
  442. """
  443. self._check_overlap_initialized()
  444. # Sync the exposed `param_groups` attributes to the local optimizer in
  445. # case they have been updated
  446. self._sync_param_groups(self.param_groups, self.optim.param_groups)
  447. # Pull the sharded state from all ranks and store them in rank order
  448. empty_messenger = torch.tensor(
  449. [0], dtype=torch.uint8, device=self._default_device
  450. )
  451. # NOTE: We wastefully use `broadcast()` (e.g. instead of `gather()`)
  452. # due to compatibility issues with NCCL backend; a possible follow-up
  453. # is to move all sharded state management to RPC RRef
  454. self._all_state_dicts = []
  455. for rank in range(self.world_size):
  456. global_rank = dist.distributed_c10d.get_global_rank(
  457. self.process_group, rank
  458. )
  459. if self.rank == to:
  460. # Consolidate all local `state_dict`s on this rank, storing on
  461. # CPU to save GPU memory
  462. if rank == self.rank:
  463. # Directly append own optimizer state
  464. self._all_state_dicts.append(
  465. _recursive_copy_to_device(
  466. self.optim.state_dict(),
  467. non_blocking=True,
  468. device=torch.device("cpu"),
  469. )
  470. )
  471. else:
  472. # Receive the optimizer state from the source rank
  473. local_state_dict = _broadcast_object(
  474. empty_messenger,
  475. src_rank=global_rank,
  476. group=self.process_group,
  477. device=self._default_device,
  478. )
  479. self._all_state_dicts.append(
  480. _recursive_copy_to_device(
  481. local_state_dict,
  482. non_blocking=True,
  483. device=torch.device("cpu"),
  484. )
  485. )
  486. else:
  487. if rank == self.rank:
  488. # Send the optimizer state to the target rank
  489. _ = _broadcast_object(
  490. self.optim.state_dict(),
  491. src_rank=self.global_rank,
  492. group=self.process_group,
  493. device=self._default_device,
  494. )
  495. elif rank != to:
  496. # Discard the received object; `broadcast()` is used for
  497. # compatibility reasons
  498. _ = _broadcast_object(
  499. empty_messenger,
  500. src_rank=global_rank,
  501. group=self.process_group,
  502. device=self._default_device,
  503. )
  504. def _verify_params_per_rank(
  505. self,
  506. params_per_rank: List[List[torch.Tensor]],
  507. ) -> None:
  508. r"""
  509. Verifies ``params_per_rank`` for :meth:`_partition_parameters`,
  510. checking that ``params_per_rank`` has length equal to the world size
  511. and that it does not contain any parameters not passed into the
  512. :class:`ZeroRedundancyOptimizer` constructor.
  513. The parameters in ``params_per_rank`` being a strict subset of those
  514. passed into the constructor is valid since some parameters may be
  515. frozen.
  516. Raises:
  517. ValueError: if ``params_per_rank`` does not have length equal to
  518. the world size or if it contains a parameter that was not
  519. passed into the :class:`ZeroRedundancyOptimizer` constructor.
  520. """
  521. if len(params_per_rank) != self.world_size:
  522. raise ValueError(
  523. "`params_per_rank` must have length equal to the world size"
  524. )
  525. all_params_set = set(self._all_params)
  526. for params in params_per_rank:
  527. for param in params:
  528. if param not in all_params_set:
  529. raise ValueError(
  530. "Passing a new parameter in `params_per_rank` that "
  531. "was not passed into the ZeroRedundancyOptimizer "
  532. "constructor"
  533. )
  534. def _partition_param_group(
  535. self, param_group: Dict[str, Any], params_per_rank: List[List[torch.Tensor]]
  536. ) -> None:
  537. r"""
  538. Partitions the parameter group ``param_group`` according to
  539. ``params_per_rank`` by modifying ``self._partition_parameters_cache``.
  540. This method should only be used as a subroutine for
  541. :meth:`_partition_parameters`.
  542. Arguments:
  543. param_group (dict[str, Any]): a parameter group as normally defined
  544. in an optimizer state.
  545. params_per_rank (list[list[torch.Tensor]]): a :class:`list` of
  546. length world size containing :class:`list` s of parameters to
  547. assign to each rank.
  548. """
  549. for rank, params in enumerate(params_per_rank):
  550. rank_param_group = copy.copy(param_group)
  551. rank_param_group["params"] = params
  552. self._partition_parameters_cache[rank].append(rank_param_group)
  553. def _partition_parameters(
  554. self,
  555. params_per_rank: Optional[List[List[torch.Tensor]]] = None,
  556. ) -> List[List[Dict]]:
  557. r"""
  558. Partitions parameters across distributed data parallel ranks.
  559. Arguments:
  560. params_per_rank (list[list[torch.Tensor]], optional): a
  561. :class:`list` of length world size containing :class:`list` s
  562. of parameters to assign to each rank; this provides a way to
  563. specify a partition manually.
  564. If ``None``, the parameters are partitioned according to an
  565. internal algorithm.
  566. (default: ``None``)
  567. Returns:
  568. A :class:`list` where each element of the list contains the
  569. ``param_groups`` for a rank (which itself is a :class:`list` of
  570. :class:`dict`); element 0 corresponds to rank 0, etc.; each rank
  571. stores the ``param_groups`` for all ranks for the collective
  572. communication in :meth:`step`.
  573. Raises:
  574. ValueError: see :meth:`_verify_params_per_rank`.
  575. RuntimeError: if ``params_per_rank`` is not ``None`` and this
  576. :class:`ZeroRedundancyOptimizer` instance is using more than
  577. one parameter group.
  578. """
  579. if params_per_rank is None:
  580. # Partition the parameters optimizing for uniformity
  581. if len(self._partition_parameters_cache) == 0:
  582. self._partition_parameters_cache = [[] for _ in range(self.world_size)]
  583. sizes = [0] * self.world_size
  584. for param_group in self.param_groups:
  585. param_group_params_per_rank: List[List] = [
  586. [] for _ in range(self.world_size)
  587. ]
  588. # Sort the parameters by size (largest first)
  589. params_sorted = sorted(
  590. param_group["params"], key=lambda t: t.numel(), reverse=True
  591. )
  592. for param in params_sorted:
  593. # Greedily add the parameter to rank with smallest size so far
  594. rank = self._get_min_index(sizes)
  595. param_group_params_per_rank[rank].append(param)
  596. sizes[rank] += param.numel()
  597. # Apply the constructed partition of the parameter group
  598. self._partition_param_group(
  599. param_group, param_group_params_per_rank
  600. )
  601. return self._partition_parameters_cache
  602. # Partition the parameters according to `params_per_rank`
  603. assert len(self._partition_parameters_cache) == 0, (
  604. "Specifying `params_per_rank` should only be done when the "
  605. "parameters have not been partitioned yet"
  606. )
  607. if len(self.param_groups) != 1:
  608. raise RuntimeError(
  609. "Specifying `params_per_rank` only supports a single " "parameter group"
  610. )
  611. self._verify_params_per_rank(params_per_rank)
  612. self._partition_parameters_cache = [[] for _ in range(self.world_size)]
  613. # Apply the passed-in partition of the parameter group
  614. param_group = self.param_groups[0]
  615. self._partition_param_group(param_group, params_per_rank)
  616. return self._partition_parameters_cache
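# Illustrative sketch (not executed; `numels` and `world_size` below are
# stand-ins): the greedy rule above assigns each parameter, in decreasing
# size order, to the rank with the smallest total so far, e.g.:
#
#     numels, world_size = [6, 5, 4, 1], 2
#     sizes = [0] * world_size
#     assignment = [[] for _ in range(world_size)]
#     for n in sorted(numels, reverse=True):
#         rank = sizes.index(min(sizes))
#         assignment[rank].append(n)
#         sizes[rank] += n
#     # assignment == [[6, 1], [5, 4]]; sizes == [7, 9]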
  617. @property
  618. def _param_to_rank(self) -> Dict[torch.Tensor, int]:
  619. r"""
  620. :class:`dict` mapping parameters to their assigned data parallel rank
  621. in the partition.
  622. """
  623. if len(self._param_to_rank_cache) == 0:
  624. for rank, param_groups in enumerate(self._partition_parameters()):
  625. for param_group in param_groups:
  626. for param in param_group["params"]:
  627. self._param_to_rank_cache[param] = rank
  628. return self._param_to_rank_cache
  629. @property
  630. def _param_to_index(self) -> Dict[torch.Tensor, int]:
  631. r"""
  632. :class:`dict` mapping parameters to their indices in the global
  633. optimizer state.
  634. NOTE: This assumes that the global optimizer state's indexing (in
  635. ``state_dict``) follows a linear ordering over the parameter groups.
  636. """
  637. if len(self._param_to_index_cache) == 0:
  638. self._param_to_index_cache = {
  639. p: i
  640. for i, p in enumerate(chain(*(g["params"] for g in self.param_groups)))
  641. }
  642. return self._param_to_index_cache
  643. @property
  644. def _index_to_param(self) -> List[torch.Tensor]:
  645. r"""
  646. List mapping parameter indices in the global optimizer scheme to the
  647. actual params.
  648. """
  649. if len(self._index_to_param_cache) == 0:
  650. self._index_to_param_cache = list(
  651. chain(*(g["params"] for g in self.param_groups))
  652. )
  653. return self._index_to_param_cache
  654. def _broadcast_params_from_rank(self, rank: int):
  655. r"""
  656. Broadcasts the shard of parameters from a given rank to all other
  657. ranks asynchronously.
  658. Arguments:
  659. rank (int): the source rank.
  660. Returns:
  661. A :class:`list` of async work handles for the ``broadcast()`` s
  662. performed to synchronize the parameters.
  663. """
  664. assert not self._overlap_with_ddp, (
  665. "`_broadcast_params_from_rank()` should not be used if "
  666. "`overlap_with_ddp=True`; instead, the broadcasting should "
  667. "happen in the DDP communication hook"
  668. )
  669. handles = []
  670. if self.parameters_as_bucket_view:
  671. for dev_i_buckets in self._buckets:
  672. bucket = dev_i_buckets[rank]
  673. global_rank = dist.distributed_c10d.get_global_rank(
  674. self.process_group, rank
  675. )
  676. handles.append(
  677. dist.broadcast(
  678. tensor=bucket,
  679. src=global_rank,
  680. group=self.process_group,
  681. async_op=True,
  682. )
  683. )
  684. else:
  685. param_groups = self._partition_parameters()[rank]
  686. global_rank = dist.distributed_c10d.get_global_rank(
  687. self.process_group, rank
  688. )
  689. for param_group in param_groups:
  690. for param in param_group["params"]:
  691. handles.append(
  692. dist.broadcast(
  693. tensor=param.data,
  694. src=global_rank,
  695. group=self.process_group,
  696. async_op=True,
  697. )
  698. )
  699. return handles
  700. def _sync_params(self):
  701. r"""
  702. Syncs all parameter shards across the ranks.
  703. This rank sends its shard of the parameters to all other ranks and
  704. receives a shard from each other rank. This is done using
  705. ``broadcast()``. Parameters are sent bucket-by-bucket if
  706. ``parameters_as_bucket_view=True`` and sent parameter-by-parameter
  707. otherwise.
  708. """
  709. handles = []
  710. for rank in range(self.world_size):
  711. handles.extend(self._broadcast_params_from_rank(rank))
  712. _ = list(map(lambda x: x.wait(), handles))
  713. @property
  714. def _device_to_params_per_rank(
  715. self,
  716. ) -> Dict[torch.device, List[List[torch.Tensor]]]:
  717. r"""
  718. :class:`dict` mapping each device to a :class:`list` of the per-rank parameter
  719. lists filtered to only include the parameters stored on that device.
  720. Each per-rank parameter list gives the parameters assigned to that rank
  721. to update.
  722. This is used for constructing the parameter buckets if
  723. ``parameters_as_bucket_view=True``.
  724. Let ``dev_i`` denote the ``i``th device for this rank. Then:
  725. ``dev_0`` maps to a list containing:
  726. rank 0's assigned parameters stored on ``dev_0``,
  727. rank 1's assigned parameters stored on ``dev_0``,
  728. ...
  729. ``dev_1`` maps to a list containing:
  730. rank 0's assigned parameters stored on ``dev_1``,
  731. rank 1's assigned parameters stored on ``dev_1``,
  732. ...
  733. ...
  734. """
  735. assert self.parameters_as_bucket_view, (
  736. "`_device_to_params_per_rank` should only be used if "
  737. "`parameters_as_bucket_view=True`"
  738. )
  739. if len(self._device_to_params_per_rank_cache) == 0:
  740. for rank, param_groups in enumerate(self._partition_parameters()):
  741. for param_group in param_groups:
  742. for param in param_group["params"]:
  743. device = param.device
  744. if device not in self._device_to_params_per_rank_cache:
  745. self._device_to_params_per_rank_cache[device] = [
  746. [] for _ in range(self.world_size)
  747. ]
  748. self._device_to_params_per_rank_cache[device][rank].append(
  749. param
  750. )
  751. return self._device_to_params_per_rank_cache
  752. def _get_min_index(
  753. self,
  754. values: List[int],
  755. disallowed_indices: Optional[Set[int]] = None,
  756. ) -> int:
  757. r"""
  758. Returns ``values.index(min(values))``, using only a single pass. It
  759. also excludes any indices in ``disallowed_indices`` if provided.
  760. Arguments:
  761. values (List[int]): :class:`list` of values.
  762. disallowed_indices (Optional[Set[int]]): indices that are
  763. disallowed from being the returned min index.
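For example, ``_get_min_index([3, 1, 2], disallowed_indices={1})`` returns
``2``.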
  764. """
  765. min_index = -1
  766. min_value = float("inf")
  767. for i, value in enumerate(values):
  768. if disallowed_indices and i in disallowed_indices:
  769. continue
  770. if value < min_value:
  771. min_value = value
  772. min_index = i
  773. assert min_index >= 0, "All indices are disallowed"
  774. return min_index
  775. def _assign_bucket_subset_to_rank(
  776. self,
  777. bucket_index: int,
  778. bucket_params: List[torch.Tensor],
  779. bucket_offset: int,
  780. assigned_rank: int,
  781. assigned_ranks_per_bucket: List[Set[int]],
  782. ) -> None:
  783. r"""
  784. Assigns the model parameters given by ``bucket_params``, representing a
  785. (possibly non-strict) subset of the parameters corresponding to a
  786. :class:`DistributedDataParallel` bucket, to the rank with the least
  787. size assigned so far and collects relevant information.
  788. Arguments:
  789. bucket_index (int): index of the :class:`DistributedDataParallel`
  790. gradient bucket.
  791. bucket_params (List[torch.Tensor]): subset of the parameters
  792. corresponding to the bucket to assign.
  793. bucket_offset (int): offset giving the index of the first element
  794. in ``bucket_params`` in the bucket's full parameter list.
  795. assigned_rank (int): group rank to assign to.
  796. assigned_ranks_per_bucket (List[Set[int]]): :class:`set` of group ranks
  797. assigned to each bucket.
  798. """
  799. overlap_info = self._overlap_info
  800. if len(bucket_params) == 0:
  801. raise ValueError("Empty bucket assignment")
  802. params_per_rank = overlap_info.params_per_rank
  803. offsets = overlap_info.offsets
  804. self._bucket_assignments_per_rank_cache[assigned_rank][
  805. bucket_index
  806. ] = _DDPBucketAssignment(bucket_index, bucket_params, bucket_offset)
  807. if self.global_rank == assigned_rank:
  808. offsets[bucket_index] = len(params_per_rank[assigned_rank])
  809. params_per_rank[assigned_rank].extend(bucket_params)
  810. assigned_ranks_per_bucket[bucket_index].add(assigned_rank)
  811. self._overlap_info.num_bucket_assignments += 1
  812. @property
  813. def _bucket_assignments_per_rank(self) -> List[Dict[int, _DDPBucketAssignment]]:
  814. r"""
  815. :class:`list` of length world size consisting of :class:`dict` s
  816. mapping bucket indices to :class:`_DDPBucketAssignment` s for each
  817. rank.
  818. """
  819. assert self._overlap_with_ddp, (
  820. "`_bucket_assignments_per_rank` " "only be used if `overlap_with_ddp=True`"
  821. )
  822. if len(self._bucket_assignments_per_rank_cache) > 0:
  823. return self._bucket_assignments_per_rank_cache
  824. overlap_info = self._overlap_info
  825. assert overlap_info.status == _OverlapStatus.INITIALIZED
  826. self._bucket_assignments_per_rank_cache = [{} for _ in range(self.world_size)]
  827. params_per_bucket = overlap_info.params_per_bucket
  828. if overlap_info.shard_buckets:
  829. # Define the assignment threshold to approximate uniformity
  830. assert overlap_info.total_size is not None, "`total_size` was not computed"
  831. threshold = overlap_info.total_size / self.world_size # type: ignore[operator]
  832. size_per_rank = [0 for _ in range(self.world_size)]
  833. num_buckets = len(params_per_bucket)
  834. overlap_info.assigned_ranks_per_bucket = [set() for _ in range(num_buckets)]
  835. assigned_ranks_per_bucket = overlap_info.assigned_ranks_per_bucket
  836. if not overlap_info.shard_buckets:
  837. # Assign each DDP bucket entirely to a single rank
  838. for bucket_index, bucket_params in enumerate(params_per_bucket):
  839. assert len(bucket_params) > 0, "Empty bucket"
  840. assigned_rank = self._get_assigned_rank(bucket_index)
  841. self._assign_bucket_subset_to_rank(
  842. bucket_index,
  843. bucket_params,
  844. 0,
  845. assigned_rank,
  846. assigned_ranks_per_bucket,
  847. )
  848. else:
  849. # Assign each DDP bucket to possibly multiple ranks
  850. # Specifically, sort the DDP buckets by increasing size, and for
  851. # each bucket, iteratively assign the maximal unassigned subset
  852. # with size less than `threshold` to the rank with the least total
  853. # size so far -- each such assignment is represented by a
  854. # `_DDPBucketAssignment` instance and only contains parameters from
  855. # a single DDP bucket
  856. params_per_bucket_enum = sorted(
  857. enumerate(params_per_bucket), key=lambda x: sum(p.numel() for p in x[1])
  858. )
  859. for bucket_index, bucket_params in params_per_bucket_enum:
  860. assert len(bucket_params) > 0, "Empty bucket"
  861. bucket_offset = 0
  862. assignment_size = 0
  863. for param_index, param in enumerate(bucket_params):
  864. param_numel = param.numel()
  865. if (
  866. assignment_size + param_numel >= threshold
  867. and param_index > bucket_offset
  868. ):
  869. assigned_rank = self._get_min_index(
  870. size_per_rank, assigned_ranks_per_bucket[bucket_index]
  871. )
  872. # Include up to but not including the parameter that
  873. # exceeded the threshold
  874. self._assign_bucket_subset_to_rank(
  875. bucket_index,
  876. bucket_params[bucket_offset:param_index],
  877. bucket_offset,
  878. assigned_rank,
  879. assigned_ranks_per_bucket,
  880. )
  881. size_per_rank[assigned_rank] += assignment_size
  882. bucket_offset = param_index
  883. assignment_size = 0
  884. assignment_size += param_numel
  885. # Assign the remainder of the bucket so that no assignment
  886. # spans across two buckets
  887. assigned_rank = self._get_min_index(
  888. size_per_rank, assigned_ranks_per_bucket[bucket_index]
  889. )
  890. self._assign_bucket_subset_to_rank(
  891. bucket_index,
  892. bucket_params[bucket_offset:],
  893. bucket_offset,
  894. assigned_rank,
  895. assigned_ranks_per_bucket,
  896. )
  897. size_per_rank[assigned_rank] += assignment_size
  898. return self._bucket_assignments_per_rank_cache
  899. def _local_step(
  900. self,
  901. gradients: Optional[List[Optional[torch.Tensor]]] = None,
  902. closure: Optional[Callable[[], float]] = None,
  903. **kwargs: Any,
  904. ) -> Optional[float]:
  905. r"""
  906. Performs a single optimizer step without syncing parameters across
  907. ranks.
  908. Arguments:
  909. gradients (list[Optional[torch.Tensor]], optional): a :class:`list`
  910. of length equal to the number of parameters assigned to this
  911. rank containing gradient tensors or ``None`` as its elements;
  912. a ``None`` in the :class:`list` indicates that the
  913. corresponding parameter should not be updated.
  914. If the argument itself is ``None``, then all parameters are
  915. updated, and the gradients are assumed to be already populated.
  916. (default: ``None``)
  917. closure (Callable): a closure that re-evaluates the model and
  918. returns the loss; optional for most optimizers and should be
  919. ``None`` if ``gradients`` is not ``None`` (default: ``None``)
  920. Returns:
  921. Optional loss depending on the underlying local optimizer.
  922. .. warning::
  923. The argument ``gradients`` should only be specified (i.e. not
  924. ``None``) if ``overlap_with_ddp=True``, in which case
  925. :class:`ZeroRedundancyOptimizer` wraps a functional optimizer.
  926. """
  927. Join.notify_join_context(self)
  928. # Check if the model trainability has changed
  929. is_trainable_mask = self._get_is_trainable_mask()
  930. if is_trainable_mask != self._is_trainable_mask:
  931. if self._overlap_with_ddp:
  932. raise RuntimeError(
  933. "ZeroRedundancyOptimizer with `overlap_with_ddp=True` "
  934. "does not support changing parameter trainability at run "
  935. "time"
  936. )
  937. logger.warning(
  938. "ZeroRedundancyOptimizer detected that the trainable "
  939. "parameters changed; rebuilding the parameter buckets if "
  940. "enabled"
  941. )
  942. self._build_param_buckets()
  943. self._is_trainable_mask = is_trainable_mask
  944. # Sync the exposed `param_groups` attributes to the local optimizer in
  945. # case they have been updated
  946. self._sync_param_groups(self.param_groups, self.optim.param_groups)
  947. # Run the optimizer step on this shard only
  948. if gradients is None:
  949. loss = (
  950. self.optim.step(**kwargs)
  951. if closure is None
  952. else self.optim.step(closure=closure, **kwargs)
  953. )
  954. else:
  955. assert self._overlap_with_ddp, (
  956. "Specifying `gradients` should not "
  957. "be used when `overlap_with_ddp=False`"
  958. )
  959. assert closure is None, (
  960. "`closure` is not supported when using " "a local functional optimizer"
  961. )
  962. loss = self.optim.step(gradients=gradients)
  963. # Sync any updated attributes in the local optimizer to the exposed
  964. # `param_groups`
  965. self._sync_param_groups(self.optim.param_groups, self.param_groups)
  966. return loss
  967. def step(
  968. self,
  969. closure: Optional[Callable[[], float]] = None,
  970. **kwargs: Any,
  971. ) -> Optional[float]:
  972. r"""
  973. Performs a single optimizer step and syncs parameters across all ranks.
  974. Arguments:
  975. closure (Callable): a closure that re-evaluates the model and
  976. returns the loss; optional for most optimizers.
  977. Returns:
  978. Optional loss depending on the underlying local optimizer.
  979. .. note:: Any extra keyword arguments are passed to the base optimizer as-is.
  980. """
  981. if self._overlap_with_ddp:
  982. logger.warning(
  983. "`step()` should not be included in the training loop when "
  984. "`overlap_with_ddp=True`"
  985. )
  986. return None
  987. # Perform the local optimizer step
  988. loss = self._local_step(closure=closure, **kwargs)
  989. # Sync all of the updated parameter shards across the ranks
  990. self._sync_params()
  991. return loss
  992. def join_hook(self, **kwargs):
  993. r"""
  994. Returns the ZeRO join hook, which enables training on uneven inputs by
  995. shadowing the collective communications in the optimizer step.
  996. Gradients must be properly set before this hook is called.
  997. Arguments:
  998. kwargs (dict): a :class:`dict` containing any keyword arguments
  999. to modify the behavior of the join hook at run time; all
  1000. :class:`Joinable` instances sharing the same join context
  1001. manager are forwarded the same value for ``kwargs``.
  1002. This hook does not support any keyword arguments; i.e. ``kwargs`` is
  1003. unused.
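Example (illustrative sketch; assumes ``ddp`` and ``zero_optim`` wrap the
same model and that the per-rank ``inputs`` may be uneven)::
>>> # xdoctest: +SKIP
>>> from torch.distributed.algorithms.join import Join
>>> with Join([ddp, zero_optim]):
>>>     for input in inputs:
>>>         ddp(input).sum().backward()
>>>         zero_optim.step()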
  1004. """
  1005. return _ZeROJoinHook(self)
  1006. @property
  1007. def join_device(self) -> torch.device:
  1008. return self._default_device
  1009. @property
  1010. def join_process_group(self) -> Any:
  1011. return self.process_group
  1012. def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
  1013. r"""
  1014. Load the state pertaining to the given rank from the input
  1015. ``state_dict``, updating the local optimizer as needed.
  1016. Arguments:
  1017. state_dict (dict): optimizer state; should be an object returned
  1018. from a call to :meth:`state_dict`.
  1019. Raises:
  1020. RuntimeError: if ``overlap_with_ddp=True`` and this method is
  1021. called before this :class:`ZeroRedundancyOptimizer` instance
  1022. has been fully initialized, which happens once
  1023. :class:`DistributedDataParallel` gradient buckets have been
  1024. rebuilt.
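Example (illustrative sketch; ``zero_ckpt.pt`` is a hypothetical file holding
a consolidated :meth:`state_dict`, and the code runs on every rank)::
>>> # xdoctest: +SKIP
>>> state = torch.load("zero_ckpt.pt", map_location="cpu")
>>> zero_optim.load_state_dict(state)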
  1025. """
  1026. self._check_overlap_initialized()
  1027. for index, value in state_dict["state"].items():
  1028. param = self._index_to_param[index]
  1029. if self._param_to_rank[param] != self.rank:
  1030. # Clear any state irrelevant to this rank
  1031. state_dict["state"][index] = None
  1032. else:
  1033. # Load the parameter state to the local optimizer
  1034. self.optim.state[param] = _recursive_copy_to_device(
  1035. value, non_blocking=True, device=param.device
  1036. )
  1037. # Force zero-dimensional tensors (like Adam "step") on CPU
  1038. for state_name, state_value in self.optim.state[param].items():
  1039. if torch.is_tensor(state_value) and state_value.dim() == 0:
  1040. self.optim.state[param][state_name] = state_value.cpu()
  1041. super().load_state_dict(state_dict)
  1042. # Sync the input state with the exposed and local optimizer states
  1043. self._sync_param_groups(state_dict["param_groups"], self.param_groups)
  1044. self._sync_param_groups(self.param_groups, self.optim.param_groups)
  1045. def state_dict(self) -> Dict[str, Any]:
  1046. r"""
  1047. Returns the last global optimizer state known to this rank.
  1048. .. warning::
  1049. If the state has not been consolidated to this rank, this raises a
  1050. runtime error, and even if it has, the state may not be up-to-date,
  1051. depending on when :meth:`consolidate_state_dict` was last called.
  1052. Raises:
  1053. RuntimeError: if ``overlap_with_ddp=True`` and this method is
  1054. called before this :class:`ZeroRedundancyOptimizer` instance
  1055. has been fully initialized, which happens once
  1056. :class:`DistributedDataParallel` gradient buckets have been
  1057. rebuilt; or if this method is called without a preceding call
  1058. to :meth:`consolidate_state_dict`.
  1059. """
  1060. self._check_overlap_initialized()
  1061. if len(self._all_state_dicts) == 0:
  1062. raise RuntimeError(
  1063. "Optimizer state has not been consolidated on this rank. "
  1064. f"Please call `consolidate_state_dict(to={self.rank})` on "
  1065. "all ranks beforehand if you meant to save the global state."
  1066. )
  1067. # Get the possibly-stale global optimizer state that uses global
  1068. # parameter indexing
  1069. state_dict = super().state_dict()
  1070. # Update the global optimizer state with local state information,
  1071. # factoring in the translation from local to global indexing
  1072. for rank, local_state_dict in enumerate(self._all_state_dicts):
  1073. local_param_groups = local_state_dict["param_groups"]
  1074. global_param_groups = self._partition_parameters()[rank]
  1075. assert len(local_param_groups) == len(
  1076. global_param_groups
  1077. ), "Mismatch between number of local and global parameter groups"
  1078. for local_param_group, global_param_group in zip(
  1079. local_param_groups, global_param_groups
  1080. ):
  1081. # `local_param_group` stores local indices, while
  1082. # `global_param_group` stores the tensors directly
  1083. local_param_indices = local_param_group["params"]
  1084. global_params = global_param_group["params"]
  1085. assert len(local_param_indices) == len(
  1086. global_params
  1087. ), "Mismatch between number of local and global parameters in parameter group"
  1088. for local_param_index, global_param in zip(
  1089. local_param_indices, global_params
  1090. ):
  1091. # Update the global parameter state, if any
  1092. if local_param_index in local_state_dict["state"]:
  1093. global_param_index = self._param_to_index[global_param]
  1094. state_dict["state"][global_param_index] = local_state_dict[
  1095. "state"
  1096. ][local_param_index]
  1097. # Sort the parameters in the state
  1098. state_dict["state"] = dict(sorted(state_dict["state"].items()))
  1099. return state_dict
  1100. @staticmethod
  1101. def _sync_param_groups(
  1102. src_param_groups: List[Dict[Any, Any]],
  1103. dst_param_groups: List[Dict[Any, Any]],
  1104. ) -> None:
  1105. r"""
  1106. Syncs the attributes from the source parameter groups to the
  1107. destination parameter groups.
  1108. Example attributes include the learning rate or scheduler attributes. The
  1109. two parameter group lists should have the same length (i.e. the same
  1110. number of parameter groups).
  1111. Arguments:
  1112. src_param_groups (list[dict]): parameter groups giving the
  1113. attribute settings to copy.
  1114. dst_param_groups (list[dict]): parameter groups giving the
  1115. attribute settings to set.
  1116. """
  1117. assert len(src_param_groups) == len(
  1118. dst_param_groups
  1119. ), "Mismatch between number of source and destination parameter groups"
  1120. for src_param_group, dst_param_group in zip(src_param_groups, dst_param_groups):
  1121. # Sync all attributes except the parameters
  1122. for attr in filter(lambda x: x != "params", src_param_group.keys()):
  1123. dst_param_group[attr] = src_param_group[attr]
    def _build_param_buckets(self) -> None:
        r"""
        Builds parameter buckets if ``parameters_as_bucket_view=True`` so
        that for each device that stores this rank's parameters, there is a
        bucket (represented as a tensor) containing all of the parameters on
        that device that are assigned to a given rank in the parameter update
        partition.

        This method is called in the constructor and any time parameter
        trainability is changed.

        .. warning::
            The current implementation assumes that all of the parameters in a
            bucket are of the same dense type when allocating the bucket's
            tensor.

        .. warning::
            If the model parameters are stored across more than one device,
            then the storage partitioning must be the same across all
            processes in order for parameter synchronization to work.
        """
        if not self.parameters_as_bucket_view or self._overlap_with_ddp:
            return

        # `self._buckets[i][j]` are the parameters stored on device i and
        # assigned to rank j
        num_devices = len(self._device_to_params_per_rank)
        self._buckets = [[] for _ in range(num_devices)]  # type: ignore[assignment]

        for dev_i, (device, params_per_rank) in enumerate(
            self._device_to_params_per_rank.items()
        ):
            for params in params_per_rank:
                bucket_size = 0
                dtype = None
                trainable_params = []
                for param in params:
                    if not _is_trainable(param):
                        # Clone in case the parameter was previously part of
                        # a bucket to prevent its data from being destroyed
                        param.data = param.data.detach().clone()
                    else:
                        bucket_size += param.numel()
                        trainable_params.append(param)
                    dtype = param.dtype  # assumes all same dtype

                if bucket_size == 0:
                    # Create a dummy bucket if there are no parameters
                    bucket = torch.zeros(1, device=device)
                else:
                    # Construct the bucket (assuming all dense and same dtype)
                    bucket = torch.empty(bucket_size, dtype=dtype, device=device)
                    offset = 0
                    for param in trainable_params:
                        offset_next = offset + param.numel()
                        bucket[offset:offset_next].copy_(param.data.flatten())
                        param.data = bucket[offset:offset_next].view_as(param.data)
                        offset = offset_next
                self._buckets[dev_i].append(bucket)  # type: ignore[arg-type]

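    # A minimal standalone sketch of the flattening scheme used above,
    # assuming `params` is a small list of dense CPU tensors of one dtype
    # (hypothetical tensors, not this rank's real partition):
    #
    #     >>> params = [torch.randn(3), torch.randn(2, 2)]
    #     >>> bucket = torch.empty(sum(p.numel() for p in params))
    #     >>> offset = 0
    #     >>> for p in params:
    #     ...     n = p.numel()
    #     ...     bucket[offset:offset + n].copy_(p.data.flatten())
    #     ...     p.data = bucket[offset:offset + n].view_as(p.data)
    #     ...     offset += n
    #
    # Each `p.data` now aliases a slice of `bucket`, so communicating the
    # single `bucket` tensor moves every parameter it contains at once.
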
    def _build_ddp_param_buckets(self) -> None:
        r"""
        For each DDP bucket with parameters assigned to this rank, flattens the
        data of those parameters into a single tensor and saves the tensor to
        the ``tensor`` attribute in the corresponding
        :class:`_DDPBucketAssignment` instance stored in
        ``self._bucket_assignments_per_rank``.

        :class:`DistributedDataParallel` guarantees that the parameters
        corresponding to a gradient bucket have the same device and the same
        dtype.
        """
        for bucket_assignments in self._bucket_assignments_per_rank:
            for bucket_assignment in bucket_assignments.values():
                params = bucket_assignment.parameters
                bucket_size = 0
                dtype = None
                for param in params:
                    assert _is_trainable(param), (
                        "Model parameter "
                        "corresponding to a gradient in a DDP bucket should "
                        "require a gradient"
                    )
                    bucket_size += param.numel()
                    dtype = param.dtype  # assumes all same dtype
                assert bucket_size > 0, "Empty bucket"

                # Construct the bucket tensor (assuming all dense and same dtype)
                tensor = torch.empty(
                    bucket_size, dtype=dtype, device=bucket_assignment.device
                )
                offset = 0
                for param in params:
                    offset_next = offset + param.numel()
                    tensor[offset:offset_next].copy_(param.data.flatten())
                    param.data = tensor[offset:offset_next].view_as(param.data)
                    offset = offset_next
                bucket_assignment.tensor = tensor

    def _verify_and_init_params(
        self,
        params: Any,
    ) -> Union[List[torch.Tensor], List[dict]]:
        r"""
        Verifies the type of ``params`` and initializes ``self._all_params``
        as a :class:`list` of all parameters if ``params`` is valid.

        Arguments:
            params (Any): Candidate parameter list or parameter groups to
                verify.

        Raises:
            TypeError: ``params`` has an invalid type.
            ValueError: ``params`` is empty.

        Returns:
            The persistent form of ``params`` to be passed into the parent
            :class:`Optimizer` constructor -- i.e. returns ``params`` as a
            :class:`list` to ensure that it can be iterated over again.
        """
        if isinstance(params, torch.Tensor):
            raise TypeError(
                "`params` argument should be an iterable of "
                f"Tensors, but got {torch.typename(params)}"
            )
        try:
            all_params = list(params)
        except TypeError as e:
            raise TypeError(
                "`params` argument should be an iterable of Tensors"
                f" or dicts, but got {torch.typename(params)}"
            ) from e
        if len(all_params) == 0:
            raise ValueError("ZeroRedundancyOptimizer got an empty parameter list")
        all_tensors = True
        all_dicts = True
        for param in all_params:
            all_tensors &= isinstance(param, torch.Tensor)
            all_dicts &= isinstance(param, dict)
        if not all_tensors and not all_dicts:
            raise TypeError(
                "`params` argument should be an iterable of Tensors or dicts"
            )
        # Ensure that `self._all_params` contains a list of all parameters
        if all_tensors:
            self._all_params = all_params
        elif all_dicts:
            self._all_params = []
            # `all_params` contains parameter groups (not parameters)
            for param_group in all_params:
                if "params" not in param_group:
                    raise ValueError(
                        "Each parameter group passed in via `params` must "
                        "have a 'params' key mapping to the parameters in "
                        "the group"
                    )
                self._all_params.extend(param_group["params"])
        return all_params

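    # A minimal sketch of the two accepted forms of `params`, assuming a toy
    # module and an already-initialized default process group (the module and
    # hyperparameter values are hypothetical, for illustration only):
    #
    #     >>> model = torch.nn.Linear(4, 4)
    #     >>> # 1) an iterable of Tensors
    #     >>> zero = ZeroRedundancyOptimizer(
    #     ...     model.parameters(), optimizer_class=torch.optim.SGD, lr=0.01
    #     ... )
    #     >>> # 2) an iterable of dicts, each with a "params" key
    #     >>> zero = ZeroRedundancyOptimizer(
    #     ...     [{"params": model.parameters(), "lr": 0.01}],
    #     ...     optimizer_class=torch.optim.SGD,
    #     ... )
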
    def _verify_same_dense_param_type(self) -> None:
        r"""
        Verifies that all parameters are of the same dense type.

        The method assumes that ``self._all_params`` has been initialized
        and is non-empty.

        Raises:
            ValueError: ``params`` contains sparse parameters or parameters
                of varying dense types.

        NOTE: This method can be removed once support for sparse parameters
        and varying parameter types is added.
        """
        typename = torch.typename(self._all_params[0])
        if self._all_params[0].is_sparse:
            raise ValueError(
                "ZeroRedundancyOptimizer only supports using "
                "the same dense type for all parameters but got "
                f"{typename}"
            )
        for param in self._all_params[1:]:
            other_typename = torch.typename(param)
            if other_typename != typename:
                raise ValueError(
                    "ZeroRedundancyOptimizer only supports "
                    "using the same dense type for all "
                    f"parameters but got both {typename} and "
                    f"{other_typename}"
                )

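    # For reference, `torch.typename` differentiates dense parameter types by
    # dtype; e.g. (values assuming default CPU tensors):
    #
    #     >>> torch.typename(torch.zeros(1))
    #     'torch.FloatTensor'
    #     >>> torch.typename(torch.zeros(1, dtype=torch.float16))
    #     'torch.HalfTensor'
    #
    # so mixing `float32` and `float16` parameters would trip the check above.
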
    def _get_is_trainable_mask(self) -> List[bool]:
        r"""
        Returns a boolean mask indicating if each parameter is trainable
        (``requires_grad``) or not.
        """
        return list(map(_is_trainable, self._all_params))

    def _init_local_optimizer(self) -> None:
        r"""
        Initializes this rank's local optimizer, responsible for its subset of
        the parameters.

        The local optimizer is saved in ``self.optim``.
        """
        assert (
            self._optim_constructor is not None
        ), "The local optimizer class has not been set"

        param_groups = self._partition_parameters()[self.rank]
        # `overlap_with_ddp=True` requires a local functional optimizer
        if self._overlap_with_ddp:
            # Functional optimizers only support a single parameter group and
            # require passing in the parameters as a list
            assert len(param_groups) == 1, (
                "Initializing the local "
                "functional optimizer with more than one parameter group"
            )
            params = param_groups[0]["params"]
            # Try to pass `_allow_empty_param_list=True` to avoid erroring
            if (
                "_allow_empty_param_list"
                in inspect.signature(self._optim_constructor).parameters
            ):
                self.optim: Any = self._optim_constructor(
                    params, **self._optim_defaults, _allow_empty_param_list=True
                )
            else:
                logger.warning(
                    f"{self._optim_constructor} does not support the argument "
                    "`_allow_empty_param_list`; ZeroRedundancyOptimizer may "
                    "error due to an empty parameter list"
                )
                self.optim: Any = self._optim_constructor(params, **self._optim_defaults)  # type: ignore[no-redef]

            # Log information about the DDP and ZeRO bucketing
            if dist.get_debug_level() != dist.DebugLevel.OFF:
                local_numel = sum(p.numel() for p in params)
                num_assigned_buckets = len(
                    self._bucket_assignments_per_rank[self.global_rank]
                )
                logger.info(
                    f"rank {self.global_rank} with {local_numel} parameters "
                    f"across {num_assigned_buckets} buckets"
                )
                if self.global_rank == 0:
                    logger.info(
                        f"{len(self._overlap_info.params_per_bucket)} DDP "
                        f"buckets and "
                        f"{self._overlap_info.num_bucket_assignments} bucket "
                        "assignments"
                    )
        else:
            # NOTE: Passing `param_groups` into the local optimizer constructor
            # bypasses the empty parameter list check
            self.optim: Optimizer = self._optim_constructor(param_groups, **self._optim_defaults)  # type: ignore[no-redef]

        # TODO: Manually add `self.param_groups` if using a functional
        # optimizer; remove this if/when the functional optimizers support
        # multiple parameter groups
        if self._overlap_with_ddp and not hasattr(self.optim, "param_groups"):
            assert hasattr(self.optim, "param_group"), (
                "The functional optimizer should set at least one of the "
                "attributes `param_group` or `param_groups`"
            )
            self.optim.param_groups = [self.optim.param_group]  # type: ignore[attr-defined]

        self._sync_param_groups(self.optim.param_groups, self.param_groups)

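    # A minimal sketch of the end result, assuming a world size of 2 and a
    # hypothetical even split of four parameters across the ranks:
    #
    #     rank 0: self.optim = optimizer_class([p0, p1], **defaults)
    #     rank 1: self.optim = optimizer_class([p2, p3], **defaults)
    #
    # so each rank only materializes optimizer state (e.g. Adam's moment
    # buffers) for its own shard of the parameters.
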
    def _init_zero_for_overlap(self) -> None:
        r"""
        Performs a delayed initialization of the local optimizer and the
        supporting data structures.
        """
        assert self._overlap_with_ddp, (
            "`_init_zero_for_overlap()` should only be called when "
            "`overlap_with_ddp=True`"
        )
        self._overlap_info.status = _OverlapStatus.INITIALIZED
        self._clear_cache()
        self._partition_parameters(self._overlap_info.params_per_rank)
        self._build_ddp_param_buckets()
        self._init_local_optimizer()

    def _get_assigned_rank(self, bucket_index: int) -> int:
        r"""
        Returns the single rank assigned to a :class:`DistributedDataParallel`
        gradient bucket.

        Arguments:
            bucket_index (int): index of the :class:`DistributedDataParallel`
                bucket for which to get the assigned rank.
        """
        assert not self._overlap_info.shard_buckets, (
            "The bucket assignment requires global bucket information and "
            "will be computed later; there should be no need to use this "
            "method"
        )
        return bucket_index % self.world_size

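    # The assignment above is a simple round-robin over ranks; e.g., assuming
    # a hypothetical world size of 4:
    #
    #     >>> [bucket_index % 4 for bucket_index in range(6)]
    #     [0, 1, 2, 3, 0, 1]
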
    def _check_overlap_initialized(self):
        r"""
        Checks that the delayed initialization has occurred (see
        :meth:`_init_zero_for_overlap`) if ``overlap_with_ddp=True`` and
        raises a ``RuntimeError`` if not.

        This should preface methods that should not be run before that
        delayed initialization.

        Raises:
            RuntimeError: if ``overlap_with_ddp=True`` and
                :meth:`_init_zero_for_overlap` has not been called.
        """
        if (
            self._overlap_with_ddp
            and self._overlap_info.status != _OverlapStatus.INITIALIZED
        ):
            raise RuntimeError(
                "This method should not be called until this "
                "ZeroRedundancyOptimizer instance has been fully "
                "initialized"
            )

    def _get_optimizer_constructor(self, optimizer_class: Any) -> Any:
        r"""
        Returns the proper optimizer constructor, performing the necessary
        validation and transformation depending on ``overlap_with_ddp``.

        Returns:
            - ``optimizer_class`` if ``overlap_with_ddp=False`` and
                ``optimizer_class`` is not a functional optimizer.
            - ``optimizer_class`` if ``overlap_with_ddp=True`` and
                ``optimizer_class`` is already a functional optimizer.
            - The functional equivalent of ``optimizer_class`` if
                ``overlap_with_ddp=True`` and ``optimizer_class`` is not
                already a functional optimizer (assuming the equivalent
                exists).

        Raises:
            ValueError:
                - if ``overlap_with_ddp=True`` but ``optimizer_class`` is
                    neither a functional optimizer nor translatable to a
                    functional optimizer.
                - if ``overlap_with_ddp=False`` and ``optimizer_class`` is a
                    functional optimizer.
        """
        functional_optims = functional_optim_map.values()
        if not self._overlap_with_ddp:
            if optimizer_class in functional_optims:
                # Using a functional optimizer is only supported when
                # `overlap_with_ddp=True`
                raise ValueError(
                    f"Passing in a functional optimizer {optimizer_class} "
                    "when `overlap_with_ddp=False`"
                )
            else:
                return optimizer_class
        else:
            if optimizer_class in functional_optims:
                # Already a functional optimizer
                return optimizer_class
            elif optimizer_class in functional_optim_map:
                # Translate the passed-in optimizer class to its functional
                # equivalent if `overlap_with_ddp=True`
                optim_constructor = functional_optim_map[optimizer_class]
                logger.info(
                    f"Using the functional optimizer {optim_constructor} "
                    f"instead of {optimizer_class} since "
                    "`overlap_with_ddp=True`"
                )
                return optim_constructor
            else:
                raise ValueError(
                    "Using `overlap_with_ddp=True` requires using a "
                    "functional optimizer, but there is no supported "
                    f"functional optimizer equivalent for {optimizer_class}"
                )

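    # A minimal sketch of the expected translation, assuming the usual entries
    # in `functional_optim_map` (e.g. `torch.optim.SGD` mapping to its
    # functional counterpart) and a placeholder `model` with an initialized
    # process group:
    #
    #     >>> zero = ZeroRedundancyOptimizer(
    #     ...     model.parameters(),
    #     ...     optimizer_class=torch.optim.SGD,
    #     ...     overlap_with_ddp=True,
    #     ...     lr=0.01,
    #     ... )
    #     >>> # internally, `self._optim_constructor` becomes the functional
    #     >>> # SGD class rather than `torch.optim.SGD`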