optimizer.py 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332
  1. # Copyright (c) Meta Platforms, Inc. and affiliates
  2. import copy
  3. import dataclasses
  4. from typing import Dict, List, Optional, Sequence, Tuple, Union, cast
  5. from torch.distributed.checkpoint.planner import LoadPlan
  6. import torch
  7. import torch.distributed as dist
  8. from torch.distributed._shard.sharded_tensor.api import ShardedTensor
  9. from torch.distributed._shard.sharded_tensor.metadata import TensorProperties
  10. from torch.distributed._shard.sharded_tensor.shard import Shard
  11. from torch.distributed._shard.sharding_spec.chunk_sharding_spec import (
  12. ChunkShardingSpec,
  13. )
  14. import torch.distributed.checkpoint as dist_cp
  15. from torch.distributed.checkpoint.metadata import (
  16. BytesStorageMetadata,
  17. Metadata,
  18. MetadataIndex,
  19. STATE_DICT_TYPE,
  20. TensorStorageMetadata,
  21. )
  22. from torch.distributed.checkpoint.planner_helpers import (
  23. _create_sharded_read_items,
  24. _create_read_items,
  25. )
  26. from torch.distributed.remote_device import _remote_device
  27. from torch.distributed._tensor import DTensor
  28. from torch.distributed.checkpoint.default_planner import (
  29. DefaultLoadPlanner,
  30. )
  31. from torch.distributed._shard.api import _shard_tensor
  32. from torch.distributed.checkpoint._nested_dict import unflatten_state_dict
  33. from torch.distributed.checkpoint.utils import (
  34. _element_wise_add,
  35. _element_wise_sub,
  36. )
  37. STATE_DICT_2D_LAYOUT = Dict[str, Tuple[Optional[Sequence[int]], Sequence[int]]]
  38. # TODO: Update docstrings for optimizer.py
  39. __all__ = [
  40. "load_sharded_optimizer_state_dict",
  41. ]
  42. def _gen_rank_device(global_rank: int) -> str:
  43. if torch.cuda.is_available():
  44. return f"cuda:{global_rank % torch.cuda.device_count()}"
  45. return "cpu"
  46. def _create_colwise_spec(
  47. pg: Optional[dist.ProcessGroup] = None,
  48. ) -> ChunkShardingSpec:
  49. if pg is None:
  50. placements = [
  51. f"rank:{idx}/{_gen_rank_device(idx)}"
  52. for idx in range(dist.get_world_size())
  53. ]
  54. else:
  55. placements = [
  56. f"rank:{idx}/{_gen_rank_device(dist.get_global_rank(pg, idx))}"
  57. for idx in range(pg.size())
  58. ]
  59. return ChunkShardingSpec(
  60. dim=0,
  61. placements=cast(List[Union[_remote_device, str]], placements),
  62. )
  63. def _is_nested_tensor(val: torch.Tensor) -> bool:
  64. if type(val) is ShardedTensor:
  65. if len(val.local_shards()) == 0:
  66. return False
  67. if type(val.local_shards()[0].tensor) is ShardedTensor:
  68. return True
  69. if type(val.local_shards()[0].tensor) is DTensor:
  70. raise ValueError(
  71. "Cannot handle DTensor nested insided ShardedTensor"
  72. )
  73. elif type(val) is DTensor and (
  74. type(val._local_tensor) is DTensor
  75. or type(val._local_tensor) is ShardedTensor
  76. ):
  77. raise ValueError("Cannot handle nested DTensor")
  78. return False
  79. def _alloc_tensor(props: TensorProperties, size: Sequence[int]) -> torch.Tensor:
  80. return torch.empty(
  81. size=size,
  82. dtype=props.dtype,
  83. layout=props.layout,
  84. requires_grad=props.requires_grad,
  85. pin_memory=props.pin_memory,
  86. device=cast(torch.device, torch.cuda.current_device()),
  87. )
  88. def _get_state_dict_2d_layout(
  89. state_dict: STATE_DICT_TYPE,
  90. ) -> Tuple[STATE_DICT_2D_LAYOUT, Optional[dist.ProcessGroup]]:
  91. """
  92. We have to load the right TP slice of the optimizer state.
  93. This is not easy since the per-tensor slicing can't be inferred from checkpoint metadata.
  94. We take advantage of the model state_dict producing a sliced ST to figure out what we need to load.
  95. This is pretty fragile and it might be easier for FSDP to compute this info for us.
  96. Returns a dictionary where keys are the same of the state_dict and the value is a tuple of
  97. (offset, size) for the current rank TP slice.
  98. N.B. The state_dict *MUST* come from FSDP.sharded_state_dict.
  99. """
  100. specs: STATE_DICT_2D_LAYOUT = {}
  101. dp_pg: Optional[dist.ProcessGroup] = None
  102. for key, value in state_dict.items():
  103. specs[key] = (None, value.size())
  104. if _is_nested_tensor(value):
  105. assert (
  106. len(value.local_shards()) == 1
  107. ), "Cannot handle ST with multiple shards"
  108. assert isinstance(
  109. value, ShardedTensor
  110. ), "Can only handle nested ShardedTensor"
  111. shard = value.local_shards()[0]
  112. specs[key] = (
  113. shard.metadata.shard_offsets,
  114. shard.metadata.shard_sizes,
  115. )
  116. dp_pg = shard.tensor._process_group # type: ignore[attr-defined]
  117. return (
  118. specs,
  119. dp_pg,
  120. )
class _ReaderWithOffset(DefaultLoadPlanner):
    """Load planner that reads ShardedTensor shards at a shifted offset.

    For tensors listed in ``fqn_to_offset``, the read requests are issued
    against the checkpoint at ``local_offset + fqn_offset`` (the TP slice
    position), while ``translation`` maps each displaced request index back
    to the original local index so writes land in the right local shard.
    """

    # Maps the displaced (checkpoint-space) MetadataIndex of each read
    # request back to the original (local-shard-space) index.
    translation: Dict[MetadataIndex, MetadataIndex]
    state_dict: STATE_DICT_TYPE
    metadata: Metadata

    def __init__(self, fqn_to_offset: Dict[str, Sequence[int]]) -> None:
        """``fqn_to_offset``: per-FQN offset of this rank's TP slice."""
        super().__init__()
        self.fqn_to_offset = fqn_to_offset
        # metadata / state_dict are populated later by the planner protocol
        # (set_up_planner); start with empty placeholders.
        self.metadata = Metadata({})
        self.state_dict = {}
        self.translation = {}

    def create_local_plan(self) -> LoadPlan:
        """Build this rank's LoadPlan, displacing reads for offset FQNs."""
        requests = []
        self.translation = {}
        for fqn, obj in self.state_dict.items():
            md = self.metadata.state_dict_metadata[fqn]
            # Non-ST values and FQNs without an offset use the default path.
            if not isinstance(obj, ShardedTensor):
                requests += _create_read_items(fqn, md, obj)
                continue

            if fqn not in self.fqn_to_offset:
                requests += _create_read_items(fqn, md, obj)
                continue

            offset = self.fqn_to_offset[fqn]

            assert len(obj.local_shards()) == 1
            original_shard = obj.local_shards()[0]
            # Clone the shard metadata and shift it into checkpoint space.
            shard_md = copy.deepcopy(original_shard.metadata)
            shard_md.shard_offsets = _element_wise_add(
                shard_md.shard_offsets, offset
            )
            local_shards = [Shard(original_shard.tensor, shard_md)]

            reqs = _create_sharded_read_items(
                fqn, cast(TensorStorageMetadata, md), local_shards
            )
            # TODO: The WriteItems will have a displaced MetadataIndex, fix it.
            # TODO: we should change _create_sharded_read_items to have more ergonomic API
            for wi in reqs:
                assert wi.dest_index.offset is not None
                # Undo the shift so lookup_tensor can find the local shard.
                original_offset = _element_wise_sub(
                    wi.dest_index.offset, offset
                )
                original_index = dataclasses.replace(
                    wi.dest_index, offset=torch.Size(original_offset)
                )
                self.translation[wi.dest_index] = original_index

            requests += reqs
        return LoadPlan(requests)

    def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor:
        # Translate displaced indices back; unknown indices pass through.
        return super().lookup_tensor(self.translation.get(index, index))
  168. def load_sharded_optimizer_state_dict(
  169. model_state_dict: STATE_DICT_TYPE,
  170. optimizer_key: str,
  171. storage_reader: dist_cp.StorageReader,
  172. ) -> STATE_DICT_TYPE:
  173. """
  174. Loads a state_dict to be used in conjuntion with FSDP sharded optimizer state.
  175. This is the current recommended way to checkpoint is FSDP
  176. >>> # xdoctest: +SKIP
  177. >>> import torch.distributed.checkpoint as dist_cp
  178. >>> # Save
  179. >>> model: torch.nn.Model
  180. >>> optim_params = model.parameters()
  181. >>> optim = torch.optim.SGD(optim_params, lr=0.01)
  182. >>>
  183. >>> with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
  184. >>> state_dict = {
  185. >>> "optimizer": FSDP.sharded_optim_state_dict(model, optim, optim_params),
  186. >>> "model": model.state_dict()
  187. >>> }
  188. >>> dist_cp.save_state_dict(
  189. >>> state_dict=optim_state,
  190. >>> storage_writer=dist_cp.FileSystemWriter("checkpoint"),
  191. >>> planner=dist_cp.DefaultSavePlanner(),
  192. >>> )
  193. >>>
  194. >>> # Load
  195. >>> with FSDP.state_dict_type(model_tp, StateDictType.SHARDED_STATE_DICT):
  196. >>> model_state_dict = model_tp.state_dict()
  197. >>> checkpoint = {
  198. >>> "model": model_state_dict
  199. >>> }
  200. >>> dist_cp.load_state_dict(
  201. >>> state_dict=checkpoint,
  202. >>> storage_reader=dist_cp.FileSystemReader(checkpoint_file),
  203. >>> planner=dist_cp.DefaultLoadPlanner(),
  204. >>> )
  205. >>> model.load_state_dict(checkpoint["model_state"])
  206. >>>
  207. >>> optim_state = sp_cp.load_sharded_optimizer_state_dict(
  208. >>> model_state_dict,
  209. >>> optimizer_key="optimizer",
  210. >>> storage_reader=dist_cp.FileSystemReader("checkpoint"),
  211. >>> )
  212. >>>
  213. >>> flattened_osd = FSDP.flatten_sharded_optim_state_dict(
  214. >>> optim_state["optimizer"], model, optim
  215. >>> )
  216. >>>
  217. >>> optim.load_state_dict(flattened_osd)
  218. """
  219. metadata = storage_reader.read_metadata()
  220. layout_specs, dp_pg = _get_state_dict_2d_layout(model_state_dict)
  221. if dp_pg is None:
  222. sharding_spec = ChunkShardingSpec(
  223. dim=0,
  224. placements=[
  225. f"rank:{i}/cuda:{i}" for i in range(dist.get_world_size())
  226. ],
  227. )
  228. else:
  229. sharding_spec = _create_colwise_spec(dp_pg)
  230. # Create a state_dict for optimizer state
  231. state_dict: STATE_DICT_TYPE = {}
  232. fqn_to_offset: Dict[str, Sequence[int]] = {}
  233. for key, value in metadata.state_dict_metadata.items():
  234. key_path = metadata.planner_data[key]
  235. if key_path[0] != optimizer_key:
  236. continue
  237. if isinstance(value, BytesStorageMetadata):
  238. state_dict[key] = "<bytes_io>"
  239. continue
  240. # value: TensorStorageMetadata
  241. if value.size.numel() == 1:
  242. state_dict[key] = _alloc_tensor(value.properties, value.size)
  243. elif dp_pg is None:
  244. state_dict[key] = _shard_tensor(
  245. _alloc_tensor(value.properties, value.size), sharding_spec
  246. )
  247. else:
  248. spec_key = key_path[2]
  249. alloc_size = layout_specs.get(spec_key, (None, value.size))[1]
  250. st_md = sharding_spec.build_metadata(
  251. torch.Size(alloc_size), value.properties
  252. )
  253. local_shards = []
  254. current_rank = dist.get_rank(dp_pg)
  255. for shard_md in st_md.shards_metadata:
  256. if (
  257. cast(_remote_device, shard_md.placement).rank()
  258. != current_rank
  259. ):
  260. continue
  261. local_shards.append(
  262. Shard(
  263. tensor=_alloc_tensor(
  264. value.properties, shard_md.shard_sizes
  265. ),
  266. metadata=shard_md,
  267. )
  268. )
  269. st = ShardedTensor._init_from_local_shards_and_global_metadata(
  270. local_shards, st_md, process_group=dp_pg
  271. )
  272. if (
  273. spec_key in layout_specs
  274. and layout_specs[spec_key][0] is not None
  275. ):
  276. fqn_to_offset[key] = cast(
  277. Sequence[int], layout_specs[spec_key][0]
  278. )
  279. state_dict[key] = st
  280. # Whether we unflatten before or after doesn't matter
  281. dist_cp.load_state_dict(
  282. state_dict=state_dict,
  283. storage_reader=storage_reader,
  284. # FIXME the type of planner is wrong in load_state_dict
  285. planner=_ReaderWithOffset(fqn_to_offset) if dp_pg is not None else None,
  286. )
  287. state_dict = unflatten_state_dict(state_dict, metadata.planner_data)
  288. return state_dict