functional.py

import torch
import torch.distributed as dist
from torch.autograd import Function

# The two imports below are not always available depending on the
# USE_DISTRIBUTED compile flag. Make sure they raise an import error
# if we're trying to use them.
from torch.distributed import group, ReduceOp


def broadcast(tensor, src, group=group.WORLD):
    """
    Broadcasts the tensor to the whole group.

    ``tensor`` must have the same number of elements in all processes
    participating in the collective.

    Arguments:
        tensor (Tensor): Data to be sent if ``src`` is the rank of current
            process.
        src (int): Source rank.
        group (ProcessGroup, optional): The process group to work on.

    Returns:
        Tensor: Received tensor from the broadcast op.

    """
    return _Broadcast.apply(src, group, tensor)
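
# Usage sketch (illustrative comment, not part of the original module). Assumes
# the default process group has already been initialized, e.g. via
# dist.init_process_group(), with at least two ranks:
#
#     x = torch.full((4,), float(dist.get_rank()) + 1.0, requires_grad=True)
#     y = broadcast(x, src=0)   # every rank now holds rank 0's data
#     y.sum().backward()        # grads are summed onto rank 0; other ranks get zeros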


def gather(tensor, dst=0, group=group.WORLD):
    """
    Gathers a list of tensors in a single process.

    Arguments:
        tensor (Tensor): Input tensor.
        dst (int, optional): Destination rank (default is 0).
        group (ProcessGroup, optional): The process group to work on.

    Returns:
        tuple[Tensor]: List of appropriately-sized tensors with the gathered data.

    """
    return _Gather.apply(dst, group, tensor)
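
# Usage sketch (illustrative comment, not part of the original module):
#
#     x = torch.ones(2, requires_grad=True) * (dist.get_rank() + 1)
#     outs = gather(x, dst=0)   # tuple of world_size tensors; only rank 0's tuple
#                               # holds the gathered data, other ranks see zeros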


def scatter(tensors, src=0, group=group.WORLD):
    """
    Scatters a list of tensors to all processes in a group.

    Each process will receive exactly one tensor and store its data in the
    ``tensor`` argument.

    Arguments:
        tensors (list[Tensor]): List of tensors to scatter on the source rank.
            Receivers must pass ``None``.
        src (int, optional): Source rank (default is 0).
        group (ProcessGroup, optional): The process group to work on.

    Returns:
        Tensor: Output tensor from the scatter operation.

    """
    return _Scatter.apply(src, group, *tensors)
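
# Usage sketch (illustrative comment, not part of the original module). Note
# that ``_Scatter`` sizes its output from ``tensors[0]``, so this sketch passes
# the same-shaped list on every rank instead of ``None`` on receivers:
#
#     world_size = dist.get_world_size()
#     inputs = [torch.ones(3, requires_grad=True) * (r + 1) for r in range(world_size)]
#     out = scatter(inputs, src=0)   # each rank receives inputs[rank] as sent by rank 0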


def reduce(tensor, dst, op=ReduceOp.SUM, group=group.WORLD):
    """
    Reduces the tensor data across all machines.

    Only the process with rank ``dst`` is going to receive the final result.

    Arguments:
        tensor (Tensor): Input of the collective.
        dst (int): Destination rank.
        op (optional): One of the values from
            ``torch.distributed.ReduceOp``
            enum. Specifies an operation used for element-wise reductions.
        group (ProcessGroup, optional): The process group to work on.

    Returns:
        Tensor: Output of the collective.

    """
    return _Reduce.apply(dst, op, group, tensor)
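
# Usage sketch (illustrative comment, not part of the original module):
#
#     x = torch.ones(4, requires_grad=True)
#     y = reduce(x, dst=0, op=ReduceOp.SUM)   # rank 0's y holds the element-wise sum
#     y.sum().backward()                      # backward broadcasts the gradient from rank 0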


def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=group.WORLD):
    """
    Reduces, then scatters a list of tensors to all processes in a group.

    Arguments:
        output (Tensor): Output tensor.
        input_list (list[Tensor]): List of tensors to reduce and scatter.
        op (optional): One of the values from
            ``torch.distributed.ReduceOp``
            enum. Specifies an operation used for element-wise reductions.
        group (ProcessGroup, optional): The process group to work on.

    Returns:
        Tensor: Output of the collective.

    """
    return _Reduce_Scatter.apply(op, group, output, *input_list)
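
# Usage sketch (illustrative comment, not part of the original module):
#
#     world_size = dist.get_world_size()
#     inputs = [torch.ones(2, requires_grad=True) * (r + 1) for r in range(world_size)]
#     out = reduce_scatter(torch.empty(2), inputs, op=ReduceOp.SUM)
#     # rank i now holds the element-wise sum over all ranks of their inputs[i]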


def all_gather(tensor, group=group.WORLD):
    """
    Gathers tensors from the whole group in a list.

    Arguments:
        tensor (Tensor): Tensor to be broadcast from current process.
        group (ProcessGroup, optional): The process group to work on.

    Returns:
        tuple[Tensor]: Output of the collective.

    """
    return _AllGather.apply(group, tensor)
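
# Usage sketch (illustrative comment, not part of the original module):
#
#     x = torch.ones(3, requires_grad=True) * (dist.get_rank() + 1)
#     outs = all_gather(x)                 # tuple of world_size tensors, one per rank
#     torch.stack(outs).sum().backward()   # gradients flow back to every rank's x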


def _all_gather_base(output_tensor, input_tensor, group=group.WORLD):
    """
    Single tensor all gather. Gathers a single tensor from all ranks, and puts them in a single output tensor.

    Args:
        output_tensor (Tensor): Output tensor. It should be correctly sized
            to hold the output of the collective.
        input_tensor (Tensor): Tensor to be broadcast from current process.
        group (ProcessGroup, optional): The process group to work on. If None,
            the default process group will be used.

    Examples:
        >>> # All tensors below are of torch.int64 dtype.
        >>> # We have one process group with 2 ranks.
        >>> # xdoctest: +SKIP("incorrect want text")
        >>> output_tensor = torch.zeros(2, dtype=torch.int64)
        >>> output_tensor
        [tensor([0, 0])] # Rank 0 and 1
        >>> tensor = torch.arange(1, dtype=torch.int64) + 1 + rank
        >>> tensor
        tensor([1]) # Rank 0
        tensor([2]) # Rank 1
        >>> _all_gather_base(output_tensor, tensor)
        >>> output_tensor
        tensor([1,2]) # Rank 0
        tensor([1,2]) # Rank 1

    .. warning::
        `_all_gather_base` is experimental and subject to change.
        It is the caller's responsibility to ensure the output_tensor
        is correctly sized.

    """
    return _AllGatherBase.apply(output_tensor, input_tensor, group)


def all_to_all(output_tensor_list, input_tensor_list, group=group.WORLD):
    """
    Each process scatters a list of input tensors to all processes in a group
    and returns the gathered list of tensors in the output list.

    Arguments:
        output_tensor_list (list[Tensor]): List of tensors to gather, one per rank.
        input_tensor_list (list[Tensor]): List of tensors to scatter, one per rank.
        group (ProcessGroup, optional): The process group to work on.

    Returns:
        tuple[Tensor]: Output of the collective.

    """
    return _AlltoAll.apply(group, output_tensor_list, *input_tensor_list)
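
# Usage sketch (illustrative comment, not part of the original module):
#
#     world_size = dist.get_world_size()
#     rank = dist.get_rank()
#     inputs = [torch.full((2,), float(rank), requires_grad=True) for _ in range(world_size)]
#     outputs = [torch.empty(2) for _ in range(world_size)]
#     outputs = all_to_all(outputs, inputs)   # outputs[i] now holds rank i's inputs[rank]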


def all_to_all_single(
    output,
    input,
    output_split_sizes=None,
    input_split_sizes=None,
    group=group.WORLD,
):
    """
    Each process splits its input tensor and then scatters the split list
    to all processes in a group. It then concatenates the tensors received from
    all the processes in the group and returns a single output tensor.

    Arguments:
        output (Tensor): Gathered concatenated output tensor.
        input (Tensor): Input tensor to scatter.
        output_split_sizes (list[int], optional): Output split sizes for dim 0.
            If None or empty, dim 0 of the ``output`` tensor must divide
            equally by ``world_size``.
        input_split_sizes (list[int], optional): Input split sizes for dim 0.
            If None or empty, dim 0 of the ``input`` tensor must divide
            equally by ``world_size``.

    Returns:
        Tensor: Output of the collective.

    """
    return _AlltoAllSingle.apply(
        group, output, output_split_sizes, input_split_sizes, input
    )
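
# Usage sketch (illustrative comment, not part of the original module):
#
#     world_size = dist.get_world_size()
#     x = torch.arange(2.0 * world_size, requires_grad=True)
#     out = all_to_all_single(torch.empty(2 * world_size), x)
#     # dim 0 of x is split evenly across ranks; each rank concatenates what it receives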


def all_reduce(tensor, op=ReduceOp.SUM, group=group.WORLD):
    """
    Reduces the tensor data across all machines in such a way that all get
    the final result.

    After the call the returned tensor is going to be bitwise
    identical in all processes.

    Arguments:
        tensor (Tensor): Input of the collective.
        op (optional): One of the values from
            ``torch.distributed.ReduceOp``
            enum. Specifies an operation used for element-wise reductions.
        group (ProcessGroup, optional): The process group to work on.

    Returns:
        Tensor: Output of the collective.

    """
    return _AllReduce.apply(op, group, tensor)
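
# Usage sketch (illustrative comment, not part of the original module):
#
#     x = torch.ones(4, requires_grad=True) * (dist.get_rank() + 1)
#     y = all_reduce(x, op=ReduceOp.SUM)   # every rank holds the element-wise sum
#     y.sum().backward()                   # backward all-reduces the gradients as well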


class _Broadcast(Function):
    @staticmethod
    def forward(ctx, src, group, tensor):
        ctx.src = src
        ctx.group = group
        ctx.rank = dist.get_rank()
        # torch.distributed makes all the calls in place
        # we allocate new tensors to avoid this
        tensor = tensor.clone()
        dist.broadcast(tensor, src, group=group)
        return tensor

    @staticmethod
    def backward(ctx, grad_output):
        gx = _Reduce.apply(ctx.src, ReduceOp.SUM, ctx.group, grad_output)
        if ctx.src != ctx.rank:
            gx.zero_()
        return (None, None, gx)


class _Gather(Function):
    @staticmethod
    def forward(ctx, dst, group, tensor):
        ctx.dst = dst
        ctx.group = group
        # Need to create a list of tensors here to do the
        # aggregation; its length comes from the group size.
        # The input tensor must be correctly sized for the gather.
        tensor_list = [
            torch.zeros_like(tensor) for i in range(dist.get_world_size(group=group))
        ]

        tensor = tensor.contiguous()
        if dist.get_rank(group=group) == dst:
            dist.gather(tensor, tensor_list, dst, group=group)
        else:
            dist.gather(tensor, None, dst, group=group)
        return tuple(tensor_list)

    @staticmethod
    def backward(ctx, *grad_outputs):
        return (None, None) + (_Scatter.apply(ctx.dst, ctx.group, *grad_outputs),)


class _Scatter(Function):
    @staticmethod
    def forward(ctx, src, group, *tensors):
        ctx.src = src
        ctx.group = group
        assert all(t.size() == tensors[0].size() for t in tensors)
        output = torch.zeros_like(tensors[0])
        if dist.get_rank(group=group) == src:
            dist.scatter(output, list(tensors), src, group=group)
        else:
            dist.scatter(output, None, src, group=group)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        return (None, None) + _Gather.apply(ctx.src, ctx.group, grad_output)


class _Reduce(Function):
    @staticmethod
    def forward(ctx, src, op, group, tensor):
        ctx.src = src
        ctx.group = group
        tensor = tensor.clone()
        dist.reduce(tensor, src, op=op, group=group)
        return tensor

    @staticmethod
    def backward(ctx, grad_output):
        return (None, None, None) + (_Broadcast.apply(ctx.src, ctx.group, grad_output),)


class _Reduce_Scatter(Function):
    @staticmethod
    def forward(ctx, op, group, tensor, *input_tensor_list):
        ctx.group = group
        input_tensor_list = tuple(t.contiguous() for t in input_tensor_list)
        dist.reduce_scatter(tensor, list(input_tensor_list), op=op, group=group)
        return tensor

    @staticmethod
    def backward(ctx, grad_output):
        return (None, None, None) + _AllGather.apply(ctx.group, grad_output)


class _AllGather(Function):
    @staticmethod
    def forward(ctx, group, tensor):
        # Need contiguous tensors for collectives.
        tensor = tensor.contiguous()

        ctx.group = group
        out_tensor_list = [
            torch.empty_like(tensor) for _ in range(dist.get_world_size(group=group))
        ]
        dist.all_gather(out_tensor_list, tensor, group=group)
        return tuple(out_tensor_list)

    @staticmethod
    def backward(ctx, *grad_outputs):
        if dist.get_backend(group=ctx.group) is dist.Backend.NCCL:
            rank = dist.get_rank()
            gx = torch.empty_like(grad_outputs[rank])
            _Reduce_Scatter.apply(ReduceOp.SUM, ctx.group, gx, *grad_outputs)
        else:
            # As many backends don't support ReduceScatter, we use AlltoAll with .sum()
            # to emulate the ReduceScatter behavior
            tensor_list = [torch.empty_like(tensor) for tensor in grad_outputs]
            gxs = _AlltoAll.apply(ctx.group, tensor_list, *grad_outputs)
            gx = torch.sum(torch.stack(gxs), dim=0)
        return (None, gx)


class _AllGatherBase(Function):
    @staticmethod
    def forward(ctx, output_tensor, input_tensor, group):
        ctx.group = group
        dist._all_gather_base(output_tensor, input_tensor.contiguous(), group=group)
        return output_tensor

    @staticmethod
    def backward(ctx, grad_output):
        if dist.get_backend(group=ctx.group) is dist.Backend.NCCL:
            world_size = dist.get_world_size(group=ctx.group)
            out_size = list(grad_output.size())
            if out_size[0] % world_size != 0:
                raise RuntimeError(
                    f'Tensor with dimensions: {out_size} does '
                    f'not have first dimension divisible by world_size: {world_size}'
                )
            out_size[0] = out_size[0] // dist.get_world_size(group=ctx.group)
            gx = torch.empty(out_size, device=grad_output.device, dtype=grad_output.dtype)
            dist._reduce_scatter_base(gx, grad_output, ReduceOp.SUM, ctx.group)
        else:
            raise RuntimeError("Backend not supported!")
        return (None, gx, None)


class _AlltoAll(Function):
    @staticmethod
    def forward(ctx, group, out_tensor_list, *tensors):
        ctx.group = group
        ctx.input_tensor_size_list = [
            tensors[i].size() for i in range(dist.get_world_size(group=group))
        ]
        my_rank = dist.get_rank(group=group)
        tensors = tuple(t.contiguous() for t in tensors)
        # Implement it by means of scatter/gather; send/recv async operations have issues
        if dist.get_backend(group=group) is dist.Backend.GLOO:
            for i in range(dist.get_world_size(group=group)):
                to_send = None
                if i == my_rank:
                    to_send = list(tensors)
                dist.scatter(out_tensor_list[i], to_send, i, group=group)
        else:
            dist.all_to_all(
                out_tensor_list,
                list(tensors),
                group=group,
            )
        return tuple(out_tensor_list)

    @staticmethod
    def backward(ctx, *grad_outputs):
        tensor_list = [
            torch.empty(size, device=grad_outputs[0].device, dtype=grad_outputs[0].dtype)
            for size in ctx.input_tensor_size_list
        ]
        return (None, None) + _AlltoAll.apply(ctx.group, tensor_list, *grad_outputs)


class _AlltoAllSingle(Function):
    @staticmethod
    def forward(ctx, group, output, output_split_sizes, input_split_sizes, input):
        ctx.group = group
        ctx.input_size = input.size()
        # The backward pass swaps the roles of the output and input split sizes.
        ctx.output_split_sizes = input_split_sizes
        ctx.input_split_sizes = output_split_sizes
        dist.all_to_all_single(
            output,
            input,
            output_split_sizes=output_split_sizes,
            input_split_sizes=input_split_sizes,
            group=group,
        )
        return output

    @staticmethod
    def backward(ctx, grad_output):
        tensor = torch.empty(ctx.input_size, device=grad_output.device, dtype=grad_output.dtype)
        return (None, None, None, None) + (
            _AlltoAllSingle.apply(
                ctx.group,
                tensor,
                ctx.output_split_sizes,
                ctx.input_split_sizes,
                grad_output.contiguous(),
            ),
        )


class _AllReduce(Function):
    @staticmethod
    def forward(ctx, op, group, tensor):
        ctx.group = group
        ctx.op = op
        tensor = tensor.clone()
        dist.all_reduce(tensor, op=op, group=group)
        return tensor

    @staticmethod
    def backward(ctx, grad_output):
        return (None, None) + (_AllReduce.apply(ctx.op, ctx.group, grad_output),)