options.py

from typing import Dict, List, Optional, Union

import torch
from torch._C._distributed_rpc import _TensorPipeRpcBackendOptionsBase

from . import constants as rpc_constants

DeviceType = Union[int, str, torch.device]

__all__ = ["TensorPipeRpcBackendOptions"]


def _to_device(device: DeviceType) -> torch.device:
    device = torch.device(device)
    if device.type != "cuda":
        raise ValueError(
            "`set_devices` expects a list of CUDA devices, but got "
            f"device type {device.type}."
        )
    return device

def _to_device_map(
    device_map: Dict[DeviceType, DeviceType]
) -> Dict[torch.device, torch.device]:
    full_device_map: Dict[torch.device, torch.device] = {}
    reverse_map: Dict[torch.device, torch.device] = {}
    for k, v in device_map.items():
        k, v = torch.device(k), torch.device(v)
        if v in reverse_map:
            raise ValueError(
                "`device_map` only supports 1-to-1 mapping, "
                f"trying to map {k} and {reverse_map[v]} to {v}"
            )
        full_device_map[k] = v
        reverse_map[v] = k
    return full_device_map


def _to_device_list(devices: List[DeviceType]) -> List[torch.device]:
    return list(map(_to_device, devices))
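
# For reference (not part of the original module): the helpers above normalize
# user-provided device identifiers and enforce the constraints the RPC options
# rely on. Assuming a hypothetical machine with at least two CUDA devices, they
# behave roughly as follows:
#
#   _to_device("cuda:1")            -> torch.device("cuda:1")
#   _to_device("cpu")               -> ValueError (only CUDA devices are accepted)
#   _to_device_list([0, "cuda:1"])  -> [torch.device("cuda:0"), torch.device("cuda:1")]
#   _to_device_map({"cuda:0": 1})   -> {torch.device("cuda:0"): torch.device("cuda:1")}
#   _to_device_map({0: 1, 1: 1})    -> ValueError (two keys map to cuda:1, not 1-to-1)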

class TensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase):
    r"""
    The backend options for
    :class:`~torch.distributed.rpc.TensorPipeAgent`, derived from
    :class:`~torch.distributed.rpc.RpcBackendOptions`.

    Args:
        num_worker_threads (int, optional): The number of threads in the
            thread-pool used by
            :class:`~torch.distributed.rpc.TensorPipeAgent` to execute
            requests (default: 16).
        rpc_timeout (float, optional): The default timeout, in seconds,
            for RPC requests (default: 60 seconds). If the RPC has not
            completed in this timeframe, an exception indicating so will
            be raised. Callers can override this timeout for individual
            RPCs in :meth:`~torch.distributed.rpc.rpc_sync` and
            :meth:`~torch.distributed.rpc.rpc_async` if necessary.
        init_method (str, optional): The URL to initialize the distributed
            store used for rendezvous. It takes any value accepted for the
            same argument of :meth:`~torch.distributed.init_process_group`
            (default: ``env://``).
        device_maps (Dict[str, Dict], optional): Device placement mappings from
            this worker to the callee. The key is the callee worker name and
            the value is a dictionary (``Dict`` of ``int``, ``str``, or
            ``torch.device``) that maps this worker's devices to the callee
            worker's devices. (default: ``None``)
        devices (List[int, str, or ``torch.device``], optional): all local
            CUDA devices used by the RPC agent. By default, it will be
            initialized to all local devices from its own ``device_maps`` and
            corresponding devices from its peers' ``device_maps``. When
            processing CUDA RPC requests, the agent will properly synchronize
            CUDA streams for all devices in this ``List``.
    """

    def __init__(
        self,
        *,
        num_worker_threads: int = rpc_constants.DEFAULT_NUM_WORKER_THREADS,
        rpc_timeout: float = rpc_constants.DEFAULT_RPC_TIMEOUT_SEC,
        init_method: str = rpc_constants.DEFAULT_INIT_METHOD,
        device_maps: Optional[Dict[str, Dict[DeviceType, DeviceType]]] = None,
        devices: Optional[List[DeviceType]] = None,
        _transports: Optional[List] = None,
        _channels: Optional[List] = None,
    ):
        full_device_maps = (
            {}
            if device_maps is None
            else {k: _to_device_map(v) for k, v in device_maps.items()}
        )
        full_device_list = [] if devices is None else _to_device_list(devices)
        super().__init__(
            num_worker_threads,
            _transports,
            _channels,
            rpc_timeout,
            init_method,
            full_device_maps,
            full_device_list,
        )

    def set_device_map(self, to: str, device_map: Dict[DeviceType, DeviceType]):
        r"""
        Set the device mapping between each RPC caller and callee pair. This
        function can be called multiple times to incrementally add
        device placement configurations.

        Args:
            to (str): Callee name.
            device_map (Dict of int, str, or torch.device): Device placement
                mappings from this worker to the callee. This map must be
                invertible.

        Example:
            >>> # xdoctest: +SKIP("distributed")
            >>> # both workers
            >>> def add(x, y):
            >>>     print(x)  # tensor([1., 1.], device='cuda:1')
            >>>     return x + y, (x + y).to(2)
            >>>
            >>> # on worker 0
            >>> options = TensorPipeRpcBackendOptions(
            >>>     num_worker_threads=8,
            >>>     device_maps={"worker1": {0: 1}}
            >>>     # maps worker0's cuda:0 to worker1's cuda:1
            >>> )
            >>> options.set_device_map("worker1", {1: 2})
            >>> # maps worker0's cuda:1 to worker1's cuda:2
            >>>
            >>> rpc.init_rpc(
            >>>     "worker0",
            >>>     rank=0,
            >>>     world_size=2,
            >>>     backend=rpc.BackendType.TENSORPIPE,
            >>>     rpc_backend_options=options
            >>> )
            >>>
            >>> x = torch.ones(2)
            >>> rets = rpc.rpc_sync("worker1", add, args=(x.to(0), 1))
            >>> # The first argument will be moved to cuda:1 on worker1. When
            >>> # sending the return value back, it will follow the inverse of
            >>> # the device map, and hence will be moved back to cuda:0 and
            >>> # cuda:1 on worker0
            >>> print(rets[0])  # tensor([2., 2.], device='cuda:0')
            >>> print(rets[1])  # tensor([2., 2.], device='cuda:1')
        """
        full_device_map = _to_device_map(device_map)
        curr_device_maps = super().device_maps
        if to in curr_device_maps:
            for k, v in full_device_map.items():
                if k in curr_device_maps[to] and v != curr_device_maps[to][k]:
                    raise ValueError(
                        "`set_device_map` only supports 1-to-1 mapping, trying"
                        f" to map {k} to {v} and {curr_device_maps[to][k]}"
                    )
        super()._set_device_map(to, full_device_map)

    def set_devices(self, devices: List[DeviceType]):
        r"""
        Set local devices used by the TensorPipe RPC agent. When processing
        CUDA RPC requests, the TensorPipe RPC agent will properly synchronize
        CUDA streams for all devices in this ``List``.

        Args:
            devices (List of int, str, or torch.device): local devices used by
                the TensorPipe RPC agent.
        """
        self.devices = _to_device_list(devices)
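

# Illustrative sketch (not part of the original module): how these options are
# typically wired into ``torch.distributed.rpc.init_rpc``. The worker names,
# world size, and device indices below are assumptions for a hypothetical
# two-process, two-GPU setup. The default ``env://`` init_method additionally
# requires MASTER_ADDR/MASTER_PORT to be set, and a matching "worker1" process
# must be launched separately for this to rendezvous.
if __name__ == "__main__":
    import torch.distributed.rpc as rpc

    opts = TensorPipeRpcBackendOptions(
        num_worker_threads=16,
        rpc_timeout=120,
        # Send tensors on this worker's cuda:0 to worker1's cuda:1.
        device_maps={"worker1": {"cuda:0": "cuda:1"}},
    )
    # Incrementally add another mapping; the map per callee must stay 1-to-1.
    opts.set_device_map("worker1", {"cuda:1": "cuda:0"})
    # Restrict the agent to the local devices whose streams it synchronizes.
    opts.set_devices(["cuda:0", "cuda:1"])

    rpc.init_rpc(
        "worker0",
        rank=0,
        world_size=2,
        backend=rpc.BackendType.TENSORPIPE,
        rpc_backend_options=opts,
    )
    rpc.shutdown()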