# api.py

__all__ = ["shutdown", "get_worker_info", "remote", "rpc_sync",
           "rpc_async", "RRef", "AllGatherStates", "method_factory", "new_method"]

import collections
import contextlib
import functools
import inspect
import logging
import threading
from typing import Dict, Generic, TypeVar, Set, Any

import torch
from torch.futures import Future
from torch._C._distributed_rpc import (
    PyRRef,
    RemoteProfilerManager,
    WorkerInfo,
    TensorPipeAgent,
    get_rpc_timeout,
    _cleanup_python_rpc_handler,
    _delete_all_user_and_unforked_owner_rrefs,
    _destroy_rref_context,
    _get_current_rpc_agent,
    _invoke_remote_builtin,
    _invoke_remote_python_udf,
    _invoke_remote_torchscript,
    _invoke_rpc_builtin,
    _invoke_rpc_python_udf,
    _invoke_rpc_torchscript,
    _is_current_rpc_agent_set,
    _reset_current_rpc_agent,
    _set_and_start_rpc_agent,
)

from .internal import (
    PythonUDF,
    RPCExecMode,
    _internal_rpc_pickler,
    _build_rpc_profiling_key,
)

from .constants import DEFAULT_SHUTDOWN_TIMEOUT, UNSET_RPC_TIMEOUT

from ._utils import _group_membership_management, _update_group_membership

logger = logging.getLogger(__name__)

# NB: Ignoring RRef leaks during shutdown. Without this, applications have to
# make sure there are no references to any RRef in the application code and
# that Python GC has done its job to delete those RRefs. This could result in
# a bad debugging experience, especially for large applications. Therefore, by
# default, we ignore RRef leaks during shutdown. This is usually fine as
# shutdown means applications have finished training and no longer care about
# states.
#
# To enable RRef leak checking, set _ignore_rref_leak to False.
_ignore_rref_leak = True

_default_pickler = _internal_rpc_pickler


@contextlib.contextmanager
def _use_rpc_pickler(rpc_pickler):
    r"""
    rpc_pickler: (.internal._InternalRPCPickler) Overrides the default RPC pickler
    """
    global _default_pickler
    _default_pickler = rpc_pickler
    try:
        yield
    finally:
        _default_pickler = _internal_rpc_pickler
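

# Illustrative sketch (not part of the original API surface): temporarily
# swapping the RPC pickler for a block of calls. `custom_pickler` is assumed
# to implement the same interface as `.internal._InternalRPCPickler`; the
# destination `dst` is a worker name or WorkerInfo.
def _example_use_custom_pickler(custom_pickler, dst):
    with _use_rpc_pickler(custom_pickler):
        # RPCs issued inside this block serialize Python UDFs with
        # `custom_pickler`; the default pickler is restored on exit.
        return rpc_sync(dst, torch.add, args=(torch.ones(2), 1))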


def _require_initialized(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not _is_current_rpc_agent_set():
            raise RuntimeError(
                "RPC has not been initialized. Call "
                "torch.distributed.rpc.init_rpc first."
            )
        return func(*args, **kwargs)

    return wrapper


class AllGatherStates:
    def __init__(self):
        # Each `gathered_objects` is an empty dict at the beginning.
        # The leader worker is elected as the first worker in a sorted worker
        # name list. Whenever there is a worker entering `_all_gather()`, it
        # runs `_gather_to_leader()` on the leader to add its own name and
        # data obj to this dict. The leader also adds its own name to the dict
        # on calling `_all_gather()`.
        # Once `set(gathered_objects.keys()) == _ALL_WORKER_NAMES`, the leader
        # will broadcast the gathered dict to all follower workers and set their
        # `gathered_objects` field and the `proceed_signal` field.
        self.gathered_objects = {}
        # Each worker waits on this signal until it receives all gathered
        # objects.
        self.proceed_signal = threading.Event()


# States used by `def _all_gather()`.
# `_ALL_WORKER_NAMES` is initialized when initializing the RPC layer.
_ALL_WORKER_NAMES: Set[Any] = set()
_all_gather_dict_lock = threading.RLock()
_all_gather_sequence_id: Dict[str, int] = {}
_all_gather_sequence_id_to_states: collections.defaultdict = collections.defaultdict(AllGatherStates)


def _init_rpc_states(agent):
    worker_infos = agent.get_worker_infos()
    global _ALL_WORKER_NAMES
    _ALL_WORKER_NAMES = {worker_info.name for worker_info in worker_infos}

    # NB: backend implementation might have already set the rpc_agent.
    if not _is_current_rpc_agent_set():
        _set_and_start_rpc_agent(agent)


def _gather_to_leader(sequence_id, worker_name, obj, worker_names=None):
    with _all_gather_dict_lock:
        if not worker_names:
            worker_names = _ALL_WORKER_NAMES
            assert (
                worker_name in worker_names
            ), f"{worker_name} is not expected by leader."
        states = _all_gather_sequence_id_to_states[sequence_id]
        assert (
            worker_name not in states.gathered_objects
        ), f"{worker_name} reported intent sequence id {sequence_id} twice."
        states.gathered_objects[worker_name] = obj
        if worker_names == set(states.gathered_objects.keys()):
            states.proceed_signal.set()


def _broadcast_to_followers(sequence_id, objects_map):
    with _all_gather_dict_lock:
        states = _all_gather_sequence_id_to_states[sequence_id]
        assert (
            not states.proceed_signal.is_set()
        ), "Termination signal sequence id {} got set twice.".format(sequence_id)
        states.gathered_objects = objects_map
        states.proceed_signal.set()


_thread_local_var = threading.local()


@contextlib.contextmanager
def _wait_all():
    r"""
    A context manager that collects all futures returned by ``rpc_async`` and
    waits on them at the context manager's exit, relieving the user of the
    need to call ``wait`` explicitly.

    Example::
        >>> # xdoctest: +SKIP("distributed")
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> with rpc._wait_all():
        >>>     fut_1 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
        >>>     fut_2 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
        >>> # fut_1 and fut_2 are waited on
    """
    _thread_local_var.future_list = []
    try:
        yield
    finally:
        try:
            torch.futures.wait_all(_thread_local_var.future_list)
        finally:
            del _thread_local_var.future_list


@_require_initialized
def _all_gather(obj, worker_names=None, timeout=UNSET_RPC_TIMEOUT):
    r"""
    This is similar to torch.distributed.all_gather(), but it uses RPC. It
    picks the worker with the smallest name (alphabetical order) as the leader.
    Then all followers send their data ``obj`` to the leader. After the leader
    has received all objects, it broadcasts the results back to all followers.
    This function blocks until all workers have received the gathered results.
    """
    if not worker_names:
        assert (
            _ALL_WORKER_NAMES is not None
        ), "`_ALL_WORKER_NAMES` is not initialized for `def _all_gather`."
        worker_names = _ALL_WORKER_NAMES

    leader_name = min(worker_names)
    self_name = _get_current_rpc_agent().get_worker_info().name

    with _all_gather_dict_lock:
        concat_names = "".join(sorted(worker_names))
        sequence_num = _all_gather_sequence_id.get(concat_names, 0)
        _all_gather_sequence_id[concat_names] = sequence_num + 1
        sequence_id = concat_names + str(sequence_num)

    is_leader = leader_name == self_name

    if timeout == UNSET_RPC_TIMEOUT:
        # Timeout is specified by agent for RPC calls
        rpc_timeout = get_rpc_timeout()
        # No timeout for signal
        signal_timeout = None
    elif timeout == DEFAULT_SHUTDOWN_TIMEOUT:
        # No timeout for RPC
        rpc_timeout = timeout
        # No timeout for signal
        signal_timeout = None
    else:
        # Signal and RPC timeout use the same timeout
        signal_timeout = rpc_timeout = timeout

    # Phase 1: Followers send their objects to the leader
    if is_leader:
        _gather_to_leader(sequence_id, self_name, obj, worker_names)
    else:
        rpc_sync(
            leader_name,
            _gather_to_leader,
            args=(sequence_id, self_name, obj, worker_names),
            timeout=rpc_timeout,
        )

    with _all_gather_dict_lock:
        states = _all_gather_sequence_id_to_states[sequence_id]

    # Timeout is either set by function parameter or None (which is indefinite)
    states.proceed_signal.wait(timeout=signal_timeout)

    # Phase 2: Leader broadcasts gathered results to all followers
    # Leader's signal is the first to be unblocked, after receiving all
    # followers' data objects.
    if is_leader:
        worker_name_to_response_future_dict = {}
        for follower_name in worker_names - {leader_name}:
            fut = rpc_async(
                follower_name,
                _broadcast_to_followers,
                args=(sequence_id, states.gathered_objects),
                timeout=rpc_timeout
            )
            worker_name_to_response_future_dict[follower_name] = fut

        errors = []
        for follower_name, fut in worker_name_to_response_future_dict.items():
            try:
                fut.wait()
            except RuntimeError as ex:
                errors.append((follower_name, ex))

        if errors:
            raise RuntimeError(
                f"Followers {[e[0] for e in errors]} timed out in _all_gather "
                f"after {rpc_timeout:.2f} seconds. The first exception is {errors[0][1]}"
            )

    # Clean up the states for this sequence_id
    with _all_gather_dict_lock:
        states = _all_gather_sequence_id_to_states.pop(sequence_id)
    return states.gathered_objects
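

# Illustrative sketch (not part of the original module): how the leader/follower
# protocol above is typically driven. Every worker calls `_all_gather` with its
# own payload; each call blocks until all workers in `worker_names` (or all
# known workers when None) have contributed, and returns a dict mapping worker
# name to the contributed object.
def _example_all_gather_ids(worker_names=None):
    payload = _get_current_rpc_agent().get_worker_info().id
    return _all_gather(payload, worker_names)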


@_require_initialized
def _barrier(worker_names):
    r"""
    Synchronizes local and remote RPC processes.

    This will block until all local and remote RPC processes specified under worker_names
    reach this method to wait for all outstanding work to complete.

    Args:
        worker_names (List[str]): The set of workers to synchronize.
    """
    try:
        _all_gather(None, set(worker_names))
    except RuntimeError as ex:
        logger.error(
            f"Failed to complete barrier, got error {ex}"
        )
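

# Illustrative sketch (not part of the original module): using `_barrier` to
# line up a fixed set of workers before proceeding. The worker names are
# hypothetical. Note that `_barrier` logs failures instead of raising, so
# callers cannot rely on it for error handling.
def _example_sync_workers():
    _barrier(["worker0", "worker1"])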


@_require_initialized
def _wait_all_workers(timeout=DEFAULT_SHUTDOWN_TIMEOUT):
    r"""
    Block until all local and remote RPC processes reach this method and wait
    for all outstanding work to complete. Every RPC process must call this
    method before exit to perform a graceful shutdown. This should be used to
    terminate the RPC framework, and there is no guarantee that the RPC
    framework will work after this method returns.
    """
    try:
        _all_gather(None, timeout=timeout)
    except RuntimeError as ex:
        logger.error(
            f"Failed to respond to 'Shutdown Proceed' in time, got error {ex}"
        )
        raise ex


@_require_initialized
def shutdown(graceful=True, timeout=DEFAULT_SHUTDOWN_TIMEOUT):
    r"""
    Perform a shutdown of the RPC agent, and then destroy the RPC agent. This
    stops the local agent from accepting outstanding requests, and shuts
    down the RPC framework by terminating all RPC threads. If ``graceful=True``,
    this will block until all local and remote RPC processes reach this method
    and wait for all outstanding work to complete. Otherwise, if
    ``graceful=False``, this is a local shutdown, and it does not wait for other
    RPC processes to reach this method.

    .. warning::
        For :class:`~torch.futures.Future` objects returned by
        :meth:`~torch.distributed.rpc.rpc_async`, ``future.wait()`` should not
        be called after ``shutdown()``.

    Args:
        graceful (bool): Whether to do a graceful shutdown or not. If True,
                         this will 1) wait until there are no pending system
                         messages for ``UserRRefs`` and delete them; 2) block
                         until all local and remote RPC processes have reached
                         this method and wait for all outstanding work to
                         complete.

    Example::
        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> # do some work
        >>> result = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(1), 1))
        >>> # ready to shutdown
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> # wait for worker 0 to finish work, and then shutdown.
        >>> rpc.shutdown()
    """
    if graceful:
        try:
            agent = _get_current_rpc_agent()
            if not isinstance(agent, TensorPipeAgent) or agent.is_static_group:
                _wait_all_workers(timeout)
                _delete_all_user_and_unforked_owner_rrefs()
                agent.join(shutdown=True, timeout=timeout)
            else:
                # This is a dynamic group so we need to grab the token for the operation
                my_worker_info = agent.get_worker_info()
                my_name = my_worker_info.name
                with _group_membership_management(agent.store, my_name, False):
                    all_worker_infos = agent.get_worker_infos()
                    for worker in all_worker_infos:
                        if worker.name != my_name:
                            rpc_sync(worker.name, _update_group_membership, args=(my_worker_info, [], {}, False))
                    agent.join(shutdown=True, timeout=timeout)
        finally:
            # In case of errors, continue to complete the local shutdown.
            _finalize_shutdown()
    else:
        _finalize_shutdown()


def _finalize_shutdown():
    try:
        # This raises a `TORCH_CHECK()` exception on RRef leak detected.
        _destroy_rref_context(_ignore_rref_leak)
    finally:
        _get_current_rpc_agent().shutdown()

        # Clean up the python rpc handler in shutdown(); see comments in
        # PythonRpcHandler::cleanup(). We call it from the python API because
        # cleanup() has a python dependency: it assumes the python interpreter
        # exists.
        # No matter whether an RRef leak exception is raised, this clean-up
        # code must run to avoid a destruction segfault in Python 3.5.
        #
        # future.wait() should not be called after shutdown().
        # pythonRpcHandler is cleaned up in shutdown(); after shutdown(),
        # python objects returned from rpc python calls can no longer be
        # resolved.
        _cleanup_python_rpc_handler()
        _reset_current_rpc_agent()


@_require_initialized
def get_worker_info(worker_name=None):
    r"""
    Get :class:`~torch.distributed.rpc.WorkerInfo` of a given worker name.
    Use this :class:`~torch.distributed.rpc.WorkerInfo` to avoid passing an
    expensive string on every invocation.

    Args:
        worker_name (str): the string name of a worker. If ``None``, return the
                           id of the current worker. (default ``None``)

    Returns:
        :class:`~torch.distributed.rpc.WorkerInfo` instance for the given
        ``worker_name`` or :class:`~torch.distributed.rpc.WorkerInfo` of the
        current worker if ``worker_name`` is ``None``.
    """
    if worker_name is not None:
        return _get_current_rpc_agent().get_worker_info(worker_name)
    else:
        return _get_current_rpc_agent().get_worker_info()
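

# Illustrative sketch (not part of the original module): resolving a
# `WorkerInfo` once and reusing it, instead of passing the worker-name string
# on every call. The worker name is hypothetical.
def _example_reuse_worker_info():
    info = get_worker_info("worker1")
    return rpc_sync(info, torch.add, args=(torch.ones(2), 1))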


def _to_worker_info(to):
    if isinstance(to, WorkerInfo):
        return to
    elif isinstance(to, (str, int)):
        return get_worker_info(to)
    else:
        raise ValueError("Cannot get WorkerInfo from name {}".format(to))


def _rref_typeof_on_owner(rref, blocking=True):
    rref_type = type(rref.local_value())
    if blocking:
        return rref_type
    else:
        # Wrap the result into a completed Future. This is so that if
        # blocking=False is specified, we return a future regardless of
        # whether this call is on the user or the owner.
        future = Future[type]()
        future.set_result(rref_type)
        return future


def _rref_typeof_on_user(rref, timeout=UNSET_RPC_TIMEOUT, blocking=True):
    fut = rpc_async(
        rref.owner(),
        _rref_typeof_on_owner,
        args=(rref,),
        timeout=timeout
    )
    if blocking:
        return fut.wait()
    else:
        return fut
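

# Illustrative sketch (not part of the original module): querying an RRef's
# value type from the user side, in blocking and non-blocking form. `rref` is
# assumed to be a user RRef created via `remote()`.
def _example_rref_type(rref):
    blocking_type = _rref_typeof_on_user(rref)        # returns the type directly
    fut = _rref_typeof_on_user(rref, blocking=False)  # returns a Future of the type
    return blocking_type, fut.wait()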


T = TypeVar("T")
GenericWithOneTypeVar = Generic[T]


try:
    # Combine the implementation class and the type class.
    class RRef(PyRRef, Generic[T]):
        pass
except TypeError:
    # TypeError: metaclass conflict: the metaclass of a derived class
    # must be a (non-strict) subclass of the metaclasses of all its bases
    # Mypy doesn't understand __class__ (mypy bug #4177)
    class RRefMeta(PyRRef.__class__, GenericWithOneTypeVar.__class__):  # type: ignore[name-defined, misc, valid-type]
        pass

    # Combine the implementation class and the type class.
    # Types for classes expecting a certain generic parameter (mypy bug #7791)
    class RRef(PyRRef, GenericWithOneTypeVar, metaclass=RRefMeta):  # type: ignore[misc, no-redef, valid-type]
        pass
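

# Illustrative sketch (not part of the original module): the metaclass
# gymnastics above exist so that `RRef` can be parameterized like any other
# generic, for example in type annotations.
def _example_annotated_fetch(r: "RRef[torch.Tensor]") -> torch.Tensor:
    return r.to_here()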


# Install docstrings from `PyRRef` to `RRef`.
#
# This is needed because pybind11 generates the parameter
# `self` as type `rpc.PyRRef`, so a `:inherited-members:`
# under `.. autoclass:: RRef` does not work.
# We have to do the following to replace `rpc.PyRRef` with `rpc.RRef`.
#
def method_factory(method_name, docstring):
    def method(self, *args, **kwargs):
        return getattr(super(RRef, self), method_name)(*args, **kwargs)

    if method.__doc__:
        method.__doc__ = docstring
    return method


for method_name, method in inspect.getmembers(PyRRef):
    # Ignore magic methods, except "__str__".
    if method_name.startswith("_") and method_name != "__str__":
        continue

    # Get pybind11 generated docstring.
    # It's like,
    """
    to_here(self: torch.distributed.rpc.PyRRef, timeout: float=-1.0) -> object

        Blocking call that copies the value of the RRef from the owner
        to the local node and returns it. If the current node is the
        owner, returns a reference to the local value.
    """
    docstring = getattr(method, "__doc__", None)
    assert docstring is not None, "RRef user-facing methods should all have docstrings."

    # Do surgery on pybind11 generated docstrings.
    docstring = docstring.replace("torch.distributed.rpc.PyRRef", "torch.distributed.rpc.RRef")

    # Attach user-facing RRef method with modified docstring.
    new_method = method_factory(method_name, docstring)
    setattr(RRef, method_name, new_method)


@_require_initialized
def remote(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
    r"""
    Make a remote call to run ``func`` on worker ``to`` and return an
    :class:`~torch.distributed.rpc.RRef` to the result value immediately.
    Worker ``to`` will be the owner of the returned
    :class:`~torch.distributed.rpc.RRef`, and the worker calling ``remote`` is
    a user. The owner manages the global reference count of its
    :class:`~torch.distributed.rpc.RRef`, and the owner
    :class:`~torch.distributed.rpc.RRef` is only destructed when globally there
    are no living references to it.

    Args:
        to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
        func (Callable): a callable function, such as Python callables, builtin
                         operators (e.g. :meth:`~torch.add`) and annotated
                         TorchScript functions.
        args (tuple): the argument tuple for the ``func`` invocation.
        kwargs (dict): a dictionary of keyword arguments for the ``func``
                       invocation.
        timeout (float, optional): timeout in seconds for this remote call. If the
                                   creation of this
                                   :class:`~torch.distributed.rpc.RRef` on worker
                                   ``to`` is not successfully processed on this
                                   worker within this timeout, then the next time
                                   there is an attempt to use the RRef (such as
                                   ``to_here()``), a timeout will be raised
                                   indicating this failure. A value of 0 indicates
                                   an infinite timeout, i.e. a timeout error will
                                   never be raised. If not provided, the default
                                   value set during initialization or with
                                   ``_set_rpc_timeout`` is used.

    Returns:
        A user :class:`~torch.distributed.rpc.RRef` instance to the result
        value. Use the blocking API :meth:`torch.distributed.rpc.RRef.to_here`
        to retrieve the result value locally.

    .. warning ::
        The ``remote`` API does not copy storages of argument tensors until
        sending them over the wire, which could be done by a different thread
        depending on the RPC backend type. The caller should make sure that the
        contents of those tensors stay intact until the returned RRef is
        confirmed by the owner, which can be checked using the
        :meth:`torch.distributed.rpc.RRef.confirmed_by_owner` API.

    .. warning ::
        Errors such as timeouts for the ``remote`` API are handled on a
        best-effort basis: when a remote call initiated by ``remote`` fails,
        for example with a timeout, the error is handled and set on the
        resulting RRef asynchronously. If the RRef has not been used by the
        application before this handling (such as ``to_here`` or a fork call),
        then future uses of the ``RRef`` will appropriately raise errors.
        However, it is possible that the user application uses the ``RRef``
        before the error is handled, in which case the error may not be
        raised as it has not yet been handled.

    Example::

        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
        >>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
        >>> x = rref1.to_here() + rref2.to_here()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()

        Below is an example of running a TorchScript function using RPC.

        >>> # On both workers:
        >>> @torch.jit.script
        >>> def my_script_add(t1, t2):
        >>>     return torch.add(t1, t2)

        >>> # On worker 0:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> rref = rpc.remote("worker1", my_script_add, args=(torch.ones(2), 3))
        >>> rref.to_here()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()
    """
    torch._C._log_api_usage_once("torch.distributed.rpc_remote")
    qualified_name = torch.jit._builtins._find_builtin(func)
    dst_worker_info = _to_worker_info(to)
    should_profile = _get_should_profile()

    ctx_manager = _enable_rpc_profiler(should_profile, qualified_name, func, RPCExecMode.REMOTE, dst_worker_info)

    with ctx_manager as rf:
        args = args if args else ()
        kwargs = kwargs if kwargs else {}

        is_async_exec = hasattr(func, "_wrapped_async_rpc_function")

        if is_async_exec:
            wrapped = func._wrapped_async_rpc_function
            if isinstance(wrapped, torch.jit.ScriptFunction):
                func = wrapped

        if qualified_name is not None:
            rref = _invoke_remote_builtin(dst_worker_info, qualified_name, timeout, *args, **kwargs)
        elif isinstance(func, torch.jit.ScriptFunction):
            rref = _invoke_remote_torchscript(
                dst_worker_info.name,
                torch._jit_internal._qualified_name(func),
                timeout,
                is_async_exec,
                *args,
                **kwargs,
            )
        else:
            (pickled_python_udf, tensors) = _default_pickler.serialize(
                PythonUDF(func, args, kwargs)
            )
            rref = _invoke_remote_python_udf(
                dst_worker_info,
                pickled_python_udf,
                tensors,
                timeout,
                is_async_exec
            )
        # attach profiling information
        if should_profile:
            assert torch.autograd._profiler_enabled()
            assert rf is not None
            fut = rf._call_end_callbacks_on_future(rref._get_future())
            rref._set_profiling_future(fut)

    return rref


def _invoke_rpc(to, func, rpc_type, args=None, kwargs=None, rpc_timeout=UNSET_RPC_TIMEOUT):
    if not callable(func):
        raise TypeError("function should be callable.")

    qualified_name = torch.jit._builtins._find_builtin(func)
    dst_worker_info = _to_worker_info(to)

    should_profile = _get_should_profile()

    ctx_manager = _enable_rpc_profiler(should_profile, qualified_name, func, rpc_type, dst_worker_info)

    with ctx_manager as rf:
        args = args if args else ()
        kwargs = kwargs if kwargs else {}

        is_async_exec = hasattr(func, "_wrapped_async_rpc_function")

        if is_async_exec:
            wrapped = func._wrapped_async_rpc_function
            if isinstance(wrapped, torch.jit.ScriptFunction):
                func = wrapped

        if qualified_name is not None:
            fut = _invoke_rpc_builtin(
                dst_worker_info,
                qualified_name,
                rpc_timeout,
                *args,
                **kwargs
            )
        elif isinstance(func, torch.jit.ScriptFunction):
            fut = _invoke_rpc_torchscript(
                dst_worker_info.name,
                torch._jit_internal._qualified_name(func),
                args,
                kwargs,
                rpc_timeout,
                is_async_exec
            )
        else:
            (pickled_python_udf, tensors) = _default_pickler.serialize(
                PythonUDF(func, args, kwargs)
            )
            fut = _invoke_rpc_python_udf(
                dst_worker_info,
                pickled_python_udf,
                tensors,
                rpc_timeout,
                is_async_exec
            )
        if should_profile:
            assert torch.autograd._profiler_enabled()
            assert rf is not None
            # Schedule profiling callbacks to run when the future completes.
            # This returns a future that is completed when the original future
            # completes and the profiling callbacks have been completed as well,
            # to guarantee that fut.wait() completes the profiling. This new
            # future will contain the same value as the original future.
            fut = rf._call_end_callbacks_on_future(fut)
    return fut
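

# Illustrative sketch (not part of the original module): the three dispatch
# paths taken by `_invoke_rpc`. The destination "worker1" is hypothetical, as
# are `my_script_add` (a TorchScript function) and `my_python_fn` (a plain,
# picklable Python function).
def _example_invoke_rpc_paths(my_script_add, my_python_fn):
    t = torch.ones(2)
    fut1 = _invoke_rpc("worker1", torch.add, RPCExecMode.ASYNC, args=(t, 1))      # builtin op
    fut2 = _invoke_rpc("worker1", my_script_add, RPCExecMode.ASYNC, args=(t, t))  # TorchScript
    fut3 = _invoke_rpc("worker1", my_python_fn, RPCExecMode.ASYNC, args=(t,))     # Python UDF
    return [f.wait() for f in (fut1, fut2, fut3)]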


@_require_initialized
def rpc_sync(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
    r"""
    Make a blocking RPC call to run function ``func`` on worker ``to``. RPC
    messages are sent and received in parallel to execution of Python code. This
    method is thread-safe.

    Args:
        to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
        func (Callable): a callable function, such as Python callables, builtin
                         operators (e.g. :meth:`~torch.add`) and annotated
                         TorchScript functions.
        args (tuple): the argument tuple for the ``func`` invocation.
        kwargs (dict): a dictionary of keyword arguments for the ``func``
                       invocation.
        timeout (float, optional): timeout in seconds to use for this RPC. If
                                   the RPC does not complete in this amount of
                                   time, an exception indicating it has
                                   timed out will be raised. A value of 0
                                   indicates an infinite timeout, i.e. a timeout
                                   error will never be raised. If not provided,
                                   the default value set during initialization
                                   or with ``_set_rpc_timeout`` is used.

    Returns:
        Returns the result of running ``func`` with ``args`` and ``kwargs``.

    Example::
        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> ret = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 3))
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()

        Below is an example of running a TorchScript function using RPC.

        >>> # On both workers:
        >>> @torch.jit.script
        >>> def my_script_add(t1, t2):
        >>>     return torch.add(t1, t2)

        >>> # On worker 0:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> ret = rpc.rpc_sync("worker1", my_script_add, args=(torch.ones(2), 3))
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()
    """
    torch._C._log_api_usage_once("torch.distributed.rpc_sync")
    fut = _invoke_rpc(to, func, RPCExecMode.SYNC, args, kwargs, timeout)
    return fut.wait()


@_require_initialized
def rpc_async(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
    r"""
    Make a non-blocking RPC call to run function ``func`` on worker ``to``. RPC
    messages are sent and received in parallel to execution of Python code. This
    method is thread-safe. This method will immediately return a
    :class:`~torch.futures.Future` that can be awaited on.

    Args:
        to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
        func (Callable): a callable function, such as Python callables, builtin
                         operators (e.g. :meth:`~torch.add`) and annotated
                         TorchScript functions.
        args (tuple): the argument tuple for the ``func`` invocation.
        kwargs (dict): a dictionary of keyword arguments for the ``func``
                       invocation.
        timeout (float, optional): timeout in seconds to use for this RPC. If
                                   the RPC does not complete in this amount of
                                   time, an exception indicating it has
                                   timed out will be raised. A value of 0
                                   indicates an infinite timeout, i.e. a timeout
                                   error will never be raised. If not provided,
                                   the default value set during initialization
                                   or with ``_set_rpc_timeout`` is used.

    Returns:
        Returns a :class:`~torch.futures.Future` object that can be waited
        on. When completed, the return value of ``func`` on ``args`` and
        ``kwargs`` can be retrieved from the :class:`~torch.futures.Future`
        object.

    .. warning ::
        Using GPU tensors as arguments or return values of ``func`` is not
        supported since we don't support sending GPU tensors over the wire. You
        need to explicitly copy GPU tensors to CPU before using them as
        arguments or return values of ``func``.

    .. warning ::
        The ``rpc_async`` API does not copy storages of argument tensors until
        sending them over the wire, which could be done by a different thread
        depending on the RPC backend type. The caller should make sure that the
        contents of those tensors stay intact until the returned
        :class:`~torch.futures.Future` completes.

    Example::
        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> fut1 = rpc.rpc_async("worker1", torch.add, args=(torch.ones(2), 3))
        >>> fut2 = rpc.rpc_async("worker1", min, args=(1, 2))
        >>> result = fut1.wait() + fut2.wait()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()

        Below is an example of running a TorchScript function using RPC.

        >>> # On both workers:
        >>> @torch.jit.script
        >>> def my_script_add(t1, t2):
        >>>     return torch.add(t1, t2)

        >>> # On worker 0:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> fut = rpc.rpc_async("worker1", my_script_add, args=(torch.ones(2), 3))
        >>> ret = fut.wait()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()
    """
    torch._C._log_api_usage_once("torch.distributed.rpc_async")
    fut = _invoke_rpc(to, func, RPCExecMode.ASYNC, args, kwargs, timeout)
    if hasattr(_thread_local_var, "future_list"):
        _thread_local_var.future_list.append(fut)
    return fut


def _get_should_profile():
    # Legacy profiler should be enabled. RPC profiling is not supported with
    # Kineto profiler.
    ActiveProfilerType = torch._C._profiler.ActiveProfilerType
    return (
        torch.autograd._profiler_enabled() and
        torch._C._autograd._profiler_type() == ActiveProfilerType.LEGACY  # type: ignore[attr-defined]
    )


def _enable_rpc_profiler(should_profile, qualified_name, func, rpc_type, dst_worker_info):
    ctx_manager = contextlib.suppress()

    if should_profile:
        # Create appropriate string representation based on type of func
        # (builtin, script, python)
        if qualified_name is None:
            func_name = (
                torch._jit_internal._qualified_name(func)
                if isinstance(func, torch.jit.ScriptFunction)
                else func.__qualname__
            )
        else:
            func_name = qualified_name
        # Build RPC profiling key.
        rpc_profiling_key = _build_rpc_profiling_key(
            rpc_type,
            func_name,
            get_worker_info().name,
            dst_worker_info.name,
        )
        RemoteProfilerManager.set_current_profiling_key(rpc_profiling_key)
        # Mypy doesn't support re-def of a variable not in the same block (#1174)
        ctx_manager = torch.autograd.profiler.record_function(rpc_profiling_key)  # type: ignore[assignment]

    return ctx_manager
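

# Illustrative sketch (not part of the original module): the profiling hooks
# above only fire under the legacy autograd profiler (use_kineto=False), which
# is what `_get_should_profile()` checks for. The worker name is hypothetical.
def _example_profile_rpc():
    with torch.autograd.profiler.profile(use_kineto=False) as prof:
        rpc_sync("worker1", torch.add, args=(torch.ones(2), 1))
    # RPC events are recorded under keys built by `_build_rpc_profiling_key`.
    return prof.key_averages()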