rendezvous.py

try:
    from urllib.parse import urlparse, urlunparse
except ImportError as e:
    raise ImportError(
        "urllib cannot be found, urlparse from python2 is no longer supported."
    ) from e

import numbers
import os
import sys
from datetime import timedelta
from typing import Dict, Optional

from torch.distributed import FileStore, PrefixStore, Store, TCPStore

from .constants import default_pg_timeout

_rendezvous_handlers = {}


def register_rendezvous_handler(scheme, handler):
    """Registers a new rendezvous handler.

    Before we can run collective algorithms, participating processes
    need to find each other and exchange information to be able to
    communicate. We call this process rendezvous.

    The outcome of the rendezvous process is a triplet containing a
    shared key/value store, the rank of the process, and the total
    number of participating processes.

    If none of the bundled rendezvous methods apply to your execution
    environment you can opt to register your own rendezvous handler.
    Pick a unique name and use the URL scheme to identify it when
    calling the `rendezvous()` function.

    Args:
        scheme (str): URL scheme to identify your rendezvous handler.
        handler (function): Handler that is invoked when the
            `rendezvous()` function is called with a URL that uses
            the corresponding scheme. It must be a generator function
            that yields the triplet.
    """
    global _rendezvous_handlers
    if scheme in _rendezvous_handlers:
        raise RuntimeError(
            "Rendezvous handler for {}:// already registered".format(scheme)
        )
    _rendezvous_handlers[scheme] = handler
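

# The docstring above notes that a handler must be a generator function yielding
# the (store, rank, world_size) triplet. Below is a minimal illustrative sketch of
# such a handler for a hypothetical "dummy" scheme; the hard-coded endpoint is an
# assumption made for the example, and the handler is deliberately not registered.
def _example_dummy_rendezvous_handler(url: str, **kwargs):
    # Pull rank/world_size out of a query string such as "dummy://host?rank=0&world_size=2".
    result = urlparse(url)
    query = dict(pair.split("=") for pair in result.query.split("&"))
    rank = int(query["rank"])
    world_size = int(query["world_size"])
    # Rank 0 hosts a TCPStore on an assumed local endpoint; the other ranks connect to it.
    store = TCPStore("localhost", 29400, world_size, rank == 0, default_pg_timeout)
    yield (store, rank, world_size)
    # A second next() means re-rendezvous was requested, which this sketch does not support.
    raise RuntimeError("Unable to perform re-rendezvous using dummy:// method")

# One would enable the sketch above with:
#   register_rendezvous_handler("dummy", _example_dummy_rendezvous_handler)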


# Query will have format "rank=0&world_size=1" and is
# converted into {"rank": "0", "world_size": "1"} (values stay as strings)
def _query_to_dict(query: str) -> Dict[str, str]:
    return {pair[0]: pair[1] for pair in (pair.split("=") for pair in filter(None, query.split("&")))}


def _rendezvous_helper(url: str, rank: int, world_size_opt: Optional[int], **kwargs):
    result = urlparse(url)
    if world_size_opt is None:
        world_size = -1
        if result.scheme == "env":
            rank = int(os.environ.get("RANK", rank))
            # If the world_size env variable is not present then it is a dynamic group
            world_size = int(os.environ.get("WORLD_SIZE", world_size))
    else:
        world_size = world_size_opt
    if rank != -1 or world_size != -1 or world_size_opt is None:
        query_dict = _query_to_dict(result.query)
        assert (
            "rank" not in query_dict and "world_size" not in query_dict
        ), "The url: {url} has node-specific arguments(rank, world_size) already.".format(
            url=url
        )
        if rank != -1:
            query_dict["rank"] = str(rank)
        if world_size != -1 or world_size_opt is None:
            query_dict["world_size"] = str(world_size)
        result = result._replace(
            query="{}".format(
                "&".join(["{}={}".format(k, v) for k, v in query_dict.items()])
            )
        )
        url = urlunparse(result)

    if result.scheme not in _rendezvous_handlers:
        raise RuntimeError("No rendezvous handler for {}://".format(result.scheme))
    return _rendezvous_handlers[result.scheme](url, **kwargs)


def rendezvous(url: str, rank: int = -1, world_size: int = -1, **kwargs):
    if not isinstance(url, str):
        raise RuntimeError("`url` must be a string. {}: {}".format(type(url), url))
    if not isinstance(rank, numbers.Integral):
        raise RuntimeError("`rank` must be an integer. {}".format(rank))
    if not isinstance(world_size, numbers.Integral):
        raise RuntimeError("`world_size` must be an integer. {}".format(world_size))
    return _rendezvous_helper(url, rank, world_size, **kwargs)
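

# Illustrative usage (placeholder address, port, and sizes; not executed at import
# time). On a two-process job each process could call either
#
#   store, rank, world_size = next(rendezvous("tcp://10.0.0.1:29500", rank=0, world_size=2))
#
# or embed the node-specific arguments directly in the URL:
#
#   store, rank, world_size = next(rendezvous("tcp://10.0.0.1:29500?rank=1&world_size=2"))
#
# Each handler is a generator, so next() produces the (store, rank, world_size) triplet.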


def _create_store_from_options(backend_options, rank):
    store, _, _ = next(_rendezvous_helper(backend_options.init_method, rank, None))
    return store


def _rendezvous_error(msg):
    return ValueError("Error initializing torch.distributed using " + msg)


def _file_rendezvous_handler(url: str, **kwargs):
    def _error(msg):
        return _rendezvous_error("file:// rendezvous: " + msg)

    result = urlparse(url)
    path = result.path
    if sys.platform == "win32":
        import urllib.request

        full_path = result.netloc + result.path
        path = urllib.request.url2pathname(full_path)
        if path:
            # Normalizing an empty string produces ".", which is not expected.
            path = os.path.normpath(path)
    if not path:
        raise _error("path missing")
    query_dict = _query_to_dict(result.query)
    if "rank" not in query_dict:
        raise _error("rank parameter missing")
    if "world_size" not in query_dict:
        raise _error("world size parameter missing")

    rank = int(query_dict["rank"])
    world_size = int(query_dict["world_size"])
    store = FileStore(path, world_size)
    yield (store, rank, world_size)

    # If this configuration is invalidated, there is nothing we can do about it
    raise RuntimeError("Unable to perform re-rendezvous using file:// method")
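

# Illustrative file:// usage (placeholder path and sizes; not executed here): the
# path must live on a filesystem shared by all participating processes, e.g.
#   store, rank, world_size = next(rendezvous("file:///tmp/torch_rendezvous", rank=0, world_size=2))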


def _torchelastic_use_agent_store() -> bool:
    return os.environ.get("TORCHELASTIC_USE_AGENT_STORE", None) == str(True)


def _create_c10d_store(hostname, port, rank, world_size, timeout) -> Store:
    """
    Smartly creates a c10d Store object on ``rank`` based on whether
    we need to re-use agent store. The TCPStore server is assumed to be hosted
    on ``hostname:port``.

    If ``torchelastic_use_agent_store()`` is ``True``, then it is assumed that
    the agent leader (node rank 0) hosts the TCPStore server (for which the
    endpoint is specified by the given ``hostname:port``). Hence
    ALL ranks will create and return a TCPStore client (e.g. ``start_daemon=False``).

    If ``torchelastic_use_agent_store()`` is ``False``, then rank 0 will host
    the TCPStore (with multi-tenancy) and it is assumed that rank 0's hostname
    and port are correctly passed via ``hostname`` and ``port``. All
    non-zero ranks will create and return a TCPStore client.
    """
    # check if port is uint16_t
    if not 0 <= port < 2**16:
        raise ValueError(f"port must have value from 0 to 65535 but was {port}.")

    if _torchelastic_use_agent_store():
        attempt = os.environ["TORCHELASTIC_RESTART_COUNT"]
        tcp_store = TCPStore(hostname, port, world_size, False, timeout)
        return PrefixStore(f"/worker/attempt_{attempt}", tcp_store)
    else:
        start_daemon = rank == 0
        return TCPStore(
            hostname, port, world_size, start_daemon, timeout, multi_tenant=True
        )
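

# Illustrative sketch of the two modes documented above (placeholder endpoint,
# ranks, and sizes; not executed here):
#
#   os.environ["TORCHELASTIC_USE_AGENT_STORE"] = "True"
#   os.environ["TORCHELASTIC_RESTART_COUNT"] = "0"
#   _create_c10d_store("10.0.0.1", 29500, 3, 8, default_pg_timeout)
#   # -> every rank gets PrefixStore("/worker/attempt_0", <TCPStore client>)
#
#   del os.environ["TORCHELASTIC_USE_AGENT_STORE"]
#   _create_c10d_store("10.0.0.1", 29500, 0, 8, default_pg_timeout)
#   # -> rank 0 hosts a multi-tenant TCPStore server; non-zero ranks connect as clients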


def _tcp_rendezvous_handler(
    url: str, timeout: timedelta = default_pg_timeout, **kwargs
):
    def _error(msg):
        return _rendezvous_error("tcp:// rendezvous: " + msg)

    result = urlparse(url)
    if not result.port:
        raise _error("port number missing")
    query_dict = _query_to_dict(result.query)
    if "rank" not in query_dict:
        raise _error("rank parameter missing")
    if "world_size" not in query_dict:
        raise _error("world size parameter missing")

    rank = int(query_dict["rank"])
    world_size = int(query_dict["world_size"])
    assert result.hostname is not None

    store = _create_c10d_store(result.hostname, result.port, rank, world_size, timeout)

    yield (store, rank, world_size)

    # If this configuration is invalidated, there is nothing we can do about it
    raise RuntimeError("Unable to perform re-rendezvous using tcp:// method")


def _env_rendezvous_handler(
    url: str, timeout: timedelta = default_pg_timeout, **kwargs
):
    def _error(msg):
        return _rendezvous_error("env:// rendezvous: " + msg)

    def _env_error(var):
        return _error("environment variable %s expected, but not set" % var)

    def _get_env_or_raise(env_var: str) -> str:
        env_val = os.environ.get(env_var, None)
        if not env_val:
            raise _env_error(env_var)
        else:
            return env_val

    result = urlparse(url)
    query_dict = _query_to_dict(result.query)

    rank: int
    world_size: int
    master_port: int
    master_addr: str

    if "rank" in query_dict:
        rank = int(query_dict["rank"])
    else:
        rank = int(_get_env_or_raise("RANK"))

    if "world_size" in query_dict:
        world_size = int(query_dict["world_size"])
    else:
        world_size = int(_get_env_or_raise("WORLD_SIZE"))

    master_addr = _get_env_or_raise("MASTER_ADDR")
    master_port = int(_get_env_or_raise("MASTER_PORT"))

    store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout)

    yield (store, rank, world_size)

    # If this configuration is invalidated, there is nothing we can do about it
    raise RuntimeError("Unable to perform re-rendezvous using env:// method")


register_rendezvous_handler("tcp", _tcp_rendezvous_handler)
register_rendezvous_handler("env", _env_rendezvous_handler)
register_rendezvous_handler("file", _file_rendezvous_handler)
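

# Illustrative usage of the env:// handler registered above (placeholder values;
# not executed at import time). With the canonical environment variables set, e.g.
#
#   MASTER_ADDR=10.0.0.1 MASTER_PORT=29500 RANK=0 WORLD_SIZE=2
#
# each process can simply call:
#
#   store, rank, world_size = next(rendezvous("env://"))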