import collections
import copyreg
import io
import pickle
import sys
import threading
import traceback
from enum import Enum

import torch
import torch.distributed as dist
from torch._C._distributed_rpc import _get_current_rpc_agent

__all__ = ["RPCExecMode", "serialize", "deserialize", "PythonUDF", "RemoteException"]

# Thread-local tensor tables to store tensors while pickling torch.Tensor
# objects
_thread_local_tensor_tables = threading.local()
_pickler = pickle.Pickler
_unpickler = pickle.Unpickler


class RPCExecMode(Enum):
    SYNC = "sync"
    ASYNC = "async"
    ASYNC_JIT = "async_jit"
    REMOTE = "remote"


class _InternalRPCPickler:
    r"""
    This class provides serialize() and deserialize() interfaces to serialize
    data into a "binary string + tensor table" format.

    For an RPC Python UDF function and its args, non-tensor data is serialized
    into a regular binary string, while tensor data is put into thread-local
    tensor tables. This serialization format is consistent with builtin
    operators and args using the JIT pickler, and it makes tensor handling in
    C++ much easier, e.g., attaching tensors to the distributed autograd graph
    in C++.
    """

    def __init__(self):
        # Ignore type error because dispatch_table is defined in third-party package
        self._dispatch_table = copyreg.dispatch_table.copy()  # type: ignore[attr-defined]
        self._dispatch_table[torch.Tensor] = self._tensor_reducer
        # Used for registering customized picklers.
        self._class_reducer_dict = {}

    def _register_reducer(self, obj_class, reducer):
        # For the same class, only register the reducer once.
        if obj_class not in self._class_reducer_dict:
            self._class_reducer_dict[obj_class] = reducer

    @classmethod
    def _tensor_receiver(cls, tensor_index):
        global _thread_local_tensor_tables
        return _thread_local_tensor_tables.recv_tables[tensor_index]

    def _tensor_reducer(self, tensor):
        global _thread_local_tensor_tables
        _thread_local_tensor_tables.send_tables.append(tensor)
        tensor_index = len(_thread_local_tensor_tables.send_tables) - 1
        return (_InternalRPCPickler._tensor_receiver, (tensor_index,))
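
    # Per the copyreg/pickle reduce protocol, _tensor_reducer returns a
    # (callable, args) pair, so unpickling calls
    # _InternalRPCPickler._tensor_receiver(tensor_index), which looks the
    # tensor up in the thread-local recv table. A minimal sketch of the same
    # mechanism for a plain list (hypothetical names, illustration only):
    #
    #   table = []
    #
    #   def _receiver(i):
    #       return table[i]
    #
    #   def _reducer(obj):
    #       table.append(obj)
    #       return (_receiver, (len(table) - 1,))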

    @classmethod
    def _py_rref_receiver(cls, rref_fork_data):
        return dist.rpc.PyRRef._deserialize(rref_fork_data)

    def _py_rref_reducer(self, py_rref):
        rref_fork_data = py_rref._serialize()
        return (_InternalRPCPickler._py_rref_receiver, (rref_fork_data,))

    def _rref_reducer(self, rref):
        return self._py_rref_reducer(rref)

    @classmethod
    def _script_module_receiver(cls, script_module_serialized):
        """
        Given a serialized representation of a ScriptModule created with
        torch.jit.save, loads and returns the ScriptModule.
        """
        f = io.BytesIO(script_module_serialized)
        m = torch.jit.load(f)
        return m

    def _script_module_reducer(self, script_module):
        """
        Serializes a ScriptModule.
        """
        f = io.BytesIO()
        torch.jit.save(script_module, f)
        return (_InternalRPCPickler._script_module_receiver, (f.getvalue(),))
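
    # A minimal sketch of the save/load round trip these two methods perform
    # (hypothetical usage; any torch.jit.ScriptModule works):
    #
    #   sm = torch.jit.script(torch.nn.Linear(2, 2))
    #   buf = io.BytesIO()
    #   torch.jit.save(sm, buf)
    #   restored = torch.jit.load(io.BytesIO(buf.getvalue()))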

    def serialize(self, obj):
        r"""
        Serialize non-tensor data into a binary string and tensor data into a
        tensor table.
        """
        f = io.BytesIO()
        p = _pickler(f)
        p.dispatch_table = self._dispatch_table

        # The RPC API can accept user picklers that inherit from
        # _InternalRPCPickler to serialize RRefs. Such picklers may have a
        # different initialization function than _InternalRPCPickler, but they
        # should all call serialize() and use _rref_reducer to pickle RRefs in
        # Python. Also, when _internal_rpc_pickler is imported into
        # rpc/api.py, rpc.RRef is not compiled yet, so the _InternalRPCPickler
        # constructor is not a good place to access rpc.RRef; hence the RRef
        # entries are put into the dispatch table here.
        #
        # The return value of an `rpc.remote(..)` call is of type `rpc.PyRRef`.
        # The deserialized RRef object on the RPC receiver side is of type `rpc.PyRRef`.
        # Ignore type error because dispatch_table is defined in third-party package
        p.dispatch_table[dist.rpc.PyRRef] = self._py_rref_reducer  # type: ignore[index]
        # An RRef created locally by the RRef Python constructor is of type `rpc.RRef`.
        # Ignore type error because dispatch_table is defined in third-party package
        p.dispatch_table[dist.rpc.RRef] = self._rref_reducer  # type: ignore[index]

        # Add dispatch pickling for ScriptModule or its subclass.
        if isinstance(obj, torch.jit.ScriptModule):
            # Ignore type error because dispatch_table is defined in third-party package
            p.dispatch_table[obj.__class__] = self._script_module_reducer  # type: ignore[index]

        # Install customized picklers.
        for obj_class, reducer in self._class_reducer_dict.items():
            p.dispatch_table[obj_class] = reducer  # type: ignore[index]

        # Save _thread_local_tensor_tables.send_tables if this is a nested call.
        global _thread_local_tensor_tables
        if hasattr(_thread_local_tensor_tables, "send_tables"):
            old_send_tables = _thread_local_tensor_tables.send_tables
        else:
            old_send_tables = None
        _thread_local_tensor_tables.send_tables = []

        p.dump(obj)

        # Restore _thread_local_tensor_tables.send_tables if returning from a
        # nested call; otherwise clean up the table.
        tensors = _thread_local_tensor_tables.send_tables
        if old_send_tables is not None:
            _thread_local_tensor_tables.send_tables = old_send_tables
        else:
            del _thread_local_tensor_tables.send_tables

        return (f.getvalue(), tensors)

    def deserialize(self, binary_data, tensor_table):
        r"""
        Deserialize a binary string + tensor table back into the original obj.
        """
        # Save _thread_local_tensor_tables.recv_tables if this is a nested call.
        global _thread_local_tensor_tables
        if hasattr(_thread_local_tensor_tables, "recv_tables"):
            old_recv_tables = _thread_local_tensor_tables.recv_tables
        else:
            old_recv_tables = None
        _thread_local_tensor_tables.recv_tables = tensor_table

        try:
            unpickler = _unpickler(io.BytesIO(binary_data))
            ret = unpickler.load()
        except AttributeError as e:
            # Occurs when a function is not found on the module/class during
            # unpickling.
            except_str = (
                str(e)
                + """ Default RPC pickler does not serialize
            function code. Ensure that UDFs are defined on both caller and
            callee modules."""
            )
            ret = AttributeError(except_str)
            # Ensure the stack trace gets preserved
            ret.__cause__ = e

        # Restore _thread_local_tensor_tables.recv_tables if returning from a
        # nested call; otherwise clean up the table.
        if old_recv_tables is not None:
            _thread_local_tensor_tables.recv_tables = old_recv_tables
        else:
            del _thread_local_tensor_tables.recv_tables

        return ret


# Create _internal_rpc_pickler only once to initialize _dispatch_table only
# once.
_internal_rpc_pickler = _InternalRPCPickler()


def serialize(obj):
    return _internal_rpc_pickler.serialize(obj)


def deserialize(binary_data, tensor_table):
    return _internal_rpc_pickler.deserialize(binary_data, tensor_table)
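

# A minimal sketch of the round trip (hypothetical usage; these helpers are
# internal and normally driven from C++):
#
#   payload, tensors = serialize((torch.ones(2), "meta"))
#   obj = deserialize(payload, tensors)  # -> (tensor([1., 1.]), "meta")
#
# Tensors never enter the pickle byte stream; they travel in the side table
# and are re-attached by index during deserialization.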


def _run_function(python_udf):
    r"""
    This function is exclusively called from C++.
    See ``torch/csrc/distributed/rpc/python_rpc_handler.cpp``.

    Runs a Python UDF and returns its return value.
    Wraps any exception in ``RemoteException`` if the function raises.
    """
    try:
        if isinstance(python_udf, AttributeError):
            raise python_udf
        result = python_udf.func(*python_udf.args, **python_udf.kwargs)
    except Exception as e:
        # except_str = exception info + traceback string
        except_str = (
            f"On {_get_current_rpc_agent().get_worker_info()}:\n"
            f"{repr(e)}\n{traceback.format_exc()}"
        )
        print(except_str, file=sys.stderr)
        result = RemoteException(except_str, type(e))
    return result


def _handle_exception(result):
    if isinstance(result, RemoteException):
        exception_msg = result.msg.encode("utf-8").decode("unicode_escape")
        # We wrap exception re-creation here in case some exception classes
        # cannot be constructed directly from a string.
        exc = None
        try:
            exc = result.exception_type(exception_msg)
        except BaseException as e:
            raise RuntimeError(  # noqa: B904
                f"Failed to create original exception type. Error msg was {str(e)}"
                f" Original exception on remote side was {exception_msg}"
            ) from e

        if exc is not None:
            raise exc
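

# A minimal sketch of the error path across the wire (hypothetical values):
# on the callee, _run_function catches the UDF's exception and returns, e.g.,
# RemoteException(msg="On worker1:\nValueError('boom')\n...",
# exception_type=ValueError); back on the caller, _handle_exception(result)
# re-creates and raises a ValueError carrying that message.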


def _build_rpc_profiling_key(
    exec_type, func_name, current_worker_name, dst_worker_name
):
    """
    Builds the key that RPC calls are profiled with using the autograd
    profiler. This will be the name of the corresponding Event recorded in
    the profiler.

    Args:
        exec_type (RPCExecMode): Type of RPC/RRef call
        func_name (str): Name of function being profiled.
        current_worker_name (str): Name of current worker.
        dst_worker_name (str): Name of the destination worker.

    Returns:
        String representing profiling key
    """
    profile_key = "rpc_{rpc_type}#{func_name}({current_worker} -> {dst_worker})".format(
        rpc_type=exec_type.value,
        func_name=func_name,
        current_worker=current_worker_name,
        dst_worker=dst_worker_name,
    )
    return profile_key
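

# For example (values assumed for illustration),
# _build_rpc_profiling_key(RPCExecMode.ASYNC, "foo", "worker0", "worker1")
# returns "rpc_async#foo(worker0 -> worker1)".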


def _start_record_function(exec_type, func_name, current_worker_name, dest_worker_name):
    """
    This function should be called from RPC/RRef functions to create a
    RecordFunction object for profiling. This function also runs the before
    callbacks that start the profiling, though the user is responsible for
    running the appropriate callbacks when the function to be profiled
    finishes.

    Args:
        exec_type (RPCExecMode): Type of RPC/RRef call
        func_name (str): Name of function being profiled.
        current_worker_name (str): Name of current worker.
        dest_worker_name (str): Name of the destination worker.

    Returns:
        An instance of `torch.autograd._RecordFunction`.
    """
    assert torch.autograd._profiler_enabled(), "Autograd profiler should be enabled."
    profile_key = "rpc_{}#{}({} -> {})".format(
        exec_type.value, str(func_name), current_worker_name, dest_worker_name
    )
    rf = torch.autograd._RecordFunction()  # type: ignore[attr-defined]
    torch.autograd._run_before_callbacks(rf, profile_key)  # type: ignore[attr-defined]
    return rf
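

# A minimal sketch of the intended call pattern (hypothetical; the matching
# "end" callbacks are normally run by the RPC layer when the profiled call
# completes):
#
#   if torch.autograd._profiler_enabled():
#       rf = _start_record_function(
#           RPCExecMode.ASYNC, "my_func", "worker0", "worker1"
#       )
#       ...  # issue the RPC, then end the record function when it finishes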


PythonUDF = collections.namedtuple("PythonUDF", ["func", "args", "kwargs"])
RemoteException = collections.namedtuple("RemoteException", ["msg", "exception_type"])
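
# These namedtuples are the wire-level containers used above. A minimal
# sketch of running a UDF (hypothetical values; torch.add is just an example
# callable):
#
#   udf = PythonUDF(func=torch.add, args=(torch.ones(2), torch.ones(2)), kwargs={})
#   result = _run_function(udf)  # -> tensor([2., 2.])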