server_process_global_profiler.py

#!/usr/bin/python3
import itertools
from typing import List

import torch
from torch.autograd.profiler_legacy import profile

from . import (
    _disable_server_process_global_profiler,
    _enable_server_process_global_profiler,
)

__all__: List[str] = []

class _server_process_global_profile(profile):
    """
    Context manager that manages autograd profiler state and holds a summary of results.

    It has the same API as the ``torch.autograd.profiler.profile`` class, except that
    it enables profiling on all threads running RPC server request callbacks.

    Under the hood it just records events of functions being executed in C++ and
    exposes those events to Python. You can wrap any code into it and it will
    only report runtime of PyTorch functions.

    Note: the profiler is thread-local and is automatically propagated into async tasks.

    Args:
        enabled (bool, optional): Setting this to False makes this context manager a no-op.
            Default: ``True``.
        use_cuda (bool, optional): Enables timing of CUDA events as well, using the cudaEvent API.
            Adds approximately 4us of overhead to each tensor operation.
            Default: ``False``
        record_shapes (bool, optional): If shape recording is enabled, information
            about input dimensions will be collected. This allows one to see which
            dimensions have been used under the hood and further group by them
            using ``prof.key_averages(group_by_input_shape=True)``. Please note that
            shape recording might skew your profiling data. It is recommended to
            use separate runs with and without shape recording to validate the timing.
            Most likely the skew will be negligible for bottom-most events (in a case
            of nested function calls). But for higher-level functions the total
            self CPU time might be artificially increased because of the shape
            collection.
        profile_memory (bool, optional): Whether to report memory usage. Default: ``False``

    .. warning::
        Enabling memory profiling incurs additional profiler overhead.

    .. warning::
        Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_),
        one cannot use the profiler with ``use_cuda = True`` to benchmark
        DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading,
        please use ``use_cuda = False`` or ``num_workers = 0``.

    Example:
        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> dst_worker_name = "worker1"
        >>> x, y = torch.tensor(1), torch.tensor(2)
        >>> outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
        >>> outer_profile_rref.rpc_sync().__enter__()
        >>> rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
        >>> inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
        >>> inner_profile_rref.rpc_sync().__enter__()
        >>> rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
        >>> inner_profile_rref.rpc_sync().__exit__(None, None, None)
        >>> outer_profile_rref.rpc_sync().__exit__(None, None, None)
        >>> print(inner_profile_rref.rpc_sync().key_averages())
        ---------  ----------------  ---------------  -----------  ---------  ------------  ---------------
        Name       Self CPU total %  Self CPU total   CPU total %  CPU total  CPU time avg  Number of Calls
        ---------  ----------------  ---------------  -----------  ---------  ------------  ---------------
        sub        85.06%            76.275us         100.00%      89.667us   89.667us      1
        empty      14.94%            13.392us         14.94%       13.392us   13.392us      1
        ---------  ----------------  ---------------  -----------  ---------  ------------  ---------------
        Self CPU time total: 89.667us
        >>> print(outer_profile_rref.rpc_sync().key_averages())
        ---------  ----------------  ---------------  -----------  ---------  ------------  ---------------
        Name       Self CPU total %  Self CPU total   CPU total %  CPU total  CPU time avg  Number of Calls
        ---------  ----------------  ---------------  -----------  ---------  ------------  ---------------
        sub        35.65%            76.275us         41.91%       89.667us   89.667us      1
        empty      12.67%            27.101us         12.67%       27.101us   13.551us      2
        add        51.68%            110.550us        58.09%       124.259us  124.259us     1
        ---------  ----------------  ---------------  -----------  ---------  ------------  ---------------
        Self CPU time total: 213.926us
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> # wait for worker 0 to finish work, and then shutdown.
        >>> rpc.shutdown()
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __enter__(self):
        """
        Turn on server-side process-global profiling.
        This enables the thread-local profiler on all RPC threads running server-side request callbacks.
        """
        if not self.enabled:
            return

        if self.entered:  # type: ignore[has-type]
            raise RuntimeError("autograd profiler traces are not reentrant")
        self.entered = True

        # Profile CUDA events as well when requested, otherwise CPU only.
        profiler_kind = (
            torch.autograd.ProfilerState.CUDA
            if self.use_cuda
            else torch.autograd.ProfilerState.CPU
        )
        profiler_config = torch.autograd.ProfilerConfig(
            profiler_kind,
            self.record_shapes,
            self.profile_memory,
            False,  # with_stack
            False,  # with_flops
            False,  # with_modules
            torch.profiler._ExperimentalConfig(),
        )
        # Enable the profiler on every RPC server thread, not just the current one.
        _enable_server_process_global_profiler(profiler_config)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Turn off server-side process-global profiling.
        Aggregate all profiling events recorded by RPC threads.

        These attributes are assigned on exiting context.

        Attributes:
            function_events (torch.autograd.profiler.EventList): A list with helper
                methods, e.g. for showing recorded items in a pretty-print table,
                averaging by grouping on keys, and more.
            process_global_function_events (List[List[torch.autograd.profiler.FunctionEvent]]):
                Every element is the list of ``FunctionEvent`` objects produced while
                handling one RPC request within the profiling range.
        """
        if not self.enabled:
            return

        process_global_events = _disable_server_process_global_profiler()

        # Every element in this list is a thread profiling result from the handling of one RPC request.
        process_global_function_events = []
        for thread_local_events in process_global_events:
            # Parse from ``Event``s to ``FunctionEvent``s.
            thread_local_function_events = (
                torch.autograd.profiler_legacy._parse_legacy_records(
                    thread_local_events
                )
            )
            # Sort by start time, breaking ties so that enclosing (longer) events come first.
            thread_local_function_events.sort(
                key=lambda function_event: [
                    function_event.time_range.start,
                    -(function_event.time_range.end),
                ]
            )
            process_global_function_events.append(thread_local_function_events)

        # Flatten the per-thread lists into a single EventList for aggregate reporting.
        flattened_function_events = list(
            itertools.chain(*process_global_function_events)
        )
        self.function_events = torch.autograd.profiler_util.EventList(
            flattened_function_events,
            use_cuda=self.use_cuda,
            profile_memory=self.profile_memory,
        )
        self.function_events._build_tree()

        self.process_global_function_events = process_global_function_events

        return False
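

# A minimal usage sketch of the attributes assigned by ``__exit__``: a hypothetical
# helper (not part of this module's API) that prints an aggregated table plus a
# per-request breakdown. ``prof`` is assumed to be a ``_server_process_global_profile``
# instance whose profiling range has already been closed; ``EventList.key_averages()``,
# ``EventList.table()``, and ``FunctionEvent.self_cpu_time_total`` are existing
# autograd profiler APIs.
def _print_profile_summary(prof: _server_process_global_profile) -> None:
    # Aggregated view across all RPC server threads that handled requests.
    print(prof.function_events.key_averages().table(sort_by="self_cpu_time_total"))

    # Per-request view: one list of ``FunctionEvent``s per RPC request handled
    # within the profiling range.
    for i, request_events in enumerate(prof.process_global_function_events):
        total_self_cpu_us = sum(event.self_cpu_time_total for event in request_events)
        print(
            f"request {i}: {len(request_events)} events, "
            f"{total_self_cpu_us:.1f}us self CPU time"
        )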