closure.py

import os
import threading
from queue import Empty as EmptyQueue, Queue

from torch._lazy.device_context import get_device_context


class ClosureHandler:
    def __init__(self):
        pass

    def run(self, closure):
        """Run closure function

        Args:
            closure: callable function to run
        """
        closure()

    def __call__(self, closures):
        for closure in closures:
            self.run(closure)
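# Usage sketch (not part of the original module): ClosureHandler simply runs
# each queued closure synchronously, in order, on the calling thread.
#
#     handler = ClosureHandler()
#     handler([lambda: print("log loss"), lambda: print("save report")])
#
# `run()` is the per-closure hook that AsyncClosureHandler overrides below to
# hand closures off to a background thread instead.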
class AsyncClosureHandler(ClosureHandler):
    """Handler for asynchronous step closures.

    Args:
        max_queue_size: The maximum length of the closure queue, after which
            the training loop will block until queued closures are evaluated.
            Defaults to a maximum of 100 entries. This value can be overridden
            with the `LTC_MAX_ASYNC_QUEUE` environment variable.
    """

    def __init__(self, max_queue_size=100):
        super().__init__()
        self._closure_queue: Queue = Queue(
            int(os.environ.get("LTC_MAX_ASYNC_QUEUE", max_queue_size))
        )
        self._closure_exception: Queue = Queue()
        self._closure_lock = threading.Lock()
        self._closure_event_loop_finished = threading.Event()
        self._closure_event_loop = None

    def start_event_loop(self):
        """Start the closure event loop if it is not already running."""
        if self._closure_event_loop is None:

            def event_loop():
                # Drain closures from the queue until it stays empty for the
                # timeout (then signal completion and exit) or a closure raises.
                while True:
                    try:
                        closure = self._closure_queue.get(block=True, timeout=3)
                        closure()
                        self._closure_queue.task_done()
                    except EmptyQueue:
                        with self._closure_lock:
                            if self._closure_queue.empty():
                                self._closure_event_loop_finished.set()
                                return
                    except Exception as e:
                        # Stash the exception so the next `run()` call can
                        # re-raise it on the main thread.
                        self._closure_exception.put(e)
                        return

            self._closure_event_loop = threading.Thread(target=event_loop)
            self._closure_event_loop.start()

    def run(self, closure):
        with self._closure_lock:
            self._closure_queue.put(closure, block=True)
            if (
                self._closure_event_loop is None
                or not self._closure_event_loop.is_alive()
            ):
                # The worker thread has not started yet, finished after draining
                # the queue, or died on an exception; surface any stored
                # exception, otherwise (re)start the event loop.
                try:
                    e = self._closure_exception.get(block=False)
                    raise RuntimeError(
                        "Cannot run asynchronous closure due to previously raised exception"
                    ) from e
                except EmptyQueue:
                    self._closure_event_loop = None
                    self.start_event_loop()
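# Usage sketch (illustrative, not part of the original module): the async
# handler hands closures to a background worker thread so the training loop
# is not blocked while they run; the queue-size override below is arbitrary.
#
#     os.environ["LTC_MAX_ASYNC_QUEUE"] = "10"   # optional, read at __init__ time
#     handler = AsyncClosureHandler()
#     handler([lambda: print("reporting metrics off the main thread")])
#
# An exception raised inside a closure is stored and re-raised as a
# RuntimeError on the next `run()` call.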
def add_step_closure(closure, args=(), run_async=False):
    """Adds a closure to the list of the ones to be run at the end of the step.

    Many times during model training there is the need to print/report (print to
    console, post to tensorboard, etc...) information which requires the content
    of intermediary tensors to be inspected.
    Inspecting the content of different tensors at different points of the model
    code requires many executions and typically causes performance issues.
    Adding a step closure ensures that it will be run after the barrier, when all
    the live tensors (including the ones captured by the closure arguments) have
    already been materialized to device data.
    So using `add_step_closure()` ensures that a single execution is performed,
    even when multiple closures are queued that require multiple tensors to be
    inspected.
    Step closures are run sequentially in the order in which they were queued.
    Note that even though execution is optimized when using this API, it is
    advised to throttle the printing/reporting events to once every N steps.

    Args:
        closure (callable): The function to be called.
        args (tuple): The arguments to be passed to the closure.
        run_async: If True, run the closure asynchronously.
    """
    devctx = get_device_context()
    closures_type = "async_step_closures" if run_async else "step_closures"
    step_closures = getattr(devctx, closures_type, None)
    if step_closures is None:
        step_closures = []
        setattr(devctx, closures_type, step_closures)
    # Bind `args` as a default argument so its current value is captured now,
    # at queueing time, rather than when the closure eventually runs.
    step_closures.append(lambda a=args: closure(*a))
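# Usage sketch (illustrative; the tensor and variable names are made up):
# queue a report of an intermediate tensor so it is printed once the step's
# graph has executed and the tensor is materialized, instead of forcing an
# extra execution at the call site.
#
#     def _report(step, loss):
#         print(f"step {step}: loss={loss.item()}")
#
#     add_step_closure(_report, args=(step, loss), run_async=True)
#
# With run_async=True the closure is dispatched through AsyncClosureHandler;
# otherwise it runs synchronously via ClosureHandler when the step closures
# are flushed.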
def run_step_closures():
    devctx = get_device_context()

    # Flush asynchronous closures first, handing them to a (lazily created)
    # AsyncClosureHandler stored on the device context.
    async_step_closures = getattr(devctx, "async_step_closures", None)
    if async_step_closures is not None:
        devctx.async_step_closures = []
        async_closure_handler = getattr(devctx, "async_closure_handler", None)
        if async_closure_handler is None:
            async_closure_handler = AsyncClosureHandler()
            devctx.async_closure_handler = async_closure_handler
        async_closure_handler(async_step_closures)

    # Then run synchronous closures on the calling thread.
    step_closures = getattr(devctx, "step_closures", None)
    if step_closures is not None:
        devctx.step_closures = []
        closure_handler = getattr(devctx, "closure_handler", None)
        if closure_handler is None:
            closure_handler = ClosureHandler()
            devctx.closure_handler = closure_handler
        closure_handler(step_closures)

    return devctx
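# End-to-end sketch (illustrative; assumes the lazy TorchScript backend is
# available and initialized). Closures queued with add_step_closure() are
# flushed by run_step_closures(), which is expected to be invoked at the step
# boundary (e.g. from torch._lazy.mark_step()) after live tensors have been
# materialized:
#
#     import torch
#     import torch._lazy
#     import torch._lazy.ts_backend
#
#     torch._lazy.ts_backend.init()
#     x = torch.randn(2, 2, device="lazy")
#     loss = (x * x).sum()
#     add_step_closure(lambda t: print("loss:", t.item()), args=(loss,))
#     torch._lazy.mark_step()  # materializes tensors, then runs step closures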