# _fuser.py

import contextlib
import torch
from typing import List, Tuple


@contextlib.contextmanager
def optimized_execution(should_optimize):
    """
    A context manager that controls whether the JIT's executor will run
    optimizations before executing a function.
    """
    stored_flag = torch._C._get_graph_executor_optimize()
    torch._C._set_graph_executor_optimize(should_optimize)
    try:
        yield
    finally:
        torch._C._set_graph_executor_optimize(stored_flag)
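

# Illustrative usage sketch: ``_example_optimized_execution`` is a made-up
# helper (not part of this module's API) showing how ``optimized_execution``
# can temporarily disable executor optimizations around a scripted call; the
# previous setting is restored on exit, even if the call raises.
def _example_optimized_execution():
    @torch.jit.script
    def add(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        return a + b

    with optimized_execution(False):
        # Runs without executor optimizations inside the block.
        add(torch.ones(2), torch.ones(2))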


@contextlib.contextmanager
def fuser(name):
    """
    A context manager that facilitates switching between backend fusers.

    Valid names:
    * ``fuser0`` - enables only legacy fuser
    * ``fuser1`` - enables only NNC
    * ``fuser2`` - enables only nvFuser
    * ``fuser3`` - enables oneDNN Graph
    """
    old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
    old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
    old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
    old_nvfuser_state = torch._C._jit_nvfuser_enabled()
    old_llga_state = torch._C._jit_llga_enabled()
    if name == 'fuser0':  # legacy fuser
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_nvfuser_enabled(False)
        torch._C._jit_set_llga_enabled(False)
    elif name == 'fuser1':  # NNC
        old_profiling_executor = torch._C._jit_set_profiling_executor(True)
        old_profiling_mode = torch._C._get_graph_executor_optimize(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_set_nvfuser_enabled(False)
        torch._C._jit_set_llga_enabled(False)
    elif name == 'fuser2':  # nvFuser
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(False)
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_nvfuser_enabled(True)
        torch._C._jit_set_llga_enabled(False)
    elif name == 'fuser3':  # oneDNN Graph
        old_profiling_executor = torch._C._jit_set_profiling_executor(True)
        old_profiling_mode = torch._C._get_graph_executor_optimize(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(False)
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_set_nvfuser_enabled(False)
        torch._C._jit_set_llga_enabled(True)
    elif name == 'none':  # turn the PyTorch fuser off
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(False)
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_nvfuser_enabled(False)
        torch._C._jit_set_llga_enabled(False)
    else:
        raise Exception(f"unrecognized fuser option (name: {name})")
    try:
        yield
    finally:
        if name in ['fuser1', 'fuser3']:  # NNC or oneDNN Graph
            torch._C._jit_set_profiling_executor(old_profiling_executor)
            torch._C._get_graph_executor_optimize(old_profiling_mode)
        # recover the previous values
        torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse)
        torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse)
        torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
        torch._C._jit_set_nvfuser_enabled(old_nvfuser_state)
        torch._C._jit_set_llga_enabled(old_llga_state)
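

# Illustrative usage sketch: ``_example_fuser`` is a made-up helper (not part
# of this module) showing how ``fuser`` can pin TorchScript execution to one
# backend for the duration of a block; the previous fuser flags are restored
# on exit.
def _example_fuser():
    model = torch.jit.script(torch.nn.Linear(4, 4))
    x = torch.randn(2, 4)

    with fuser('fuser1'):  # NNC only
        model(x)

    with fuser('none'):  # all PyTorch fusers disabled
        model(x)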


last_executed_optimized_graph = torch._C._last_executed_optimized_graph


def _get_differentiable_graph_node(node, diff_node):
    if node.kind() == 'prim::DifferentiableGraph':
        diff_node.append(node)
    else:
        for block in node.blocks():
            for n in block.nodes():
                _get_differentiable_graph_node(n, diff_node)


def _graph_for(self, *args, **kwargs):
    return _script_method_graph_for(self, self, *args, **kwargs)


def _script_method_graph_for(self, parent, *args, **kwargs):
    try:
        dbs = parent.get_debug_state()
        eps = list(dbs.execution_plans.values())
        assert len(eps) == 1
        graph = eps[0].graph.copy()

        # graph executor states for the differentiable nodes
        fw_states = eps[0].code.differentiable_op_executor_states()
        diff_nodes: List[torch._C.Node] = []
        for n in graph.nodes():
            _get_differentiable_graph_node(n, diff_nodes)

        assert len(fw_states) == len(diff_nodes)

        # swap each differentiable graph with the optimized graph from its execution plan
        for n, state in zip(diff_nodes, fw_states):
            fw_execution_plans = list(state.execution_plans.values())
            # We can only update the subgraph when there is a unique execution
            # plan. Avoid asserting here so we skip the nodes that can't be
            # updated while making a best effort to update the others.
            if len(fw_execution_plans) == 1:
                n.g_('Subgraph', fw_execution_plans[0].graph)

        return graph
    except Exception:
        # Fallback: just run the callable and return the last recorded
        # optimized graph.
        self(*args, **kwargs)
        return last_executed_optimized_graph()
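

# Illustrative sketch: elsewhere in torch.jit these helpers are exposed as the
# ``graph_for`` debugging method on scripted callables (that binding is assumed
# here; it does not happen in this file). ``_example_graph_for`` is a made-up
# helper showing how the optimized graph for particular inputs can be inspected
# after a few profiling runs.
def _example_graph_for():
    @torch.jit.script
    def relu_add(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        return torch.relu(a + b)

    x = torch.randn(4)
    for _ in range(3):  # warm-up runs so an optimized execution plan exists
        relu_add(x, x)
    print(relu_add.graph_for(x, x))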


def set_fusion_strategy(strategy: List[Tuple[str, int]]):
    """
    Sets the type and number of specializations that can occur during fusion.

    Usage: provide a list of pairs (type, depth) where type is one of "STATIC" or
    "DYNAMIC" and depth is an integer.

    Behavior - static vs dynamic:
        In STATIC fusion, fused ops are compiled to have fixed input shapes. The
        shapes are determined based on some initial profiling runs.
        In DYNAMIC fusion, fused ops are compiled to have variable input shapes,
        so that multiple shapes are possible.
        In both cases, we also recompile on new striding behavior, device, or dtype.

    Behavior - fallback functions & depth:
        When an input doesn't match the format required by the specialized compiled
        op, it will run a fallback function. Fallback functions are recursively
        compiled and specialized based on the observed tensor shapes. Since
        compilation can be slow, the "depth" parameter is provided to limit the
        number of specializations that can be compiled before giving up on
        recompiling and falling back to a completely un-fused, un-specialized
        implementation.

    The list of (type, depth) pairs controls the type and the number of
    specializations. For example: [("STATIC", 2), ("DYNAMIC", 2)] indicates that
    the first two specializations will use static fusion, the following two
    specializations will use dynamic fusion, and any inputs that satisfy none of
    the 4 options will run an unfused implementation.

    NB: in the future, as more fusion backends are added there may be more
    granular APIs for specific fusers.
    """
    return torch._C._jit_set_fusion_strategy(strategy)
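

# Illustrative usage sketch: ``_example_set_fusion_strategy`` is a made-up
# helper (not part of this module) showing the (type, depth) format the
# docstring describes: two static-shape specializations, then two dynamic-shape
# specializations, with anything beyond that running unfused.
def _example_set_fusion_strategy():
    set_fusion_strategy([("STATIC", 2), ("DYNAMIC", 2)])

    @torch.jit.script
    def mul_add(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        return a * b + b

    # Each new input shape may trigger a new specialization, up to the depths
    # configured above.
    for n in (4, 8, 16, 32, 64):
        mul_add(torch.randn(n), torch.randn(n))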