# torch/jit/_state.py
  1. """JIT-related state
  2. This module stores various pieces of Python-global state relating to the JIT.
  3. This is not intended to be imported directly; please the exposed
  4. functionalities in `torch.jit`.
  5. """
import os
import weakref

import torch
  9. class EnabledProxy:
  10. """Stores whether the JIT is enabled or not.
  11. This is just a wrapper for a bool, so that we get reference semantics
  12. """
  13. def __init__(self):
  14. self.enabled = self.parse_env(
  15. "PYTORCH_JIT", True, "> Using PyTorch JIT", "> PyTorch JIT DISABLED"
  16. )
  17. def parse_env(self, name, default, true_message, false_message):
  18. value = os.environ.get(name)
  19. if value is None:
  20. return default
  21. if value.lower() in {"1", "true", "yes"}:
  22. return True
  23. elif value.lower() in {"0", "false", "no"}:
  24. return False
  25. if value == "1v":
  26. print(true_message)
  27. return True
  28. elif value == "0v":
  29. print(false_message)
  30. return False
  31. raise ValueError("Unknown setting of {}. Try using 0 or 1.".format(name))
  32. def __bool__(self):
  33. return self.enabled
  34. _enabled = EnabledProxy()
  35. def disable():
  36. _enabled.enabled = False
  37. def enable():
  38. _enabled.enabled = True
# The Python CompilationUnit. All functions and modules defined in Python will
# live in here. It's defined in Python because doing so in cpp creates static
# destruction order issues.
_python_cu = torch._C.CompilationUnit()
# python class => ScriptClass mapping
_script_classes = {}
# reverse map: ScriptClass qualified name => python class; kept in sync
# with _script_classes by _add_script_class / _clear_class_state
_name_to_pyclass = {}
  46. def _add_script_class(python_class, script_class):
  47. _script_classes[python_class] = script_class
  48. _name_to_pyclass[script_class.qualified_name()] = python_class
  49. def _get_script_class(python_class):
  50. override = getattr(python_class, "_jit_override_qualname", None)
  51. if override is not None:
  52. python_class = _get_python_class(override)
  53. return _script_classes.get(python_class, None)
  54. def _get_python_class(qualified_name):
  55. return _name_to_pyclass.get(qualified_name, None)
  56. def _clear_class_state():
  57. _script_classes.clear()
  58. _name_to_pyclass.clear()
# Caching: we currently cache compilation of free functions and overloaded functions.
# To cache free functions we hold a weak ref to the function object and
# map to the compiled fn's qualified name.
# To cache overloaded functions we hold a weak ref to the function obj and
# map to all of its overloaded compiled fns.
# In the future we could consider caching more types of objects so that
# aliasing is preserved across separate compilations of the same object.
# Weak keys mean a cache entry dies with the Python function object itself.
_jit_caching_layer: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
_jit_function_overload_caching: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
  68. def _try_get_jit_cached_overloads(key):
  69. qual_names = _jit_function_overload_caching.get(key, None)
  70. if qual_names:
  71. return [_python_cu.find_function(qual_name) for qual_name in qual_names]
  72. else:
  73. return None
  74. def _set_jit_overload_cache(key, compiled_fns):
  75. _jit_function_overload_caching[key] = [fn.qualified_name for fn in compiled_fns]
  76. def _try_get_jit_cached_function(key):
  77. if getattr(key, "__disable_jit_function_caching__", False) is True:
  78. return None
  79. qual_name = _jit_caching_layer.get(key, None)
  80. if qual_name:
  81. return _python_cu.find_function(qual_name)
  82. else:
  83. return None
  84. def _set_jit_function_cache(key, value):
  85. # only free functions currently supported
  86. assert isinstance(value, torch.jit.ScriptFunction)
  87. _jit_caching_layer[key] = value.qualified_name