# torch/backends/mkl/__init__.py
  1. import torch
  2. def is_available():
  3. r"""Returns whether PyTorch is built with MKL support."""
  4. return torch._C.has_mkl
# Verbosity levels accepted by ``torch.backends.mkl.verbose``.
VERBOSE_OFF = 0  # disable on-demand oneMKL verbose message dumping
VERBOSE_ON = 1   # enable on-demand oneMKL verbose message dumping
  7. class verbose:
  8. """
  9. On-demand oneMKL verbosing functionality
  10. To make it easier to debug performance issues, oneMKL can dump verbose
  11. messages containing execution information like duration while executing
  12. the kernel. The verbosing functionality can be invoked via an environment
  13. variable named `MKL_VERBOSE`. However, this methodology dumps messages in
  14. all steps. Those are a large amount of verbose messages. Moreover, for
  15. investigating the performance issues, generally taking verbose messages
  16. for one single iteration is enough. This on-demand verbosing functionality
  17. makes it possible to control scope for verbose message dumping. In the
  18. following example, verbose messages will be dumped out for the second
  19. inference only.
  20. .. highlight:: python
  21. .. code-block:: python
  22. import torch
  23. model(data)
  24. with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON):
  25. model(data)
  26. Args:
  27. level: Verbose level
  28. - ``VERBOSE_OFF``: Disable verbosing
  29. - ``VERBOSE_ON``: Enable verbosing
  30. """
  31. def __init__(self, enable):
  32. self.enable = enable
  33. def __enter__(self):
  34. if self.enable == VERBOSE_OFF:
  35. return
  36. st = torch._C._verbose.mkl_set_verbose(self.enable)
  37. assert st, "Failed to set MKL into verbose mode. Please consider to disable this verbose scope."
  38. return self
  39. def __exit__(self, exc_type, exc_val, exc_tb):
  40. torch._C._verbose.mkl_set_verbose(VERBOSE_OFF)
  41. return False