__init__.py 1.4 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546
r"""
PyTorch Profiler is a tool that allows the collection of performance metrics during training and inference.
Profiler's context manager API can be used to better understand what model operators are the most expensive,
examine their input shapes and stack traces, study device kernel activity and visualize the execution trace.

.. note::
    An earlier version of the API in :mod:`torch.autograd` module is considered legacy and will be deprecated.
"""
  8. import os
  9. from torch._C._autograd import _supported_activities, DeviceType, kineto_available
  10. from torch._C._profiler import _ExperimentalConfig, ProfilerActivity, RecordScope
  11. from torch.autograd.profiler import record_function, KinetoStepTracker
  12. from torch.optim.optimizer import register_optimizer_step_post_hook
  13. from .profiler import (
  14. _KinetoProfile,
  15. ExecutionGraphObserver,
  16. profile,
  17. ProfilerAction,
  18. schedule,
  19. supported_activities,
  20. tensorboard_trace_handler,
  21. )
  22. __all__ = [
  23. "profile",
  24. "schedule",
  25. "supported_activities",
  26. "tensorboard_trace_handler",
  27. "ProfilerAction",
  28. "ProfilerActivity",
  29. "kineto_available",
  30. "DeviceType",
  31. "record_function",
  32. "ExecutionGraphObserver",
  33. ]
  34. from . import itt
  35. def _optimizer_post_hook(optimizer, args, kwargs):
  36. KinetoStepTracker.increment_step("Optimizer")
  37. if os.environ.get("KINETO_USE_DAEMON", None):
  38. _ = register_optimizer_step_post_hook(_optimizer_post_hook)