__init__.py

import threading

import torch._C._lazy
from torch.utils._pytree import tree_flatten, tree_unflatten

from .closure import add_step_closure, run_step_closures


def mark_step(device: str = "", wait=False):
    """Triggers a mark step, which amounts to
    - collecting a group of 'live' lazy tensors to index into the compilation cache
      (lowering/compiling their IR graphs if not cached)
    - kicking off execution of the compiled function
    - (optionally, wait=True) waiting for cpu-side execution to complete (does not sync the accelerator)
    """
    # TODO(whc) expand this to include backend hooks and align with XLA backend needs
    torch._C._lazy._mark_step(device, [], wait=wait)

    run_step_closures()
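
# A minimal usage sketch (an illustration, not part of this module; it assumes
# the TorchScript reference backend in torch._lazy.ts_backend is available in
# this build and exposes the "lazy" device once initialized):
#
#   import torch
#   import torch._lazy
#   import torch._lazy.ts_backend
#
#   torch._lazy.ts_backend.init()
#   x = torch.randn(2, 3, device="lazy")
#   y = (x + 1).relu()       # ops are only traced into the IR graph here
#   torch._lazy.mark_step()  # lower/compile the graph (or hit the cache) and run it
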
def wait_device_ops(devices=None):
    """Waits for all the async operations on the given devices to complete.

    Args:
        devices (string..., optional): The devices whose async ops need to be
            waited for. If empty, all the local devices will be waited for.
    """
    if devices is None:
        devices = []
    torch._C._lazy._wait_device_ops(devices=devices)
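
# A sketch of pairing mark_step with wait_device_ops (assuming the "lazy"
# device has been initialized as in the example above): mark_step kicks off
# execution asynchronously, and wait_device_ops blocks until it has finished.
#
#   torch._lazy.mark_step()
#   torch._lazy.wait_device_ops()  # with no argument, waits on all local devices
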
def sync_multi(tensors, devices):
    """Syncs the list of lazy tensors so their IR graphs get lowered for the
    active backend and the compiled computation graph gets cached.
    """
    torch._C._lazy._sync_multi(tensors, devices)
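
# A sketch of syncing specific tensors without a full mark step (assuming
# `a` and `b` are tensors living on the "lazy" device):
#
#   torch._lazy.sync_multi([a, b], ["lazy"])
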
def get_tensor_id(tensor):
    """Returns a unique id of the lazy tensor maintained by LTC."""
    return torch._C._lazy._get_tensor_id(tensor)
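
# A sketch (assuming `x` is a tensor on the "lazy" device); the returned id can
# be used to tell lazy tensors apart, e.g. when logging or debugging a trace:
#
#   tid = torch._lazy.get_tensor_id(x)
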
def to_cpu(tensors, devices=None):
    """Syncs an arbitrary pytree of lazy tensors and returns a structurally
    identical pytree of CPU tensors.
    """
    devices = devices or ["lazy"]

    flattened, spec = tree_flatten(tensors)
    sync_multi(flattened, devices)
    return tree_unflatten([t.to("cpu") for t in flattened], spec)
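
# A sketch of to_cpu on a nested structure (assuming the "lazy" device is
# initialized; the dict below is only an illustration of an arbitrary pytree):
#
#   state = {"w": torch.randn(4, 4, device="lazy"),
#            "b": torch.zeros(4, device="lazy")}
#   cpu_state = torch._lazy.to_cpu(state)  # same dict structure, CPU tensors
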
def save(tensors, *args, **kwargs):
    """Moves the (possibly nested) lazy tensors to CPU and saves them via torch.save."""
    torch.save(to_cpu(tensors), *args, **kwargs)
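
# A sketch of checkpointing from the lazy device (assuming `model` is a module
# whose parameters live on the "lazy" device; the file name is illustrative):
#
#   torch._lazy.save(model.state_dict(), "lazy_checkpoint.pt")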