  1. # flake8: noqa: F401
  2. r"""
  3. Utils shared by different modes of quantization (eager/graph)
  4. This file is in the process of migration to `torch/ao/quantization`, and
  5. is kept here for compatibility while the migration process is ongoing.
  6. If you are adding a new entry/functionality, please, add it to the
  7. `torch/ao/quantization/utils.py`, while adding an import statement
  8. here.
  9. """
  10. from torch.ao.quantization.utils import (
  11. activation_dtype,
  12. activation_is_int8_quantized,
  13. activation_is_statically_quantized,
  14. calculate_qmin_qmax,
  15. check_min_max_valid,
  16. get_combined_dict,
  17. get_qconfig_dtypes,
  18. get_qparam_dict,
  19. get_quant_type,
  20. get_swapped_custom_module_class,
  21. getattr_from_fqn,
  22. is_per_channel,
  23. is_per_tensor,
  24. weight_dtype,
  25. weight_is_quantized,
  26. weight_is_statically_quantized,
  27. )