
  1. # flake8: noqa: F401
  2. r"""
  3. This file is in the process of migration to `torch/ao/quantization`, and
  4. is kept here for compatibility while the migration process is ongoing.
  5. If you are adding a new entry/functionality, please, add it to the
  6. `torch/ao/quantization/quantize.py`, while adding an import statement
  7. here.
  8. """
  9. from torch.ao.quantization.quantize import _convert
  10. from torch.ao.quantization.quantize import _observer_forward_hook
  11. from torch.ao.quantization.quantize import _propagate_qconfig_helper
  12. from torch.ao.quantization.quantize import _remove_activation_post_process
  13. from torch.ao.quantization.quantize import _remove_qconfig
  14. from torch.ao.quantization.quantize import _add_observer_
  15. from torch.ao.quantization.quantize import add_quant_dequant
  16. from torch.ao.quantization.quantize import convert
  17. from torch.ao.quantization.quantize import _get_observer_dict
  18. from torch.ao.quantization.quantize import _get_unique_devices_
  19. from torch.ao.quantization.quantize import _is_activation_post_process
  20. from torch.ao.quantization.quantize import prepare
  21. from torch.ao.quantization.quantize import prepare_qat
  22. from torch.ao.quantization.quantize import propagate_qconfig_
  23. from torch.ao.quantization.quantize import quantize
  24. from torch.ao.quantization.quantize import quantize_dynamic
  25. from torch.ao.quantization.quantize import quantize_qat
  26. from torch.ao.quantization.quantize import _register_activation_post_process_hook
  27. from torch.ao.quantization.quantize import swap_module