  1. # flake8: noqa: F401
  2. r"""
  3. This file is in the process of migration to `torch/ao/quantization`, and
  4. is kept here for compatibility while the migration process is ongoing.
  5. If you are adding a new entry/functionality, please, add it to the
  6. appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
  7. here.
  8. """
  9. from torch.ao.quantization.fx._equalize import (
  10. reshape_scale,
  11. _InputEqualizationObserver,
  12. _WeightEqualizationObserver,
  13. calculate_equalization_scale,
  14. EqualizationQConfig,
  15. input_equalization_observer,
  16. weight_equalization_observer,
  17. default_equalization_qconfig,
  18. fused_module_supports_equalization,
  19. nn_module_supports_equalization,
  20. custom_module_supports_equalization,
  21. node_supports_equalization,
  22. is_equalization_observer,
  23. get_op_node_and_weight_eq_obs,
  24. maybe_get_weight_eq_obs_node,
  25. maybe_get_next_input_eq_obs,
  26. maybe_get_next_equalization_scale,
  27. scale_input_observer,
  28. scale_weight_node,
  29. scale_weight_functional,
  30. clear_weight_quant_obs_node,
  31. remove_node,
  32. update_obs_for_equalization,
  33. convert_eq_obs,
  34. _convert_equalization_ref,
  35. get_layer_sqnr_dict,
  36. get_equalization_qconfig_dict,
  37. CUSTOM_MODULE_SUPP_LIST,
  38. )