# lower_to_fbgemm.py
  1. from ._lower_to_native_backend import _lower_to_native_backend
  2. from ..qconfig import QConfigAny
  3. from torch.fx import GraphModule
  4. from typing import Dict, Tuple
  5. __all__ = ['lower_to_fbgemm']
  6. def lower_to_fbgemm(
  7. model: GraphModule,
  8. qconfig_map: Dict[str, QConfigAny],
  9. node_name_to_scope: Dict[str, Tuple[str, type]]
  10. ) -> GraphModule:
  11. """ Lower a quantized reference model (with reference quantized operator patterns)
  12. to fbgemm
  13. """
  14. return _lower_to_native_backend(model, qconfig_map, node_name_to_scope)