lower_to_qnnpack.py

from ._lower_to_native_backend import _lower_to_native_backend
from ..qconfig import QConfigAny
from torch.fx import GraphModule
from typing import Dict, Tuple

__all__ = [
    "lower_to_qnnpack"
]

def lower_to_qnnpack(
    model: GraphModule,
    qconfig_map: Dict[str, QConfigAny],
    node_name_to_scope: Dict[str, Tuple[str, type]]
) -> GraphModule:
    """ Lower a quantized reference model (with reference quantized operator patterns)
    to qnnpack
    """
    return _lower_to_native_backend(model, qconfig_map, node_name_to_scope)
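
# Usage sketch (illustrative addition, not part of the upstream file): in the
# FX graph mode quantization flow, `model`, `qconfig_map`, and
# `node_name_to_scope` normally come from the prepare/convert steps that
# produce a reference quantized GraphModule. The block below only shows the
# call shape with a plain traced float module and empty maps (both are
# assumptions for the sketch); with no reference quantized patterns present,
# the lowering pass leaves the graph effectively unchanged.
if __name__ == "__main__":
    import torch
    from torch.fx import symbolic_trace

    class _Example(torch.nn.Module):  # hypothetical toy module for the sketch
        def forward(self, x):
            return x + 1.0

    traced = symbolic_trace(_Example())
    # Empty maps: nothing matches the reference quantized patterns, so the
    # returned GraphModule is a pass-through of the traced float graph.
    lowered = lower_to_qnnpack(traced, qconfig_map={}, node_name_to_scope={})
    print(lowered.graph)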