from typing import Dict, Tuple

from torch.ao.quantization.qconfig import QConfigAny
from torch.fx import GraphModule

from ._lower_to_native_backend import _lower_to_native_backend

__all__ = ["lower_to_qnnpack"]


def lower_to_qnnpack(
    model: GraphModule,
    qconfig_map: Dict[str, QConfigAny],
    node_name_to_scope: Dict[str, Tuple[str, type]],
) -> GraphModule:
    """Lower a quantized reference model (with reference quantized operator patterns)
    to QNNPACK by delegating to the shared native backend lowering pass.
    """
    return _lower_to_native_backend(model, qconfig_map, node_name_to_scope)
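

# A minimal usage sketch (illustrative only, not part of this module's API): in
# the FX graph mode quantization flow, `qconfig_map` and `node_name_to_scope`
# are produced internally during convert, so the placeholder values below are
# hypothetical stand-ins rather than outputs of a public helper.
#
#     reference_model: GraphModule = ...  # e.g. result of convert_to_reference_fx
#     qconfig_map: Dict[str, QConfigAny] = {}                # node name -> QConfig
#     node_name_to_scope: Dict[str, Tuple[str, type]] = {}   # node name -> (module path, module type)
#     lowered = lower_to_qnnpack(reference_model, qconfig_map, node_name_to_scope)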