From e5315fec80da5dc59d4075344cdf1154c8efaa57 Mon Sep 17 00:00:00 2001
From: Chen Lai
Date: Tue, 19 Nov 2024 22:12:08 -0800
Subject: [PATCH] Fix custom annotation

Summary:
It looks like this was added in https://github.com/pytorch/executorch/pull/6849 and may still have been using the old API for the default 8-bit quantization.

Differential Revision: D66219251
---
 backends/qualcomm/quantizer/custom_annotation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backends/qualcomm/quantizer/custom_annotation.py b/backends/qualcomm/quantizer/custom_annotation.py
index bbb35383417..8a3ff405711 100644
--- a/backends/qualcomm/quantizer/custom_annotation.py
+++ b/backends/qualcomm/quantizer/custom_annotation.py
@@ -78,7 +78,7 @@ def annotate_single_in_single_out(
         )
 
     def annotate_matmul_input1(node: Node):
-        quantization_config_8a8w = get_default_8bit_qnn_ptq_config(
+        quantization_config_8a8w = get_8a8w_qnn_ptq_config(
            act_symmetric=True, act_observer=MinMaxObserver
         )
         while isinstance(node, Node) and node.op == "call_function":
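
Note (not part of the patch): below is a minimal usage sketch showing how the custom annotation touched by this fix might be wired into a PT2E quantization flow. Everything outside the diff is an assumption that may differ across ExecuTorch versions, in particular annotate_matmul_16a8w, QnnQuantizer.add_custom_quant_annotations, the export call, and the placeholder model and inputs.

# Hypothetical sketch: registering the custom annotation (whose inner
# annotate_matmul_input1 now calls get_8a8w_qnn_ptq_config) with the
# Qualcomm quantizer. Names not shown in the diff are assumptions.
import torch
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

from executorch.backends.qualcomm.quantizer.custom_annotation import (
    annotate_matmul_16a8w,  # assumed wrapper around annotate_matmul_input1
)
from executorch.backends.qualcomm.quantizer.quantizer import QnnQuantizer


class TinyMatmul(torch.nn.Module):  # placeholder model for illustration only
    def forward(self, x, y):
        return torch.matmul(x, y)


model = TinyMatmul().eval()
example_inputs = (torch.randn(1, 8, 16), torch.randn(1, 16, 8))

quantizer = QnnQuantizer()
# Assumed hook for attaching extra annotation passes to the quantizer.
quantizer.add_custom_quant_annotations((annotate_matmul_16a8w,))

exported = torch.export.export_for_training(model, example_inputs).module()
prepared = prepare_pt2e(exported, quantizer)
prepared(*example_inputs)  # one calibration pass with representative inputs
quantized = convert_pt2e(prepared)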