From 323c74a1ffe1e26b4ba7aa0460bbd36c41422a1a Mon Sep 17 00:00:00 2001
From: cccclai
Date: Fri, 21 Nov 2025 17:32:42 -0800
Subject: [PATCH] Forward fix eval_llama_qnn

https://github.com/pytorch/executorch/pull/15807 breaks the eval flow.
Forward fix.
---
 examples/qualcomm/oss_scripts/llama/eval_llama_qnn.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/qualcomm/oss_scripts/llama/eval_llama_qnn.py b/examples/qualcomm/oss_scripts/llama/eval_llama_qnn.py
index 9af9cdf9549..264d0fe69a5 100644
--- a/examples/qualcomm/oss_scripts/llama/eval_llama_qnn.py
+++ b/examples/qualcomm/oss_scripts/llama/eval_llama_qnn.py
@@ -316,7 +316,7 @@ def eval_llm(args):
     if args.ptq is not None:
         quant_dtype = getattr(QuantDtype, f"use_{args.ptq}")
         decoder_model_config = SUPPORTED_LLM_MODELS[args.decoder_model]
-        custom_annotations = decoder_model_config.custom_annotation
+        custom_annotations = decoder_model_config.quant_recipe.custom_quant_annotations
         quantizer = make_custom_quantizer(
             quant_dtype, args.range_setting, custom_annotations, args.quant_linear_only
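
For context, a minimal sketch of the config-shape change this patch follows
up on: PR #15807 appears to have moved the custom annotation callables from a
direct attribute on the model config to a nested quant_recipe object, so the
eval script's old lookup raised an AttributeError. The dataclass definitions
below are illustrative assumptions, not the actual ExecuTorch types; only the
attribute names quant_recipe and custom_quant_annotations come from the diff.

from dataclasses import dataclass, field
from typing import Callable, Sequence

@dataclass
class QuantRecipe:
    # Callables that apply extra quantization annotations to the graph
    # (hypothetical shape; the real type lives in the ExecuTorch repo).
    custom_quant_annotations: Sequence[Callable] = ()

@dataclass
class DecoderModelConfig:
    # Before PR #15807 the annotations hung directly off the model config
    # as `custom_annotation`; afterwards they live under `quant_recipe`.
    quant_recipe: QuantRecipe = field(default_factory=QuantRecipe)

decoder_model_config = DecoderModelConfig()

# Old access, now broken:
#   custom_annotations = decoder_model_config.custom_annotation
# Fixed access, matching the + line in the diff above:
custom_annotations = decoder_model_config.quant_recipe.custom_quant_annotations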