From b6f2566d9682c90fc06a3d0c67a1daf95b858c1a Mon Sep 17 00:00:00 2001
From: Chen Lai
Date: Thu, 29 May 2025 10:06:26 -0700
Subject: [PATCH] Forward fix uninitialized params

Summary: Forward fix for https://github.com/pytorch/executorch/pull/10578: initialize the AR-length and cache-length locals at declaration so no code path can read them before they are assigned.

Reviewed By: kimishpatel

Differential Revision: D75536694
---
 examples/qualcomm/oss_scripts/llama/runner/runner.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/examples/qualcomm/oss_scripts/llama/runner/runner.cpp b/examples/qualcomm/oss_scripts/llama/runner/runner.cpp
index d348878294a..bdc2019352e 100644
--- a/examples/qualcomm/oss_scripts/llama/runner/runner.cpp
+++ b/examples/qualcomm/oss_scripts/llama/runner/runner.cpp
@@ -152,8 +152,10 @@ Error Runner::load() {
   // Use attention mask length to retrieve AR length and context length
   // Cache len equals to context_len - ar_len
-  int32_t prompt_processor_ar_len, token_generator_ar_len, max_cache_len,
-      max_ar_len;
+  int32_t prompt_processor_ar_len = 0;
+  int32_t token_generator_ar_len = 0;
+  int32_t max_cache_len = 0;
+  int32_t max_ar_len = 0;
   // atten mask: [1, AR-N, CL]
   auto atten_mask_meta_token = method_meta->input_tensor_meta(1);
   token_generator_ar_len = atten_mask_meta_token->sizes()[1];
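
Note: the sketch below is an editorial illustration of the bug class the patch
guards against, not part of the patch itself; the function names and the value
32 are hypothetical and do not appear in runner.cpp.

    #include <cstdint>

    // Hypothetical helper: the local is only assigned on one branch,
    // mirroring the declared-but-unassigned pattern removed by the patch.
    int32_t ar_len_for(bool use_prompt_processor) {
      int32_t ar_len;  // uninitialized
      if (use_prompt_processor) {
        ar_len = 32;
      }
      return ar_len;  // undefined behavior when use_prompt_processor is false
    }

    // With the pattern the patch adopts, the local has a defined value on
    // every path, even if no branch assigns it.
    int32_t ar_len_for_fixed(bool use_prompt_processor) {
      int32_t ar_len = 0;  // zero-initialized at declaration, as in the fix
      if (use_prompt_processor) {
        ar_len = 32;
      }
      return ar_len;
    }

Zero-initializing at declaration also quiets diagnostics such as GCC's
-Wmaybe-uninitialized, which flags the first variant.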