diff --git a/vllm/model_executor/models/qwen3_vl_moe.py b/vllm/model_executor/models/qwen3_vl_moe.py
index 52ea652b3765..02cc5d6d66d1 100644
--- a/vllm/model_executor/models/qwen3_vl_moe.py
+++ b/vllm/model_executor/models/qwen3_vl_moe.py
@@ -212,6 +212,8 @@ def load_weights(self, weights: Iterable[tuple[str,
                     # attempted to load as other weights later
                     is_expert_weight = True
                     name_mapped = name.replace(weight_name, param_name)
+                    if is_pp_missing_parameter(name_mapped, self):
+                        continue
                     if is_fused_expert:
                         loaded_weight = loaded_weight.transpose(-1, -2)  # no bias
@@ -230,8 +232,6 @@ def load_weights(self, weights: Iterable[tuple[str,
                                 name_mapped, params_dict, loaded_weight, shard_id,
                                 num_experts)
                     else:
-                        if is_pp_missing_parameter(name_mapped, self):
-                            continue
                         # Skip loading extra parameters for GPTQ/modelopt models
                         if name_mapped.endswith(
                                 ignore_suffixes
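
The patch hoists the is_pp_missing_parameter check above the is_fused_expert branch, so expert weights that live on another pipeline-parallel rank are skipped on both the fused and the per-expert loading paths rather than only the latter (the removed lines show the check previously ran only in the non-fused else branch). Below is a minimal, self-contained sketch of that control-flow pattern; rank_owns_param, owned_params, and the checkpoint keys are hypothetical stand-ins for illustration, not vLLM's actual API.

    # Illustrative sketch only; mirrors the reordered skip in load_weights above.
    def rank_owns_param(name: str, owned_params: set[str]) -> bool:
        # Stand-in for is_pp_missing_parameter(): True if this PP rank holds `name`.
        return name in owned_params

    def load_expert_weights(weights: dict[str, object], owned_params: set[str],
                            is_fused_expert: bool) -> list[str]:
        loaded = []
        for name, tensor in weights.items():
            name_mapped = name.replace("experts.down_proj", "experts.w2_weight")
            # Moved check: skip weights owned by another pipeline stage *before*
            # branching on fused vs. per-expert layout, so neither path tries to
            # look up a parameter this rank does not have.
            if not rank_owns_param(name_mapped, owned_params):
                continue
            if is_fused_expert:
                loaded.append(f"fused:{name_mapped}")        # fused-expert path
            else:
                loaded.append(f"per-expert:{name_mapped}")   # per-expert path
        return loaded

    if __name__ == "__main__":
        weights = {
            "model.layers.0.mlp.experts.down_proj": object(),
            "model.layers.40.mlp.experts.down_proj": object(),  # other PP rank
        }
        owned_params = {"model.layers.0.mlp.experts.w2_weight"}
        print(load_expert_weights(weights, owned_params, is_fused_expert=True))
        # ['fused:model.layers.0.mlp.experts.w2_weight']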