From b81bcd15a88859920fceea85d00f0f5aa4ee4b17 Mon Sep 17 00:00:00 2001 From: Bill Nell Date: Tue, 9 Sep 2025 23:40:08 +0000 Subject: [PATCH 1/7] [Models] Apply SharedFusedMoE to all models with shared experts Signed-off-by: Bill Nell --- .../layers/fused_moe/__init__.py | 9 +- .../shared_fused_moe.py | 30 +++--- .../model_executor/layers/quantization/fp8.py | 2 + .../layers/shared_fused_moe/__init__.py | 5 - vllm/model_executor/models/aria.py | 37 ++++---- vllm/model_executor/models/bailing_moe.py | 49 +++++----- vllm/model_executor/models/deepseek_v2.py | 69 +++++--------- vllm/model_executor/models/dots1.py | 49 +++++----- vllm/model_executor/models/ernie45_moe.py | 41 ++++---- vllm/model_executor/models/ernie45_vl_moe.py | 65 +++++++------ vllm/model_executor/models/glm4_moe.py | 67 +++++-------- vllm/model_executor/models/hunyuan_v1.py | 59 ++++++------ vllm/model_executor/models/llama4.py | 15 ++- vllm/model_executor/models/qwen2_moe.py | 60 ++++++------ vllm/model_executor/models/qwen3_next.py | 93 ++++++++----------- 15 files changed, 315 insertions(+), 335 deletions(-) rename vllm/model_executor/layers/{shared_fused_moe => fused_moe}/shared_fused_moe.py (59%) delete mode 100644 vllm/model_executor/layers/shared_fused_moe/__init__.py diff --git a/vllm/model_executor/layers/fused_moe/__init__.py b/vllm/model_executor/layers/fused_moe/__init__.py index 56ffaf861ac7..5ff1e83617b7 100644 --- a/vllm/model_executor/layers/fused_moe/__init__.py +++ b/vllm/model_executor/layers/fused_moe/__init__.py @@ -11,10 +11,10 @@ FusedMoeWeightScaleSupported, ) from vllm.model_executor.layers.fused_moe.modular_kernel import ( - FusedMoEActivationFormat, - FusedMoEPermuteExpertsUnpermute, - FusedMoEPrepareAndFinalize, -) + FusedMoEActivationFormat, FusedMoEPermuteExpertsUnpermute, + FusedMoEPrepareAndFinalize) +from vllm.model_executor.layers.fused_moe.shared_fused_moe import ( + SharedFusedMoE) from vllm.model_executor.layers.fused_moe.utils import activation_without_mul from vllm.triton_utils import HAS_TRITON @@ -42,6 +42,7 @@ def get_config() -> Optional[dict[str, Any]]: "FusedMoEPermuteExpertsUnpermute", "FusedMoEActivationFormat", "FusedMoEPrepareAndFinalize", + "SharedFusedMoE", "activation_without_mul", "override_config", "get_config", diff --git a/vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py b/vllm/model_executor/layers/fused_moe/shared_fused_moe.py similarity index 59% rename from vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py rename to vllm/model_executor/layers/fused_moe/shared_fused_moe.py index a8b09a5c3cdb..4f9cc682b6ec 100644 --- a/vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +++ b/vllm/model_executor/layers/fused_moe/shared_fused_moe.py @@ -18,13 +18,19 @@ class SharedFusedMoE(FusedMoE): def __init__( self, - shared_experts: torch.nn.Module, + shared_experts: Optional[torch.nn.Module], use_overlapped: bool = True, **kwargs, ): super().__init__(**kwargs) self._shared_experts = shared_experts - self.use_overlapped = use_overlapped + # Disable shared expert overlap if EP is disabled or we are not using + # flashinfer + DP since there is nothing to be gained in this case. + # Disabling the overlap optimization also prevents the shared experts + # from being hidden from torch.compile. 
+ self.use_overlapped = use_overlapped and not ( + self.use_ep or self.use_flashinfer_cutlass_kernels + ) and self._shared_experts is not None @property def shared_experts(self) -> Optional[torch.nn.Module]: @@ -36,16 +42,16 @@ def forward( router_logits: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: if not self.use_overlapped: - shared_out = self._shared_experts(hidden_states) - - # Reduce outputs if necessary, since the MLP should - # have been created with reduce_results=False. - if ( - self.reduce_results - and self.tp_size > 1 - and self.must_reduce_shared_expert_outputs() - ): - shared_out = tensor_model_parallel_all_reduce(shared_out) + if self._shared_experts is not None: + shared_out = self._shared_experts(hidden_states) + + # Reduce shared expert outputs if necessary, since the MLP + # should have been created with reduce_results=False. + if (self.reduce_results and self.tp_size > 1 + and self.must_reduce_shared_expert_outputs()): + shared_out = tensor_model_parallel_all_reduce(shared_out) + else: + shared_out = None fused_out = super().forward( hidden_states=hidden_states, diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py index 2123fd9eba15..73e004480398 100644 --- a/vllm/model_executor/layers/quantization/fp8.py +++ b/vllm/model_executor/layers/quantization/fp8.py @@ -741,6 +741,8 @@ def create_weights( layer.w13_input_scale = None layer.w2_input_scale = None + self.rocm_aiter_moe_enabled = False + def process_weights_after_loading(self, layer: Module) -> None: # Lazy import to avoid importing triton too early. from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import ( diff --git a/vllm/model_executor/layers/shared_fused_moe/__init__.py b/vllm/model_executor/layers/shared_fused_moe/__init__.py deleted file mode 100644 index b047e9cad04a..000000000000 --- a/vllm/model_executor/layers/shared_fused_moe/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# SPDX-FileCopyrightText: Copyright contributors to the vLLM project -from vllm.model_executor.layers.shared_fused_moe.shared_fused_moe import SharedFusedMoE - -__all__ = ["SharedFusedMoE"] diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py index 7db118ca0745..79d535ec904d 100644 --- a/vllm/model_executor/models/aria.py +++ b/vllm/model_executor/models/aria.py @@ -13,8 +13,9 @@ from vllm.config.multimodal import BaseDummyOptions from vllm.distributed import get_tensor_model_parallel_rank from vllm.model_executor.layers.activation import get_act_fn -from vllm.model_executor.layers.fused_moe import FusedMoE -from vllm.model_executor.layers.linear import ColumnParallelLinear, RowParallelLinear +from vllm.model_executor.layers.fused_moe import SharedFusedMoE +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead @@ -206,10 +207,10 @@ def forward( return out -class AriaFusedMoE(FusedMoE): - def weight_loader( - self, param: nn.Parameter, loaded_weight: torch.Tensor, shard_id: str - ) -> None: +class AriaFusedMoE(SharedFusedMoE): + + def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor, + shard_id: str) -> None: # Override the weight_loader to handle the expert weights in the Aria # model, which are already packed with 
experts, and merge the gate and # up weights for each expert. @@ -260,7 +261,16 @@ def __init__( torch.empty((self.config.moe_num_experts, self.config.hidden_size)) ) + self.shared_experts = LlamaMLP( + config.hidden_size, + config.intermediate_size * config.moe_num_shared_experts, + "silu", + quant_config=quant_config, + bias=config.mlp_bias, + ) + self.experts = AriaFusedMoE( + shared_experts=self.shared_experts, num_experts=config.moe_num_experts, top_k=config.moe_topk, hidden_size=config.hidden_size, @@ -269,13 +279,6 @@ def __init__( reduce_results=True, prefix=f"{prefix}.experts", ) - self.shared_experts = LlamaMLP( - config.hidden_size, - config.intermediate_size * config.moe_num_shared_experts, - "silu", - quant_config=quant_config, - bias=config.mlp_bias, - ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: """ @@ -291,12 +294,12 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: router_output = torch.nn.functional.linear(hidden_states, self.router_weight) - hidden_states_copy = hidden_states.clone() - # NOTE: hidden_states will be modified inplace by `FusedMoE` sparse_expert_output = self.experts(hidden_states, router_output) - shared_expert_output = self.shared_experts(hidden_states_copy) - return sparse_expert_output + shared_expert_output + if self.shared_experts is not None: + return sparse_expert_output[0] + sparse_expert_output[1] + else: + return sparse_expert_output class AriaTextDecoderLayer(LlamaDecoderLayer): diff --git a/vllm/model_executor/models/bailing_moe.py b/vllm/model_executor/models/bailing_moe.py index 3911ba599069..050af23352ff 100644 --- a/vllm/model_executor/models/bailing_moe.py +++ b/vllm/model_executor/models/bailing_moe.py @@ -43,7 +43,7 @@ tensor_model_parallel_all_reduce, ) from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( MergedColumnParallelLinear, @@ -276,22 +276,6 @@ def __init__( # default value for scoring_func self.score_function = "softmax" - self.experts = FusedMoE( - num_experts=self.num_experts, - top_k=self.top_k, - hidden_size=self.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=self.norm_expert_prob, - quant_config=quant_config, - prefix=f"{prefix}.experts", - scoring_func=self.score_function, - e_score_correction_bias=self.gate.expert_bias, - num_expert_group=self.n_group, - topk_group=self.topk_group, - use_grouped_topk=self.use_grouped_topk, - ) - if self.num_shared_experts > 0: if hasattr(config, "moe_shared_expert_intermediate_size"): intermediate_size = config.moe_shared_expert_intermediate_size @@ -308,11 +292,29 @@ def __init__( else: self.shared_experts = None + self.experts = SharedFusedMoE( + shared_experts=self.shared_experts, + fused_output_scaling_factor=self.routed_scaling_factor, + shared_output_scaling_factor=1.0, + num_experts=self.num_experts, + top_k=self.top_k, + hidden_size=self.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=self.norm_expert_prob, + quant_config=quant_config, + prefix=f"{prefix}.experts", + scoring_func=self.score_function, + e_score_correction_bias=self.gate.expert_bias, + num_expert_group=self.n_group, + topk_group=self.topk_group, + use_grouped_topk=self.use_grouped_topk, + ) + def forward(self, hidden_states: torch.Tensor) 
-> torch.Tensor: num_tokens, hidden_size = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_size) - if self.shared_experts: - shared_output = self.shared_experts(hidden_states) + # router_logits: (num_tokens, n_experts) router_logits = self.gate(hidden_states.to(self.router_dtype)) router_logits = router_logits.to(hidden_states.dtype) @@ -321,9 +323,14 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states=hidden_states, router_logits=router_logits ) + if self.shared_experts is not None: + shared_output, final_hidden_states = final_hidden_states + else: + shared_output = None + final_hidden_states *= self.routed_scaling_factor - if self.shared_experts: + if shared_output is not None: final_hidden_states = final_hidden_states + shared_output if self.tp_size > 1: @@ -475,7 +482,7 @@ def forward( return hidden_states def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index f149b02e5522..1eca6f9ca20e 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -49,7 +49,7 @@ from vllm.logger import init_logger from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import LayerNorm, RMSNorm from vllm.model_executor.layers.linear import ( ColumnParallelLinear, @@ -64,7 +64,6 @@ per_token_group_quant_fp8, ) from vllm.model_executor.layers.rotary_embedding import get_rope -from vllm.model_executor.layers.shared_fused_moe import SharedFusedMoE from vllm.model_executor.layers.vocab_parallel_embedding import ( ParallelLMHead, VocabParallelEmbedding, @@ -205,26 +204,6 @@ def __init__( ) if config.n_shared_experts is None: - self.experts = FusedMoE( - num_experts=config.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - use_grouped_topk=True, - num_expert_group=config.n_group, - topk_group=config.topk_group, - prefix=f"{prefix}.experts", - scoring_func=config.scoring_func, - # we do scaling outside, set factor to 1.0 to avoid double mul - routed_scaling_factor=1.0, - e_score_correction_bias=self.gate.e_score_correction_bias, - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - is_sequence_parallel=self.is_sequence_parallel, - ) self.shared_experts = None else: intermediate_size = config.moe_intermediate_size * config.n_shared_experts @@ -239,27 +218,27 @@ def __init__( prefix=f"{prefix}.shared_experts", ) - self.experts = SharedFusedMoE( - shared_experts=self.shared_experts, - num_experts=config.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - use_grouped_topk=True, - num_expert_group=config.n_group, - topk_group=config.topk_group, - prefix=f"{prefix}.experts", - 
scoring_func=config.scoring_func, - # we do scaling outside, set factor to 1.0 to avoid double mul - routed_scaling_factor=1.0, - e_score_correction_bias=self.gate.e_score_correction_bias, - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - is_sequence_parallel=self.is_sequence_parallel, - ) + self.experts = SharedFusedMoE( + shared_experts=self.shared_experts, + num_experts=config.n_routed_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + use_grouped_topk=True, + num_expert_group=config.n_group, + topk_group=config.topk_group, + prefix=f"{prefix}.experts", + scoring_func=config.scoring_func, + # we do scaling outside, set factor to 1.0 to avoid double mul + routed_scaling_factor=1.0, + e_score_correction_bias=self.gate.e_score_correction_bias, + enable_eplb=self.enable_eplb, + num_redundant_experts=self.n_redundant_experts, + is_sequence_parallel=self.is_sequence_parallel, + ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: num_tokens, hidden_dim = hidden_states.shape @@ -1293,7 +1272,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.num_moe_layers = config.num_hidden_layers - config.first_k_dense_replace self.num_expert_groups = config.n_group - self.moe_layers: list[FusedMoE] = [] + self.moe_layers: list[SharedFusedMoE] = [] example_moe = None for layer in self.model.layers: if isinstance(layer, PPMissingLayer): @@ -1381,7 +1360,7 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - expert_params_mapping = FusedMoE.make_expert_params_mapping( + expert_params_mapping = SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/dots1.py b/vllm/model_executor/models/dots1.py index 1ae7457fb215..460c8ff1c1cd 100644 --- a/vllm/model_executor/models/dots1.py +++ b/vllm/model_executor/models/dots1.py @@ -42,7 +42,7 @@ tensor_model_parallel_all_reduce, ) from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( MergedColumnParallelLinear, @@ -145,7 +145,22 @@ def __init__( else: self.gate.e_score_correction_bias = None - self.experts = FusedMoE( + if config.n_shared_experts is not None: + intermediate_size = (config.moe_intermediate_size * + config.n_shared_experts) + self.shared_experts = Dots1MLP( + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + reduce_results=False, + prefix=f"{prefix}.shared_experts", + ) + else: + self.shared_experts = None + + self.experts = SharedFusedMoE( + shared_experts=self.shared_experts, num_experts=config.n_routed_experts, top_k=config.num_experts_per_tok, hidden_size=config.hidden_size, @@ -163,29 +178,19 @@ def __init__( e_score_correction_bias=self.gate.e_score_correction_bias, ) - if config.n_shared_experts is not None: - intermediate_size = config.moe_intermediate_size * config.n_shared_experts - self.shared_experts = Dots1MLP( - 
hidden_size=config.hidden_size, - intermediate_size=intermediate_size, - hidden_act=config.hidden_act, - quant_config=quant_config, - reduce_results=False, - prefix=f"{prefix}.shared_experts", - ) - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: num_tokens, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) - if self.n_shared_experts is not None: - shared_output = self.shared_experts(hidden_states) + router_logits, _ = self.gate(hidden_states) - final_hidden_states = ( - self.experts(hidden_states=hidden_states, router_logits=router_logits) - * self.routed_scaling_factor - ) - if shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + final_hidden_states = self.experts( + hidden_states=hidden_states, + router_logits=router_logits) * self.routed_scaling_factor + + if self.shared_experts is not None: + final_hidden_states = final_hidden_states[0] + final_hidden_states[ + 1] + if self.tp_size > 1: final_hidden_states = tensor_model_parallel_all_reduce(final_hidden_states) return final_hidden_states.view(num_tokens, hidden_dim) @@ -426,7 +431,7 @@ def forward( return hidden_states def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/ernie45_moe.py b/vllm/model_executor/models/ernie45_moe.py index 3cb93177a383..b3cfb39a10e6 100644 --- a/vllm/model_executor/models/ernie45_moe.py +++ b/vllm/model_executor/models/ernie45_moe.py @@ -37,7 +37,7 @@ from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size from vllm.logger import init_logger from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( MergedColumnParallelLinear, @@ -145,7 +145,21 @@ def __init__( torch.empty(config.moe_num_experts, dtype=torch.float32) ) - self.experts = FusedMoE( + if self.has_shared_experts: + intermediate_size = (config.moe_intermediate_size * + config.moe_num_shared_experts) + self.shared_experts = Ernie4_5_MoeMLP( + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + prefix=f"{prefix}.shared_experts", + reduce_results=False) + else: + self.shared_experts = None + + self.experts = SharedFusedMoE( + shared_experts=self.shared_experts, num_experts=config.moe_num_experts, top_k=config.moe_k, hidden_size=config.hidden_size, @@ -157,26 +171,10 @@ def __init__( e_score_correction_bias=self.gate.e_score_correction_bias, ) - if self.has_shared_experts: - intermediate_size = ( - config.moe_intermediate_size * config.moe_num_shared_experts - ) - self.shared_experts = Ernie4_5_MoeMLP( - hidden_size=config.hidden_size, - intermediate_size=intermediate_size, - hidden_act=config.hidden_act, - quant_config=quant_config, - prefix=f"{prefix}.shared_experts", - reduce_results=self.experts.must_reduce_shared_expert_outputs(), - ) - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: orig_shape = hidden_states.shape hidden_dim = hidden_states.shape[-1] hidden_states = hidden_states.view(-1, hidden_dim) - shared_output = None - if self.has_shared_experts: - shared_output = 
self.shared_experts(hidden_states) router_logits, _ = self.gate(hidden_states.to(dtype=torch.float32)) @@ -184,8 +182,9 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states=hidden_states, router_logits=router_logits ) - if self.has_shared_experts and shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + if self.has_shared_experts: + final_hidden_states = final_hidden_states[0] + final_hidden_states[ + 1] if self.tp_size > 1: final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel( @@ -460,7 +459,7 @@ def forward( def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/ernie45_vl_moe.py b/vllm/model_executor/models/ernie45_vl_moe.py index 51f49b8587e6..82ab89f4c6ce 100644 --- a/vllm/model_executor/models/ernie45_vl_moe.py +++ b/vllm/model_executor/models/ernie45_vl_moe.py @@ -37,7 +37,7 @@ from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size from vllm.logger import init_logger -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( QKVParallelLinear, @@ -74,7 +74,18 @@ class Ernie4_5_VLMoeMLP(Ernie4_5_MoeMLP): - pass + + def __init__(self, + shared_experts: Optional[torch.nn.Module] = None, + **kwargs): + super().__init__(**kwargs) + self.shared_experts = shared_experts + + def forward(self, x): + if self.shared_experts is not None: + return self.shared_experts(x) + super().forward(x) + else: + return super().forward(x) class Ernie4_5_VLMoeAttention(nn.Module): @@ -223,10 +234,21 @@ def __init__( assert text_moe_layer_start_index <= text_moe_layer_end_index - if ( - layer_idx >= text_moe_layer_start_index - and layer_idx <= text_moe_layer_end_index - ): + if self.has_shared_experts: + intermediate_size = (config.moe_intermediate_size[0] * + config.moe_num_shared_experts) + self.shared_experts = Ernie4_5_VLMoeMLP( + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + prefix=f"{prefix}.shared_experts", + reduce_results=False) + else: + self.shared_experts = None + + if layer_idx >= text_moe_layer_start_index and \ + layer_idx <= text_moe_layer_end_index: self.text_experts_gate = ReplicatedLinear( config.hidden_size, config.moe_num_experts[0], @@ -236,7 +258,8 @@ def __init__( prefix=f"{prefix}.text_experts_gate", ) - self.text_experts = FusedMoE( + self.text_experts = SharedFusedMoE( + shared_experts=self.shared_experts, num_experts=config.moe_num_experts[0], top_k=config.moe_k, hidden_size=config.hidden_size, @@ -249,6 +272,7 @@ def __init__( ) else: self.text_experts = Ernie4_5_VLMoeMLP( + shared_experts=self.shared_experts, hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, @@ -271,7 +295,8 @@ def __init__( prefix=f"{prefix}.vision_experts_gate", ) - self.vision_experts = FusedMoE( + self.vision_experts = SharedFusedMoE( + shared_experts=self.shared_experts, 
num_experts=config.moe_num_experts[1], top_k=config.moe_k, hidden_size=config.hidden_size, @@ -284,6 +309,7 @@ def __init__( ) else: self.vision_experts = Ernie4_5_VLMoeMLP( + shared_experts=self.shared_experts, hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, @@ -292,19 +318,6 @@ def __init__( prefix=f"{prefix}.mlp", ) - if self.has_shared_experts: - intermediate_size = ( - config.moe_intermediate_size[0] * config.moe_num_shared_experts - ) - self.shared_experts = Ernie4_5_VLMoeMLP( - hidden_size=config.hidden_size, - intermediate_size=intermediate_size, - hidden_act=config.hidden_act, - quant_config=quant_config, - prefix=f"{prefix}.shared_experts", - reduce_results=self.text_experts.must_reduce_shared_expert_outputs(), - ) - def forward( self, hidden_states: torch.Tensor, @@ -315,9 +328,6 @@ def forward( hidden_dim = hidden_states.shape[-1] hidden_states = hidden_states.view(-1, hidden_dim) - if self.has_shared_experts: - shared_output = self.shared_experts(hidden_states) - if visual_token_mask is not None and visual_token_mask.all(): # only vision modal input router_logits, _ = self.vision_experts_gate( @@ -362,8 +372,9 @@ def forward( hidden_states=hidden_states, router_logits=text_router_logits ) - if self.has_shared_experts and shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + if self.has_shared_experts: + final_hidden_states = final_hidden_states[0] + final_hidden_states[ + 1] if self.tp_size > 1: final_hidden_states = ( @@ -649,7 +660,7 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - expert_params_mapping = FusedMoE.make_expert_params_mapping( + expert_params_mapping = SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/glm4_moe.py b/vllm/model_executor/models/glm4_moe.py index 5db6f297dbf2..d7e15e02544c 100644 --- a/vllm/model_executor/models/glm4_moe.py +++ b/vllm/model_executor/models/glm4_moe.py @@ -42,7 +42,7 @@ ) from vllm.logger import init_logger from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( MergedColumnParallelLinear, @@ -52,7 +52,6 @@ from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.rotary_embedding import get_rope -from vllm.model_executor.layers.shared_fused_moe import SharedFusedMoE from vllm.model_executor.layers.vocab_parallel_embedding import ( ParallelLMHead, VocabParallelEmbedding, @@ -176,46 +175,28 @@ def __init__( reduce_results=False, prefix=f"{prefix}.shared_experts", ) - self.experts = SharedFusedMoE( - shared_experts=self.shared_experts, - num_experts=config.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - use_grouped_topk=True, - num_expert_group=config.n_group, - topk_group=config.topk_group, - prefix=f"{prefix}.experts", - scoring_func="sigmoid", - 
# we do scaling outside, set factor to 1.0 to avoid double mul - routed_scaling_factor=1.0, - e_score_correction_bias=self.gate.e_score_correction_bias, - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - ) else: - self.experts = FusedMoE( - num_experts=config.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - use_grouped_topk=True, - num_expert_group=config.n_group, - topk_group=config.topk_group, - prefix=f"{prefix}.experts", - scoring_func="sigmoid", - # we do scaling outside, set factor to 1.0 to avoid double mul - routed_scaling_factor=1.0, - e_score_correction_bias=self.gate.e_score_correction_bias, - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - ) + self.shared_experts = None + + self.experts = SharedFusedMoE( + shared_experts=self.shared_experts, + num_experts=config.n_routed_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + use_grouped_topk=True, + num_expert_group=config.n_group, + topk_group=config.topk_group, + prefix=f"{prefix}.experts", + scoring_func="sigmoid", + # we do scaling outside, set factor to 1.0 to avoid double mul + routed_scaling_factor=1.0, + e_score_correction_bias=self.gate.e_score_correction_bias, + enable_eplb=self.enable_eplb, + num_redundant_experts=self.n_redundant_experts) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: num_tokens, hidden_dim = hidden_states.shape @@ -522,7 +503,7 @@ def make_empty_intermediate_tensors( def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", @@ -677,7 +658,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.num_moe_layers = config.num_hidden_layers - config.first_k_dense_replace self.num_expert_groups = config.n_group - self.moe_layers: list[FusedMoE] = [] + self.moe_layers: list[SharedFusedMoE] = [] example_moe = None for layer in self.model.layers: if isinstance(layer, PPMissingLayer): diff --git a/vllm/model_executor/models/hunyuan_v1.py b/vllm/model_executor/models/hunyuan_v1.py index d33406b7be2b..07cf3eaf2590 100644 --- a/vllm/model_executor/models/hunyuan_v1.py +++ b/vllm/model_executor/models/hunyuan_v1.py @@ -43,7 +43,7 @@ tensor_model_parallel_all_reduce, ) from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( ColumnParallelLinear, @@ -414,26 +414,11 @@ def __init__( self.physical_expert_start + self.n_local_physical_experts ) - self.experts = FusedMoE( - num_experts=self.n_routed_experts, - top_k=top_k, - hidden_size=config.hidden_size, - intermediate_size=intermediate_size, - reduce_results=False, - renormalize=top_k > 1, - quant_config=quant_config, - prefix=f"{prefix}.experts", - enable_eplb=self.enable_eplb, - 
num_redundant_experts=self.n_redundant_experts, - ) - - self.gate = ReplicatedLinear( - config.hidden_size, - config.num_experts, - bias=False, - quant_config=None, - prefix=f"{prefix}.gate", - ) + self.gate = ReplicatedLinear(config.hidden_size, + config.num_experts, + bias=False, + quant_config=None, + prefix=f"{prefix}.gate") if config.use_mixed_mlp_moe > 0: # Get layer_id num_shared_expert if config.num_shared_expert is # a list. @@ -454,22 +439,34 @@ def __init__( else: self.shared_mlp = None + self.experts = SharedFusedMoE( + shared_experts=self.shared_mlp, + num_experts=self.n_routed_experts, + top_k=top_k, + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + reduce_results=False, + renormalize=top_k > 1, + quant_config=quant_config, + prefix=f"{prefix}.experts", + enable_eplb=self.enable_eplb, + num_redundant_experts=self.n_redundant_experts, + ) + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # NOTE: hidden_states can have either 1D or 2D shape. orig_shape = hidden_states.shape hidden_dim = hidden_states.shape[-1] hidden_states = hidden_states.view(-1, hidden_dim) - shared_output = None - if self.shared_mlp is not None: - shared_output = self.shared_mlp(hidden_states) # router_logits: (num_tokens, n_experts) router_logits, _ = self.gate(hidden_states) - final_hidden_states = self.experts( - hidden_states=hidden_states, router_logits=router_logits - ) - if shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + final_hidden_states = self.experts(hidden_states=hidden_states, + router_logits=router_logits) + if self.shared_mlp is not None: + final_hidden_states = final_hidden_states[0] + final_hidden_states[ + 1] + if self.tp_size > 1: final_hidden_states = tensor_model_parallel_all_reduce(final_hidden_states) @@ -725,7 +722,7 @@ def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: if _is_moe(self.config): # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", @@ -1009,7 +1006,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): # Set MoE hyperparameters self.expert_weights = [] self.num_expert_groups = 1 - self.moe_layers: list[FusedMoE] = [] + self.moe_layers: list[SharedFusedMoE] = [] example_layer = None for layer in self.model.layers: if isinstance(layer, PPMissingLayer): diff --git a/vllm/model_executor/models/llama4.py b/vllm/model_executor/models/llama4.py index 075f35a098a4..eb260d4176e5 100644 --- a/vllm/model_executor/models/llama4.py +++ b/vllm/model_executor/models/llama4.py @@ -29,11 +29,9 @@ from vllm.attention.layers.chunked_local_attention import ChunkedLocalAttention from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig -from vllm.distributed import ( - get_tensor_model_parallel_world_size, - tensor_model_parallel_all_gather, -) -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.distributed import (get_tensor_model_parallel_world_size, + tensor_model_parallel_all_gather) +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( QKVParallelLinear, @@ -42,7 +40,6 @@ ) from vllm.model_executor.layers.quantization import QuantizationConfig 
from vllm.model_executor.layers.rotary_embedding import get_rope -from vllm.model_executor.layers.shared_fused_moe import SharedFusedMoE from vllm.model_executor.model_loader.weight_utils import ( default_weight_loader, maybe_remap_kv_scale_name, @@ -399,7 +396,7 @@ def load_moe_expert_weights( params_dict: The dictionary of module parameters. loaded_params: The set of already loaded parameters. expert_params_mapping: The mapping of expert parameters. Must be - generated by FusedMoE.make_expert_params_mapping(). + generated by SharedFusedMoE.make_expert_params_mapping(). fused: Whether the expert weights are fused into a single weight tensor or are separate weight tensors for each expert. When fused is True, loaded_weight should have shape of: @@ -522,7 +519,7 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: fused_experts_params = False # Expert parameter mapping for the case where the expert weights are # not fused into a single weight tensor. - expert_params_mapping = FusedMoE.make_expert_params_mapping( + expert_params_mapping = SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", @@ -530,7 +527,7 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: ) # Expert parameter mapping for the case where the expert weights are # fused into a single weight tensor. - expert_params_mapping_fused = FusedMoE.make_expert_params_mapping( + expert_params_mapping_fused = SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_up_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="gate_up_proj", diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index 61b203a08349..33537ce8f1b8 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -40,7 +40,7 @@ from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size from vllm.logger import init_logger from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( MergedColumnParallelLinear, @@ -79,6 +79,7 @@ def __init__( hidden_act: str, quant_config: Optional[QuantizationConfig] = None, reduce_results: bool = True, + expert_gate: Optional[torch.nn.Linear] = None, prefix: str = "", ) -> None: super().__init__() @@ -102,12 +103,17 @@ def __init__( f"Unsupported activation: {hidden_act}. Only silu is supported for now." 
) self.act_fn = SiluAndMul() + self.expert_gate = expert_gate def forward(self, x): gate_up, _ = self.gate_up_proj(x) - x = self.act_fn(gate_up) - x, _ = self.down_proj(x) - return x + out = self.act_fn(gate_up) + out, _ = self.down_proj(out) + + if self.expert_gate is not None: + out = F.sigmoid(self.expert_gate(x)) * out + + return out class Qwen2MoeSparseMoeBlock(nn.Module): @@ -137,46 +143,48 @@ def __init__( prefix=f"{prefix}.experts", ) - self.gate = ReplicatedLinear( - config.hidden_size, - config.num_experts, - bias=False, - quant_config=None, - prefix=f"{prefix}.gate", - ) + self.gate = ReplicatedLinear(config.hidden_size, + config.num_experts, + bias=False, + quant_config=None, + prefix=f"{prefix}.gate") if config.shared_expert_intermediate_size > 0: self.shared_expert = Qwen2MoeMLP( hidden_size=config.hidden_size, intermediate_size=config.shared_expert_intermediate_size, hidden_act=config.hidden_act, quant_config=quant_config, - reduce_results=self.experts.must_reduce_shared_expert_outputs(), + reduce_results=False, + expert_gate=torch.nn.Linear(config.hidden_size, 1, bias=False), prefix=f"{prefix}.shared_expert", ) else: self.shared_expert = None - self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False) + + self.experts = SharedFusedMoE( + shared_experts=self.shared_expert, + num_experts=config.num_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + prefix=f"{prefix}.experts") def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # NOTE: hidden_states can have either 1D or 2D shape. orig_shape = hidden_states.shape hidden_dim = hidden_states.shape[-1] hidden_states = hidden_states.view(-1, hidden_dim) - shared_output = None - if self.shared_expert is not None: - shared_output = self.shared_expert(hidden_states) - if self.shared_expert_gate is not None: - shared_output = ( - F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_output - ) # router_logits: (num_tokens, n_experts) router_logits, _ = self.gate(hidden_states) - final_hidden_states = self.experts( - hidden_states=hidden_states, router_logits=router_logits - ) - if shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + final_hidden_states = self.experts(hidden_states=hidden_states, + router_logits=router_logits) + if self.shared_expert is not None: + final_hidden_states = final_hidden_states[0] + final_hidden_states[ + 1] if self.tp_size > 1: final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel( # noqa E501 final_hidden_states @@ -418,7 +426,7 @@ def forward( def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", diff --git a/vllm/model_executor/models/qwen3_next.py b/vllm/model_executor/models/qwen3_next.py index cea3faf45a14..5541df23a67f 100644 --- a/vllm/model_executor/models/qwen3_next.py +++ b/vllm/model_executor/models/qwen3_next.py @@ -7,7 +7,6 @@ from typing import Optional import torch -import torch.nn.functional as F from einops import rearrange from torch import nn from transformers.activations import ACT2FN @@ -32,18 +31,17 @@ from 
vllm.forward_context import ForwardContext, get_forward_context from vllm.logger import init_logger from vllm.model_executor.layers.fla.ops import ( - RMSNormGated, - chunk_gated_delta_rule, - fused_recurrent_gated_delta_rule, -) -from vllm.model_executor.layers.fused_moe import FusedMoE -from vllm.model_executor.layers.layernorm import GemmaRMSNorm as Qwen3NextRMSNorm -from vllm.model_executor.layers.linear import ( - ColumnParallelLinear, - QKVParallelLinear, - ReplicatedLinear, - RowParallelLinear, -) + RMSNormGated, chunk_gated_delta_rule, fused_recurrent_gated_delta_rule) +from vllm.model_executor.layers.fused_moe import SharedFusedMoE +# yapf conflicts with isort for this block +# yapf: disable +from vllm.model_executor.layers.layernorm import ( + GemmaRMSNorm as Qwen3NextRMSNorm) +# yapf: enable +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + QKVParallelLinear, + ReplicatedLinear, + RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.mamba.abstract import MambaBase from vllm.model_executor.layers.mamba.mamba_mixer2 import mamba_v2_sharded_weight_loader @@ -131,32 +129,16 @@ def __init__(self, vllm_config: VllmConfig, prefix: str = ""): self.n_physical_experts = self.n_logical_experts + self.n_redundant_experts self.n_local_physical_experts = self.n_physical_experts // self.ep_size - self.physical_expert_start = self.ep_rank * self.n_local_physical_experts - self.physical_expert_end = ( - self.physical_expert_start + self.n_local_physical_experts - ) + self.physical_expert_start = (self.ep_rank * + self.n_local_physical_experts) + self.physical_expert_end = (self.physical_expert_start + + self.n_local_physical_experts) - self.experts = FusedMoE( - num_experts=self.n_routed_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - prefix=f"{prefix}.experts", - enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts, - is_sequence_parallel=self.is_sequence_parallel, - ) - - self.gate = ReplicatedLinear( - config.hidden_size, - config.num_experts, - bias=False, - quant_config=quant_config, - prefix=f"{prefix}.gate", - ) + self.gate = ReplicatedLinear(config.hidden_size, + config.num_experts, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.gate") if config.shared_expert_intermediate_size > 0: self.shared_expert = Qwen3NextMLP( @@ -164,12 +146,26 @@ def __init__(self, vllm_config: VllmConfig, prefix: str = ""): intermediate_size=config.shared_expert_intermediate_size, hidden_act=config.hidden_act, quant_config=quant_config, - reduce_results=self.experts.must_reduce_shared_expert_outputs(), + reduce_results=False, + expert_gate=torch.nn.Linear(config.hidden_size, 1, bias=False), prefix=f"{prefix}.shared_expert", ) else: self.shared_expert = None - self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False) + + self.experts = SharedFusedMoE( + shared_experts=self.shared_expert, + num_experts=self.n_routed_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + prefix=f"{prefix}.experts", + enable_eplb=self.enable_eplb, + num_redundant_experts=self.n_redundant_experts, + is_sequence_parallel=self.is_sequence_parallel) def 
forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # NOTE: hidden_states can have either 1D or 2D shape. @@ -180,22 +176,15 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if self.is_sequence_parallel: hidden_states = sequence_parallel_chunk(hidden_states) - shared_output = None - if self.shared_expert is not None: - shared_output = self.shared_expert(hidden_states) - if self.shared_expert_gate is not None: - shared_output = ( - F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_output - ) - # router_logits: (num_tokens, n_experts) router_logits, _ = self.gate(hidden_states) final_hidden_states = self.experts( hidden_states=hidden_states, router_logits=router_logits ) - if shared_output is not None: - final_hidden_states = final_hidden_states + shared_output + if self.shared_expert is not None: + final_hidden_states = final_hidden_states[0] + final_hidden_states[ + 1] if self.is_sequence_parallel: final_hidden_states = tensor_model_parallel_all_gather( @@ -1008,7 +997,7 @@ def forward( def get_expert_mapping(self) -> list[tuple[str, str, int, str]]: # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) - return FusedMoE.make_expert_params_mapping( + return SharedFusedMoE.make_expert_params_mapping( ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", @@ -1150,7 +1139,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): # Set MoE hyperparameters self.expert_weights = [] - self.moe_layers: list[FusedMoE] = [] + self.moe_layers: list[SharedFusedMoE] = [] example_layer = None for layer in self.model.layers: if isinstance(layer, PPMissingLayer): From 0e42864662969386147f635dc5f19559a637402b Mon Sep 17 00:00:00 2001 From: Bill Nell Date: Mon, 6 Oct 2025 15:39:50 +0000 Subject: [PATCH 2/7] fix formatting nonsense Signed-off-by: Bill Nell --- vllm/model_executor/models/qwen3_next.py | 46 ++++++++++++++---------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/vllm/model_executor/models/qwen3_next.py b/vllm/model_executor/models/qwen3_next.py index 5541df23a67f..b0ae22879dea 100644 --- a/vllm/model_executor/models/qwen3_next.py +++ b/vllm/model_executor/models/qwen3_next.py @@ -31,17 +31,23 @@ from vllm.forward_context import ForwardContext, get_forward_context from vllm.logger import init_logger from vllm.model_executor.layers.fla.ops import ( - RMSNormGated, chunk_gated_delta_rule, fused_recurrent_gated_delta_rule) + RMSNormGated, + chunk_gated_delta_rule, + fused_recurrent_gated_delta_rule, +) from vllm.model_executor.layers.fused_moe import SharedFusedMoE + # yapf conflicts with isort for this block # yapf: disable -from vllm.model_executor.layers.layernorm import ( - GemmaRMSNorm as Qwen3NextRMSNorm) +from vllm.model_executor.layers.layernorm import GemmaRMSNorm as Qwen3NextRMSNorm + # yapf: enable -from vllm.model_executor.layers.linear import (ColumnParallelLinear, - QKVParallelLinear, - ReplicatedLinear, - RowParallelLinear) +from vllm.model_executor.layers.linear import ( + ColumnParallelLinear, + QKVParallelLinear, + ReplicatedLinear, + RowParallelLinear, +) from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.mamba.abstract import MambaBase from vllm.model_executor.layers.mamba.mamba_mixer2 import mamba_v2_sharded_weight_loader @@ -129,16 +135,18 @@ def __init__(self, vllm_config: VllmConfig, prefix: str = ""): self.n_physical_experts = self.n_logical_experts + 
self.n_redundant_experts self.n_local_physical_experts = self.n_physical_experts // self.ep_size - self.physical_expert_start = (self.ep_rank * - self.n_local_physical_experts) - self.physical_expert_end = (self.physical_expert_start + - self.n_local_physical_experts) + self.physical_expert_start = self.ep_rank * self.n_local_physical_experts + self.physical_expert_end = ( + self.physical_expert_start + self.n_local_physical_experts + ) - self.gate = ReplicatedLinear(config.hidden_size, - config.num_experts, - bias=False, - quant_config=quant_config, - prefix=f"{prefix}.gate") + self.gate = ReplicatedLinear( + config.hidden_size, + config.num_experts, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.gate", + ) if config.shared_expert_intermediate_size > 0: self.shared_expert = Qwen3NextMLP( @@ -165,7 +173,8 @@ def __init__(self, vllm_config: VllmConfig, prefix: str = ""): prefix=f"{prefix}.experts", enable_eplb=self.enable_eplb, num_redundant_experts=self.n_redundant_experts, - is_sequence_parallel=self.is_sequence_parallel) + is_sequence_parallel=self.is_sequence_parallel, + ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # NOTE: hidden_states can have either 1D or 2D shape. @@ -183,8 +192,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: ) if self.shared_expert is not None: - final_hidden_states = final_hidden_states[0] + final_hidden_states[ - 1] + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] if self.is_sequence_parallel: final_hidden_states = tensor_model_parallel_all_gather( From 4a1a2a89a205c265917c2ed19557433bc8699df8 Mon Sep 17 00:00:00 2001 From: Bill Nell Date: Mon, 6 Oct 2025 16:58:25 +0000 Subject: [PATCH 3/7] more formatting nonsense Signed-off-by: Bill Nell --- vllm/model_executor/models/dots1.py | 13 +++++----- vllm/model_executor/models/ernie45_moe.py | 11 ++++---- vllm/model_executor/models/hunyuan_v1.py | 20 ++++++++------- vllm/model_executor/models/qwen2_moe.py | 31 +++++++++-------------- 4 files changed, 35 insertions(+), 40 deletions(-) diff --git a/vllm/model_executor/models/dots1.py b/vllm/model_executor/models/dots1.py index 460c8ff1c1cd..55f8d4b231f7 100644 --- a/vllm/model_executor/models/dots1.py +++ b/vllm/model_executor/models/dots1.py @@ -146,8 +146,7 @@ def __init__( self.gate.e_score_correction_bias = None if config.n_shared_experts is not None: - intermediate_size = (config.moe_intermediate_size * - config.n_shared_experts) + intermediate_size = config.moe_intermediate_size * config.n_shared_experts self.shared_experts = Dots1MLP( hidden_size=config.hidden_size, intermediate_size=intermediate_size, @@ -183,13 +182,13 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = hidden_states.view(-1, hidden_dim) router_logits, _ = self.gate(hidden_states) - final_hidden_states = self.experts( - hidden_states=hidden_states, - router_logits=router_logits) * self.routed_scaling_factor + final_hidden_states = ( + self.experts(hidden_states=hidden_states, router_logits=router_logits) + * self.routed_scaling_factor + ) if self.shared_experts is not None: - final_hidden_states = final_hidden_states[0] + final_hidden_states[ - 1] + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] if self.tp_size > 1: final_hidden_states = tensor_model_parallel_all_reduce(final_hidden_states) diff --git a/vllm/model_executor/models/ernie45_moe.py b/vllm/model_executor/models/ernie45_moe.py index b3cfb39a10e6..7516cb5abaf9 100644 --- 
a/vllm/model_executor/models/ernie45_moe.py +++ b/vllm/model_executor/models/ernie45_moe.py @@ -146,15 +146,17 @@ def __init__( ) if self.has_shared_experts: - intermediate_size = (config.moe_intermediate_size * - config.moe_num_shared_experts) + intermediate_size = ( + config.moe_intermediate_size * config.moe_num_shared_experts + ) self.shared_experts = Ernie4_5_MoeMLP( hidden_size=config.hidden_size, intermediate_size=intermediate_size, hidden_act=config.hidden_act, quant_config=quant_config, prefix=f"{prefix}.shared_experts", - reduce_results=False) + reduce_results=False, + ) else: self.shared_experts = None @@ -183,8 +185,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: ) if self.has_shared_experts: - final_hidden_states = final_hidden_states[0] + final_hidden_states[ - 1] + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] if self.tp_size > 1: final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel( diff --git a/vllm/model_executor/models/hunyuan_v1.py b/vllm/model_executor/models/hunyuan_v1.py index 07cf3eaf2590..0b9b7287fdc3 100644 --- a/vllm/model_executor/models/hunyuan_v1.py +++ b/vllm/model_executor/models/hunyuan_v1.py @@ -414,11 +414,13 @@ def __init__( self.physical_expert_start + self.n_local_physical_experts ) - self.gate = ReplicatedLinear(config.hidden_size, - config.num_experts, - bias=False, - quant_config=None, - prefix=f"{prefix}.gate") + self.gate = ReplicatedLinear( + config.hidden_size, + config.num_experts, + bias=False, + quant_config=None, + prefix=f"{prefix}.gate", + ) if config.use_mixed_mlp_moe > 0: # Get layer_id num_shared_expert if config.num_shared_expert is # a list. @@ -461,11 +463,11 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # router_logits: (num_tokens, n_experts) router_logits, _ = self.gate(hidden_states) - final_hidden_states = self.experts(hidden_states=hidden_states, - router_logits=router_logits) + final_hidden_states = self.experts( + hidden_states=hidden_states, router_logits=router_logits + ) if self.shared_mlp is not None: - final_hidden_states = final_hidden_states[0] + final_hidden_states[ - 1] + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] if self.tp_size > 1: final_hidden_states = tensor_model_parallel_all_reduce(final_hidden_states) diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index 33537ce8f1b8..ea16134cf778 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -132,22 +132,14 @@ def __init__( f"the number of experts {config.num_experts}." 
) - self.experts = FusedMoE( - num_experts=config.num_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.moe_intermediate_size, - reduce_results=False, - renormalize=config.norm_topk_prob, - quant_config=quant_config, - prefix=f"{prefix}.experts", + self.gate = ReplicatedLinear( + config.hidden_size, + config.num_experts, + bias=False, + quant_config=None, + prefix=f"{prefix}.gate", ) - self.gate = ReplicatedLinear(config.hidden_size, - config.num_experts, - bias=False, - quant_config=None, - prefix=f"{prefix}.gate") if config.shared_expert_intermediate_size > 0: self.shared_expert = Qwen2MoeMLP( hidden_size=config.hidden_size, @@ -170,7 +162,8 @@ def __init__( reduce_results=False, renormalize=config.norm_topk_prob, quant_config=quant_config, - prefix=f"{prefix}.experts") + prefix=f"{prefix}.experts", + ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # NOTE: hidden_states can have either 1D or 2D shape. @@ -180,11 +173,11 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # router_logits: (num_tokens, n_experts) router_logits, _ = self.gate(hidden_states) - final_hidden_states = self.experts(hidden_states=hidden_states, - router_logits=router_logits) + final_hidden_states = self.experts( + hidden_states=hidden_states, router_logits=router_logits + ) if self.shared_expert is not None: - final_hidden_states = final_hidden_states[0] + final_hidden_states[ - 1] + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] if self.tp_size > 1: final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel( # noqa E501 final_hidden_states From b20980c65ac7d30cdb633b5175ed3633dfe1fc3d Mon Sep 17 00:00:00 2001 From: Bill Nell Date: Mon, 6 Oct 2025 17:03:01 +0000 Subject: [PATCH 4/7] more formatting nonsense Signed-off-by: Bill Nell --- .../layers/fused_moe/__init__.py | 9 ++++---- .../layers/fused_moe/shared_fused_moe.py | 15 ++++++++----- vllm/model_executor/models/aria.py | 9 ++++---- vllm/model_executor/models/ernie45_vl_moe.py | 22 +++++++++---------- vllm/model_executor/models/glm4_moe.py | 3 ++- vllm/model_executor/models/llama4.py | 6 +++-- 6 files changed, 36 insertions(+), 28 deletions(-) diff --git a/vllm/model_executor/layers/fused_moe/__init__.py b/vllm/model_executor/layers/fused_moe/__init__.py index 5ff1e83617b7..799f78284894 100644 --- a/vllm/model_executor/layers/fused_moe/__init__.py +++ b/vllm/model_executor/layers/fused_moe/__init__.py @@ -11,10 +11,11 @@ FusedMoeWeightScaleSupported, ) from vllm.model_executor.layers.fused_moe.modular_kernel import ( - FusedMoEActivationFormat, FusedMoEPermuteExpertsUnpermute, - FusedMoEPrepareAndFinalize) -from vllm.model_executor.layers.fused_moe.shared_fused_moe import ( - SharedFusedMoE) + FusedMoEActivationFormat, + FusedMoEPermuteExpertsUnpermute, + FusedMoEPrepareAndFinalize, +) +from vllm.model_executor.layers.fused_moe.shared_fused_moe import SharedFusedMoE from vllm.model_executor.layers.fused_moe.utils import activation_without_mul from vllm.triton_utils import HAS_TRITON diff --git a/vllm/model_executor/layers/fused_moe/shared_fused_moe.py b/vllm/model_executor/layers/fused_moe/shared_fused_moe.py index 4f9cc682b6ec..a678fdae8833 100644 --- a/vllm/model_executor/layers/fused_moe/shared_fused_moe.py +++ b/vllm/model_executor/layers/fused_moe/shared_fused_moe.py @@ -28,9 +28,11 @@ def __init__( # flashinfer + DP since there is nothing to be gained in this case. 
# Disabling the overlap optimization also prevents the shared experts # from being hidden from torch.compile. - self.use_overlapped = use_overlapped and not ( - self.use_ep or self.use_flashinfer_cutlass_kernels - ) and self._shared_experts is not None + self.use_overlapped = ( + use_overlapped + and not (self.use_ep or self.use_flashinfer_cutlass_kernels) + and self._shared_experts is not None + ) @property def shared_experts(self) -> Optional[torch.nn.Module]: @@ -47,8 +49,11 @@ def forward( # Reduce shared expert outputs if necessary, since the MLP # should have been created with reduce_results=False. - if (self.reduce_results and self.tp_size > 1 - and self.must_reduce_shared_expert_outputs()): + if ( + self.reduce_results + and self.tp_size > 1 + and self.must_reduce_shared_expert_outputs() + ): shared_out = tensor_model_parallel_all_reduce(shared_out) else: shared_out = None diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py index 79d535ec904d..734ae8cbd608 100644 --- a/vllm/model_executor/models/aria.py +++ b/vllm/model_executor/models/aria.py @@ -14,8 +14,7 @@ from vllm.distributed import get_tensor_model_parallel_rank from vllm.model_executor.layers.activation import get_act_fn from vllm.model_executor.layers.fused_moe import SharedFusedMoE -from vllm.model_executor.layers.linear import (ColumnParallelLinear, - RowParallelLinear) +from vllm.model_executor.layers.linear import ColumnParallelLinear, RowParallelLinear from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead @@ -208,9 +207,9 @@ def forward( class AriaFusedMoE(SharedFusedMoE): - - def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor, - shard_id: str) -> None: + def weight_loader( + self, param: nn.Parameter, loaded_weight: torch.Tensor, shard_id: str + ) -> None: # Override the weight_loader to handle the expert weights in the Aria # model, which are already packed with experts, and merge the gate and # up weights for each expert. 
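The two shared_fused_moe.py hunks above carry the substance of this reformatting commit: the gating expression that decides whether `use_overlapped` stays on, and the non-overlapped branch that all-reduces the shared output only when `reduce_results`, `tp_size > 1`, and `must_reduce_shared_expert_outputs()` all require it. As a reading aid, here is a minimal, single-process sketch of that control flow and of the tuple the callers in this series consume. Only the attribute names are taken from the patch; the toy routed-expert module, the omitted all-reduce, and the demo shapes are assumptions for illustration, not vLLM's implementation.

from typing import Optional

import torch
import torch.nn as nn


class ToyRoutedExperts(nn.Module):
    """Stand-in for the routed-expert computation done by FusedMoE."""

    def __init__(self, hidden_size: int) -> None:
        super().__init__()
        self.proj = nn.Linear(hidden_size, hidden_size, bias=False)

    def forward(self, hidden_states: torch.Tensor,
                router_logits: torch.Tensor) -> torch.Tensor:
        # A real implementation would route tokens by router_logits; the toy
        # applies one linear layer so the sketch stays self-contained.
        return self.proj(hidden_states)


class ToySharedFusedMoE(nn.Module):
    """Sketch of the tuple-returning forward described by the patch."""

    def __init__(
        self,
        shared_experts: Optional[nn.Module],
        routed_experts: nn.Module,
        *,
        use_overlapped: bool = True,
        use_ep: bool = False,
        use_flashinfer_cutlass_kernels: bool = False,
        reduce_results: bool = True,
        tp_size: int = 1,
    ) -> None:
        super().__init__()
        self._shared_experts = shared_experts
        self.routed_experts = routed_experts
        self.reduce_results = reduce_results
        self.tp_size = tp_size
        # Same gating expression as the hunk above, with the flags passed in
        # explicitly because the toy has no parallel or kernel state.
        self.use_overlapped = (
            use_overlapped
            and not (use_ep or use_flashinfer_cutlass_kernels)
            and shared_experts is not None
        )

    def forward(
        self, hidden_states: torch.Tensor, router_logits: torch.Tensor
    ) -> tuple[Optional[torch.Tensor], torch.Tensor]:
        if not self.use_overlapped:
            if self._shared_experts is not None:
                shared_out = self._shared_experts(hidden_states)
                # The real layer all-reduces shared_out here when
                # reduce_results, tp_size > 1, and
                # must_reduce_shared_expert_outputs() agree; a single-process
                # toy has nothing to reduce.
            else:
                shared_out = None
            return shared_out, self.routed_experts(hidden_states, router_logits)
        # Overlapped path: the real layer hands the shared experts to the
        # fused-MoE machinery; the toy simply reuses the eager computation.
        return (
            self._shared_experts(hidden_states),
            self.routed_experts(hidden_states, router_logits),
        )


if __name__ == "__main__":
    hidden = torch.randn(4, 8)
    layer = ToySharedFusedMoE(nn.Linear(8, 8, bias=False), ToyRoutedExperts(8))
    shared, fused = layer(hidden, router_logits=torch.randn(4, 2))
    # Same sum the model forwards in this series perform on the returned pair.
    out = fused if shared is None else shared + fused
    print(out.shape)  # torch.Size([4, 8])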
diff --git a/vllm/model_executor/models/ernie45_vl_moe.py b/vllm/model_executor/models/ernie45_vl_moe.py index 82ab89f4c6ce..2c4989556140 100644 --- a/vllm/model_executor/models/ernie45_vl_moe.py +++ b/vllm/model_executor/models/ernie45_vl_moe.py @@ -74,10 +74,7 @@ class Ernie4_5_VLMoeMLP(Ernie4_5_MoeMLP): - - def __init__(self, - shared_experts: Optional[torch.nn.Module] = None, - **kwargs): + def __init__(self, shared_experts: Optional[torch.nn.Module] = None, **kwargs): super().__init__(**kwargs) self.shared_experts = shared_experts @@ -235,20 +232,24 @@ def __init__( assert text_moe_layer_start_index <= text_moe_layer_end_index if self.has_shared_experts: - intermediate_size = (config.moe_intermediate_size[0] * - config.moe_num_shared_experts) + intermediate_size = ( + config.moe_intermediate_size[0] * config.moe_num_shared_experts + ) self.shared_experts = Ernie4_5_VLMoeMLP( hidden_size=config.hidden_size, intermediate_size=intermediate_size, hidden_act=config.hidden_act, quant_config=quant_config, prefix=f"{prefix}.shared_experts", - reduce_results=False) + reduce_results=False, + ) else: self.shared_experts = None - if layer_idx >= text_moe_layer_start_index and \ - layer_idx <= text_moe_layer_end_index: + if ( + layer_idx >= text_moe_layer_start_index + and layer_idx <= text_moe_layer_end_index + ): self.text_experts_gate = ReplicatedLinear( config.hidden_size, config.moe_num_experts[0], @@ -373,8 +374,7 @@ def forward( ) if self.has_shared_experts: - final_hidden_states = final_hidden_states[0] + final_hidden_states[ - 1] + final_hidden_states = final_hidden_states[0] + final_hidden_states[1] if self.tp_size > 1: final_hidden_states = ( diff --git a/vllm/model_executor/models/glm4_moe.py b/vllm/model_executor/models/glm4_moe.py index d7e15e02544c..b9cdee29417a 100644 --- a/vllm/model_executor/models/glm4_moe.py +++ b/vllm/model_executor/models/glm4_moe.py @@ -196,7 +196,8 @@ def __init__( routed_scaling_factor=1.0, e_score_correction_bias=self.gate.e_score_correction_bias, enable_eplb=self.enable_eplb, - num_redundant_experts=self.n_redundant_experts) + num_redundant_experts=self.n_redundant_experts, + ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: num_tokens, hidden_dim = hidden_states.shape diff --git a/vllm/model_executor/models/llama4.py b/vllm/model_executor/models/llama4.py index eb260d4176e5..df7bd9b7f6d1 100644 --- a/vllm/model_executor/models/llama4.py +++ b/vllm/model_executor/models/llama4.py @@ -29,8 +29,10 @@ from vllm.attention.layers.chunked_local_attention import ChunkedLocalAttention from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig -from vllm.distributed import (get_tensor_model_parallel_world_size, - tensor_model_parallel_all_gather) +from vllm.distributed import ( + get_tensor_model_parallel_world_size, + tensor_model_parallel_all_gather, +) from vllm.model_executor.layers.fused_moe import SharedFusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import ( From 275d525e5deaa68f71f5aa417b3f1bf5c3282c5f Mon Sep 17 00:00:00 2001 From: Bill Nell Date: Wed, 8 Oct 2025 00:31:55 +0000 Subject: [PATCH 5/7] get rid of yapf comment Signed-off-by: Bill Nell --- vllm/model_executor/models/qwen3_next.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/vllm/model_executor/models/qwen3_next.py b/vllm/model_executor/models/qwen3_next.py index b0ae22879dea..ef5978129dd0 100644 --- a/vllm/model_executor/models/qwen3_next.py +++ 
b/vllm/model_executor/models/qwen3_next.py @@ -36,12 +36,7 @@ fused_recurrent_gated_delta_rule, ) from vllm.model_executor.layers.fused_moe import SharedFusedMoE - -# yapf conflicts with isort for this block -# yapf: disable from vllm.model_executor.layers.layernorm import GemmaRMSNorm as Qwen3NextRMSNorm - -# yapf: enable from vllm.model_executor.layers.linear import ( ColumnParallelLinear, QKVParallelLinear, From 6d474d69efd0e03986774f80e3bc343c715fbaf6 Mon Sep 17 00:00:00 2001 From: Bill Nell Date: Thu, 9 Oct 2025 13:06:27 +0000 Subject: [PATCH 6/7] fix tests Signed-off-by: Bill Nell --- vllm/model_executor/models/qwen2_moe.py | 4 +++- vllm/model_executor/models/qwen3_next.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index ea16134cf778..c57299a2d390 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -140,6 +140,8 @@ def __init__( prefix=f"{prefix}.gate", ) + self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False) + if config.shared_expert_intermediate_size > 0: self.shared_expert = Qwen2MoeMLP( hidden_size=config.hidden_size, @@ -147,7 +149,7 @@ def __init__( hidden_act=config.hidden_act, quant_config=quant_config, reduce_results=False, - expert_gate=torch.nn.Linear(config.hidden_size, 1, bias=False), + expert_gate=self.shared_expert_gate, prefix=f"{prefix}.shared_expert", ) else: diff --git a/vllm/model_executor/models/qwen3_next.py b/vllm/model_executor/models/qwen3_next.py index ef5978129dd0..50629bb2e4a2 100644 --- a/vllm/model_executor/models/qwen3_next.py +++ b/vllm/model_executor/models/qwen3_next.py @@ -143,6 +143,8 @@ def __init__(self, vllm_config: VllmConfig, prefix: str = ""): prefix=f"{prefix}.gate", ) + self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False) + if config.shared_expert_intermediate_size > 0: self.shared_expert = Qwen3NextMLP( hidden_size=config.hidden_size, @@ -150,7 +152,7 @@ def __init__(self, vllm_config: VllmConfig, prefix: str = ""): hidden_act=config.hidden_act, quant_config=quant_config, reduce_results=False, - expert_gate=torch.nn.Linear(config.hidden_size, 1, bias=False), + expert_gate=self.shared_expert_gate, prefix=f"{prefix}.shared_expert", ) else: From 663389f6b9602ffef4070d642a477bdadfe10d00 Mon Sep 17 00:00:00 2001 From: Bill Nell Date: Thu, 9 Oct 2025 13:08:54 +0000 Subject: [PATCH 7/7] fix tests Signed-off-by: Bill Nell --- vllm/model_executor/models/bailing_moe.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/vllm/model_executor/models/bailing_moe.py b/vllm/model_executor/models/bailing_moe.py index 050af23352ff..c016d46e194f 100644 --- a/vllm/model_executor/models/bailing_moe.py +++ b/vllm/model_executor/models/bailing_moe.py @@ -294,8 +294,6 @@ def __init__( self.experts = SharedFusedMoE( shared_experts=self.shared_experts, - fused_output_scaling_factor=self.routed_scaling_factor, - shared_output_scaling_factor=1.0, num_experts=self.num_experts, top_k=self.top_k, hidden_size=self.hidden_size,
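The last two commits hoist the shared expert gate out of the MLP constructor call: `self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False)` is now created on the MoE block itself in both qwen2_moe.py and qwen3_next.py and passed along as `expert_gate=self.shared_expert_gate`. The commit message only says "fix tests", but the visible effect is that the gate becomes a directly registered submodule of the block, so its weight appears under a stable `shared_expert_gate.weight` name rather than only inside the shared expert, which is presumably what the weight-loading tests rely on. The toy sketch below illustrates that naming effect; the stand-in classes and the "before" variant are assumptions for illustration, not the real vLLM modules.

import torch
import torch.nn as nn


class ToySharedExpert(nn.Module):
    """Stand-in for the shared-expert MLP; it keeps the gate it is handed."""

    def __init__(self, hidden_size: int, expert_gate: nn.Module) -> None:
        super().__init__()
        self.expert_gate = expert_gate
        self.up = nn.Linear(hidden_size, hidden_size, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.sigmoid(self.expert_gate(x)) * self.up(x)


class BlockBefore(nn.Module):
    """Gate constructed inline, so it only lives inside the shared expert."""

    def __init__(self, hidden_size: int = 8) -> None:
        super().__init__()
        self.shared_expert = ToySharedExpert(
            hidden_size, expert_gate=nn.Linear(hidden_size, 1, bias=False)
        )


class BlockAfter(nn.Module):
    """Gate hoisted onto the block, mirroring the patch."""

    def __init__(self, hidden_size: int = 8) -> None:
        super().__init__()
        self.shared_expert_gate = nn.Linear(hidden_size, 1, bias=False)
        self.shared_expert = ToySharedExpert(
            hidden_size, expert_gate=self.shared_expert_gate
        )


if __name__ == "__main__":
    print(sorted(n for n, _ in BlockBefore().named_parameters()))
    # ['shared_expert.expert_gate.weight', 'shared_expert.up.weight']
    print(sorted(n for n, _ in BlockAfter().named_parameters()))
    # ['shared_expert.up.weight', 'shared_expert_gate.weight']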