diff --git a/examples/modeling/configuration_doge.py b/examples/modeling/configuration_doge.py new file mode 100644 index 0000000..624312b --- /dev/null +++ b/examples/modeling/configuration_doge.py @@ -0,0 +1,241 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from src/transformers/models/doge/modular_doge.py. +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the modular. If any change should be done, please apply the change to the +# modular_doge.py file directly. One of our CI enforces this. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# coding=utf-8 +# Copyright 2025 Jingze Shi and the HuggingFace Inc. team. All rights reserved. +# +# The Doge family of small language models is trained by SmallDoge Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from transformers.configuration_utils import PretrainedConfig +from transformers.modeling_rope_utils import rope_config_validation + + +class DogeConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate a Doge + model according to the specified arguments, defining the model architecture like [SmallDoge/Doge-320M](https://huggingface.co/SmallDoge/Doge-320M). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 32768): + Vocabulary size of the Doge model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`DogeModel`] + hidden_size (`int`, *optional*, defaults to 1024): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 2048): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer decoder. + hidden_dropout (`float`, *optional*, defaults to 0.0): + Dropout probability for each sequence transformation and state transformation module. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the rms normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether the model's input and output word embeddings should be tied.
+ max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. + rope_theta (`float`, *optional*, defaults to 10000.0): + The base period of the RoPE embeddings. + rope_scaling (`Dict`, *optional*): + Dictionary containing the scaling configuration for the RoPE embeddings. + NOTE: if you apply a new rope type and you expect the model to work on a longer `max_position_embeddings`, we recommend you update this value accordingly. + The Doge family of small models uses `{ 'rope_type': 'dynamic', 'factor': 4.0, 'original_max_position_embeddings': 2048 }` as the default value. + Expected contents: + `rope_type` (`str`): + The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. + `factor` (`float`, *optional*): + Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. + In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. + `original_max_position_embeddings` (`int`, *optional*): + Used with 'dynamic', 'longrope' and 'llama3'. + The original max position embeddings used during pretraining. + `attention_factor` (`float`, *optional*): + Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention + computation. + If unspecified, it defaults to the value recommended by the implementation, using the `factor` field to infer the suggested value. + `beta_fast` (`float`, *optional*): + Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear + ramp function. If unspecified, it defaults to 32. + `beta_slow` (`float`, *optional*): + Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear + ramp function. If unspecified, it defaults to 1. + `short_factor` (`List[float]`, *optional*): + Only used with 'longrope'. The scaling factor to be applied to short contexts (<`original_max_position_embeddings`). + Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 + `long_factor` (`List[float]`, *optional*): + Only used with 'longrope'. The scaling factor to be applied to long contexts (>`original_max_position_embeddings`). + Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 + `low_freq_factor` (`float`, *optional*): + Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE + `high_freq_factor` (`float`, *optional*): + Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE + num_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer decoder. + num_key_value_heads (`int`, *optional*): + This is the number of key_value heads that should be used to implement Grouped Query Attention. + If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if + `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. + When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. + For more details, check out [this paper](https://arxiv.org/pdf/2305.13245.pdf).
+ If it is not specified, will default to `num_attention_heads`. + attention_bias (`bool`, *optional*, defaults to `False`): + Whether to use a bias in the query, key, value and output projection layers during self-attention. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + mlp_bias (`bool`, *optional*, defaults to `False`): + Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers. + sliding_window (`int`, *optional*): + Sliding window attention window size. If not specified, will default to `None`. + keep_window_size (`int`, *optional*, defaults to 2048): + The window size of tokens that are not dynamically masked, and dynamic masking is only performed when the sequence length exceeds this value. + is_moe (`bool`, *optional*, defaults to `False`): + Whether to use the Cross Domain Mixture of Experts. If `True`, the MoE will inherit the MLP to initialize. + num_experts (`int`, *optional*, defaults to 16384): + Number of routed experts in the model. This is only used when `is_moe=True`. + num_experts_per_tok (`int`, *optional*, defaults to 64): + Number of selected experts to route per-token. + norm_topk_prob (`bool`, *optional*, defaults to `False`): + Whether to normalize the topk probabilities. + output_router_logits (`bool`, *optional*, defaults to `False`): + Whether or not the router logits should be returned by the model. Enabling this will also + allow the model to output the auxiliary loss, including load balancing loss and router z-loss. + router_aux_loss_coef (`float`, *optional*, defaults to 0.001): + The aux loss factor for the total loss. + + ```python + >>> from transformers import DogeConfig, DogeModel + + >>> # Initializing a Doge-320M style configuration + >>> configuration = DogeConfig() + + >>> # Initializing a model from the Doge-320M style configuration + >>> model = DogeModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "doge" + keys_to_ignore_at_inference = ["past_key_values"] + # Default tensor parallel plan for base model `DogeModel` + base_model_tp_plan = { + "layers.*.self_attn.q_proj": "colwise", + "layers.*.self_attn.k_proj": "colwise", + "layers.*.self_attn.v_proj": "colwise", + "layers.*.self_attn.dt_proj": "rowwise", + "layers.*.self_attn.o_proj": "rowwise", + "layers.*.input_layernorm.weight": "sequence_parallel", + "layers.*.input_residual.weight": "sequence_parallel", + "layers.*.post_attention_layernorm.weight": "sequence_parallel", + "layers.*.post_attention_residual.weight": "sequence_parallel", + "norm.weight": "sequence_parallel", + "layers.*.mlp.gate_proj": "colwise", + "layers.*.mlp.up_proj": "colwise", + "layers.*.mlp.down_proj": "rowwise", + "layers.*.mlp.router_gate": "colwise_rep", + "layers.*.mlp.down_embed": "rowwise_rep", + "layers.*.mlp.up_embed": "rowwise_rep", + } + base_model_pp_plan = { + "embed_tokens": (["input_ids"], ["inputs_embeds"]), + "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), + "norm": (["hidden_states"], ["hidden_states"]), + } + + def __init__( + self, + vocab_size=32768, + hidden_size=1024, + intermediate_size=2048, + num_hidden_layers=32, + hidden_dropout=0.0, + hidden_act="silu", + initializer_range=0.02, + rms_norm_eps=1e-06, + use_cache=True, + tie_word_embeddings=False, + max_position_embeddings=2048, + rope_theta=10000.0, + rope_scaling=None, + num_attention_heads=8, + num_key_value_heads=None, +
attention_bias=False, + attention_dropout=0.0, + mlp_bias=False, + sliding_window=None, + keep_window_size=2048, + is_moe=False, + num_experts=16384, + num_experts_per_tok=64, + norm_topk_prob=False, + output_router_logits=False, + router_aux_loss_coef=0.001, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + + self.hidden_dropout = hidden_dropout + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.use_cache = use_cache + + self.max_position_embeddings = max_position_embeddings + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self.num_attention_heads = num_attention_heads + self.num_key_value_heads = num_key_value_heads + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + self.mlp_bias = mlp_bias + self.sliding_window = sliding_window + self.keep_window_size = keep_window_size + self.is_moe = is_moe + self.num_experts = num_experts + self.num_experts_per_tok = num_experts_per_tok + self.norm_topk_prob = norm_topk_prob + self.output_router_logits = output_router_logits + self.router_aux_loss_coef = router_aux_loss_coef + + # Validate the correctness of rotary position embeddings parameters + # BC: if there is a 'type' field, copy it to 'rope_type'. + if self.rope_scaling is not None and "type" in self.rope_scaling: + self.rope_scaling["rope_type"] = self.rope_scaling["type"] + rope_config_validation(self) + + # for backward compatibility + if num_key_value_heads is None: + self.num_key_value_heads = num_attention_heads + + super().__init__( + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + +__all__ = ["DogeConfig"] diff --git a/examples/modeling/modeling_doge.py b/examples/modeling/modeling_doge.py new file mode 100644 index 0000000..8810539 --- /dev/null +++ b/examples/modeling/modeling_doge.py @@ -0,0 +1,829 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from src/transformers/models/doge/modular_doge.py. +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the modular. If any change should be done, please apply the change to the +# modular_doge.py file directly. One of our CI enforces this. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# coding=utf-8 +# Copyright 2025 Jingze Shi and the HuggingFace Inc. team. All rights reserved. +# +# The Doge family of small language models is trained by SmallDoge Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import math +from typing import Callable, Optional, Union + +import torch +import torch.nn.functional as F +from torch import nn + +from transformers.activations import ACT2FN +from transformers.cache_utils import Cache, DynamicCache +from transformers.generation import GenerationMixin +from transformers.integrations import use_kernel_forward_from_hub +from transformers.integrations.flex_attention import compile_friendly_flex_attention +from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask +from transformers.modeling_layers import GenericForSequenceClassification, GradientCheckpointingLayer +from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast +from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update +from transformers.modeling_utils import AttentionInterface, PreTrainedModel +from transformers.processing_utils import Unpack +from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available +from transformers.utils.generic import OutputRecorder, check_model_inputs +from .configuration_doge import DogeConfig + +try: + from flash_dmattn import flash_dmattn_func_auto +except ImportError: + def flash_dmattn_func_auto(*args, **kwargs): + raise ImportError( + "flash_dmattn is not installed. Please install it to use flash_dmattn_func_auto. " + "You can install it with `pip install flash-dmattn` or consult the documentation." + ) + +if is_torch_flex_attn_available(): + from torch.nn.attention.flex_attention import BlockMask + + +@use_kernel_forward_from_hub("RMSNorm") +class DogeRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + DogeRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + def extra_repr(self): + return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" + + +class DogeRotaryEmbedding(nn.Module): + def __init__(self, config: DogeConfig, device=None): + super().__init__() + # BC: "rope_type" was originally "type" + if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): + self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) + else: + self.rope_type = "default" + self.max_seq_len_cached = config.max_position_embeddings + self.original_max_seq_len = config.max_position_embeddings + + self.config = config + self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] + + inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) + self.register_buffer("inv_freq", inv_freq, persistent=False) + self.original_inv_freq = self.inv_freq + + @torch.no_grad() + @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) + def forward(self, x, position_ids): + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) + position_ids_expanded = position_ids[:, None, :].float() + + device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): # Force float32 + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() * self.attention_scaling + sin = emb.sin() * self.attention_scaling + + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`, *optional*): + Deprecated and unused. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +def eager_attention_forward( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + scaling: float, + dropout: float = 0.0, + **kwargs: Unpack[TransformersKwargs], +): + key_states = repeat_kv(key, module.num_key_value_groups) + value_states = repeat_kv(value, module.num_key_value_groups) + + attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling + if attention_mask is not None: + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights = attn_weights + causal_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) + attn_output = torch.matmul(attn_weights, value_states) + attn_output = attn_output.transpose(1, 2).contiguous() + + return attn_output, attn_weights + + +def flex_attention_forward( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Union[torch.Tensor, "BlockMask"], + scaling: Optional[float] = None, + softcap: Optional[float] = None, + head_mask: Optional[torch.Tensor] = None, + **kwargs, +) -> tuple[torch.Tensor, torch.Tensor]: + block_mask = None + causal_mask = None + if isinstance(attention_mask, BlockMask): + block_mask = attention_mask + else: + causal_mask = attention_mask + + if causal_mask is not None: + causal_mask = causal_mask[:, :, :, : key.shape[-2]] + + def score_mod(score, batch_idx, head_idx, q_idx, kv_idx): + if softcap is not None: + score = softcap * torch.tanh(score / softcap) + if causal_mask is not None: + score = score + causal_mask[batch_idx][head_idx][q_idx][kv_idx] + if head_mask is not None: + score = score + head_mask[batch_idx][head_idx][0][0] + return score + + attn_output, attention_weights = compile_friendly_flex_attention( + query, + key, + value, + score_mod=score_mod, + block_mask=block_mask, + enable_gqa=True, + scale=scaling, + # Last time checked on PyTorch == 2.5.1: Flex Attention always computes the lse regardless. + # For simplification, we thus always return it as no additional computations are introduced. 
+ return_lse=True, + ) + # lse is returned in float32 + attention_weights = attention_weights.to(value.dtype) + attn_output = attn_output.transpose(1, 2).contiguous() + + return attn_output, attention_weights + + +ALL_ATTENTION_FUNCTIONS = AttentionInterface() +ALL_ATTENTION_FUNCTIONS["doge_flex_attention"] = flex_attention_forward + + +class DogeAttention(nn.Module): + def __init__(self, config: DogeConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) + self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads + self.scaling = self.head_dim**-0.5 + self.attention_dropout = config.attention_dropout + self.keep_window_size = config.keep_window_size + self.is_causal = True + + self.q_proj = nn.Linear( + config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias + ) + self.k_proj = nn.Linear( + config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias + ) + self.v_proj = nn.Linear( + config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias + ) + # dynamic mask for the QK^T attention weights matrix + self.A = nn.Parameter(torch.zeros(config.num_key_value_heads)) + self.dt_proj = nn.Linear( + config.num_key_value_heads * self.head_dim, config.num_key_value_heads, bias=config.attention_bias + ) + self.o_proj = nn.Linear( + config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias + ) + self.q_norm = DogeRMSNorm(self.head_dim, eps=config.rms_norm_eps) + self.k_norm = DogeRMSNorm(self.head_dim, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: tuple[torch.Tensor, torch.Tensor], + attention_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Cache] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: + input_shape = hidden_states.shape[:-1] + hidden_shape = (*input_shape, -1, self.head_dim) + + query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2) + key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2) + value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) + + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # calculate dynamic mask from value_states + dt_states = self.dt_proj( + value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1) + ) + dt_states = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2) + attn_bias, attn_mask = self.prepare_dynamic_mask( + hidden_states=hidden_states, + dt_states=dt_states, + keep_window_size=self.keep_window_size, + attention_mask=attention_mask, + ) + + attention_interface: Callable = flash_dmattn_func_auto(backend="flex") + query_states = query_states.transpose(1, 2).contiguous() # [B, H, Q_LEN, D] + key_states = key_states.transpose(1, 2).contiguous() # [B, H, KV_LEN, D] + 
value_states = value_states.transpose(1, 2).contiguous() # [B, H, KV_LEN, D] + + attn_output = attention_interface( + query_states, + key_states, + value_states, + attn_mask=attn_mask, + attn_bias=attn_bias, + is_causal=self.is_causal, + scale=self.scaling, + ) + + attn_output = attn_output.reshape(*input_shape, -1).contiguous() + attn_output = self.o_proj(attn_output) + return attn_output, None + + def prepare_dynamic_mask( + self, + hidden_states: torch.Tensor, + dt_states: torch.Tensor, + keep_window_size: int = 2048, + attention_mask: Optional[torch.Tensor] = None, + ): + """ + The core idea of DMA is to calculate the dynamic attention mask to mask the tokens that should be masked, so as to form sparse attention. + + Combine `dt_states` with `attention_mask` to generate the final `attn_mask`. + + Args: + hidden_states (`torch.Tensor`): The input hidden_states, used to determine the minimum value of the current input precision. + dt_states (`torch.Tensor`): dt_states of shape `(batch_size, num_heads, key_sequence_length)`. + keep_window_size (`int`): The window size of tokens that are not dynamically masked, and dynamic masking is only performed when the sequence length exceeds this value. + attention_mask (`torch.Tensor`, *optional*): attention mask of shape `(batch_size, 1, query_sequence_length, key_sequence_length)`. + """ + min_dtype = torch.finfo(hidden_states.dtype).min + dtype = hidden_states.dtype + attn_bias = dt_states[:, :, None, :].expand( + -1, -1, hidden_states.shape[1], -1 + ) # [batch_size, num_heads, query_len, key_len] + if attention_mask is not None and not isinstance(attention_mask, BlockMask): + if attention_mask.dtype == torch.bool: + attention_mask = torch.where( + attention_mask, torch.tensor(0.0, device=attention_mask.device, dtype=dtype), min_dtype + ) + attn_bias = attn_bias.masked_fill(attention_mask[:, :, :, : attn_bias.shape[-1]] != 0, min_dtype) + if attn_bias.shape[-1] > keep_window_size: + topk_indices = torch.topk( + attn_bias, keep_window_size, dim=-1, largest=True, sorted=False + ).indices + attn_mask = torch.zeros_like(attn_bias, dtype=dtype, device=attn_bias.device) + attn_mask = attn_mask.scatter(-1, topk_indices, 1.0) + attn_bias = attn_bias.masked_fill(attn_mask == 0.0, min_dtype) + else: + attn_mask = torch.ones_like(attn_bias, dtype=dtype, device=attn_bias.device) + return attn_bias, attn_mask + + +class DogeMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + return down_proj + + +class DogeCDMoE(nn.Module): + def __init__(self, config: DogeConfig): + super().__init__() + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.act_fn = ACT2FN[config.hidden_act] + + self.num_experts = config.num_experts + self.num_keys = math.floor(math.sqrt(self.num_experts)) + self.top_k = config.num_experts_per_tok + self.norm_topk_prob = config.norm_topk_prob + + # shared expert + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) + 
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias) + + # router gate for retrieval experts + self.router_gate = nn.Linear(self.hidden_size, self.num_keys * 2, bias=False) + + # routed experts + self.down_embed = nn.Embedding(self.num_experts, self.hidden_size) + self.up_embed = nn.Embedding(self.num_experts, self.hidden_size) + + def forward( + self, + hidden_states: torch.Tensor, + **kwargs, + ) -> torch.Tensor: + bsz, seq_len, _ = hidden_states.shape + + # get routing logits with router gate + router_logits = self.router_gate(hidden_states).view(2, bsz * seq_len, -1) + + # get experts with the highest routing logits + (scores_x, scores_y), (indices_x, indices_y) = router_logits.topk(self.num_keys, dim=-1) + all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2) + all_indices = indices_x.unsqueeze(-1) * self.num_keys + indices_y.unsqueeze(-2) + all_scores = all_scores.view(*all_scores.shape[:-2], -1) + all_indices = all_indices.view(*all_indices.shape[:-2], -1) + scores, position_indices = all_scores.topk(self.top_k, dim=-1) + indices = all_indices.gather(-1, position_indices) + routing_weights = F.softmax(scores, dim=-1) + if self.norm_topk_prob: + routing_weights /= routing_weights.sum(dim=-1, keepdim=True) + + # mix routed experts states with shared expert states + down_embed = self.down_embed(indices) + up_embed = self.up_embed(indices) + experts_weights = torch.matmul(down_embed, hidden_states.view(bsz * seq_len, -1, 1)).view(bsz * seq_len, -1) + experts_weights = self.act_fn(experts_weights) * routing_weights + experts_states = torch.matmul(experts_weights.view(bsz * seq_len, 1, -1), up_embed).view(bsz, seq_len, -1) + hidden_states = self.down_proj(self.act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states)) + hidden_states = hidden_states + experts_states + return hidden_states, router_logits + + +class DogeDecoderLayer(GradientCheckpointingLayer): + def __init__(self, config: DogeConfig, layer_idx: Optional[int] = None): + super().__init__() + self.hidden_dropout = config.hidden_dropout + + self.input_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.self_attn = DogeAttention(config=config, layer_idx=layer_idx) + self.input_residual = nn.Parameter(torch.ones(config.hidden_size)) + + self.post_attention_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.mlp = DogeMLP(config) if not config.is_moe else DogeCDMoE(config) + self.post_attention_residual = nn.Parameter(torch.ones(config.hidden_size)) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: tuple[torch.Tensor, torch.Tensor], + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[tuple[torch.Tensor]] = None, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs: Unpack[TransformersKwargs], + ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: + # sequence transformation + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + hidden_states, self_attn_weights = self.self_attn( + hidden_states=hidden_states, + position_embeddings=position_embeddings, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + hidden_states 
= F.dropout(hidden_states, p=self.hidden_dropout, training=self.training) + hidden_states = self.input_residual * residual + hidden_states + + # state transformation + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + if isinstance(hidden_states, tuple): + hidden_states, _ = hidden_states + hidden_states = F.dropout(hidden_states, p=self.hidden_dropout, training=self.training) + hidden_states = self.post_attention_residual * residual + hidden_states + + return hidden_states + + +@auto_docstring +class DogePreTrainedModel(PreTrainedModel): + config: DogeConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["DogeDecoderLayer"] + _skip_keys_device_placement = ["past_key_values"] + _supports_flash_attn = False + _supports_sdpa = True + _supports_flex_attn = True + _can_compile_fullgraph = False + _supports_attention_backend = True + _can_record_outputs = { + "router_logits": OutputRecorder(DogeCDMoE, index=1), + "hidden_states": DogeDecoderLayer, + "attentions": DogeAttention, + } + + def _init_weights(self, module): + """Initialize the weights""" + super()._init_weights(module) + if isinstance(module, DogeAttention): + if hasattr(module, "A"): + module.A.data.zero_() + elif isinstance(module, DogeCDMoE): + if hasattr(module, "router_gate"): + module.router_gate.weight.data.zero_() + elif isinstance(module, DogeDecoderLayer): + if hasattr(module, "input_residual"): + module.input_residual.data.fill_(1.0) + if hasattr(module, "post_attention_residual"): + module.post_attention_residual.data.fill_(1.0) + + +@auto_docstring +class DogeModel(DogePreTrainedModel): + def __init__(self, config: DogeConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList( + [DogeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self.norm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.rotary_emb = DogeRotaryEmbedding(config=config) + self.gradient_checkpointing = False + + # Initialize weights and apply final processing + self.post_init() + + @check_model_inputs + @auto_docstring + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Cache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs: Unpack[TransformersKwargs], + ) -> MoeModelOutputWithPast: + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError("You must specify exactly one of input_ids or inputs_embeds") + + if use_cache and past_key_values is None: + past_key_values = DynamicCache() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + if position_ids is None: + position_ids = cache_position.unsqueeze(0) + + mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask + causal_mask = mask_function( + 
config=self.config, + input_embeds=inputs_embeds, + attention_mask=attention_mask, + cache_position=cache_position, + past_key_values=past_key_values, + position_ids=position_ids, + ) + + hidden_states = inputs_embeds + + # create position embeddings to be shared across the decoder layers + position_embeddings = self.rotary_emb(hidden_states, position_ids) + + for decoder_layer in self.layers[: self.config.num_hidden_layers]: + hidden_states = decoder_layer( + hidden_states, + position_embeddings=position_embeddings, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + + hidden_states = self.norm(hidden_states) + + return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE + last_hidden_state=hidden_states, + past_key_values=past_key_values, + ) + + +def load_balancing_loss_func( + gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None], + num_experts: Optional[int] = None, + num_keys: Optional[int] = None, + top_k: int = 2, + attention_mask: Optional[torch.Tensor] = None, +) -> Union[torch.Tensor, int]: + r""" + Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. + + See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss + function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between + experts is too unbalanced. + + Args: + gate_logits: + Logits from the `router_gate`, should be a tuple of model.config.num_hidden_layers tensors of + shape [2, batch_size * sequence_length, num_keys]. + num_experts: + Number of experts + num_keys: + Number of keys + top_k: + The number of experts to route per-token, can be also interpreted as the `top-k` routing + parameter. + attention_mask (`torch.Tensor`, *optional*): + The attention_mask used in forward function + shape [batch_size X sequence_length] if not None. + + Returns: + The auxiliary loss. 
+ """ + if gate_logits is None or not isinstance(gate_logits, tuple): + return 0 + + compute_dtype = gate_logits[0].dtype + compute_device = gate_logits[0].device + all_expert_indices = [] + all_routing_weights = [] + + for layer_gate_logits in gate_logits: + layer_gate_logits = layer_gate_logits.to(compute_device) + + (scores_x, scores_y), (indices_x, indices_y) = layer_gate_logits.topk(num_keys, dim=-1) + + all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2) + all_indices = indices_x.unsqueeze(-1) * num_keys + indices_y.unsqueeze(-2) + all_scores = all_scores.view(*all_scores.shape[:-2], -1) + all_indices = all_indices.view(*all_indices.shape[:-2], -1) + + _, position_indices = all_scores.topk(top_k, dim=-1) + expert_indices = all_indices.gather(-1, position_indices) + + routing_weights = F.softmax(all_scores, dim=-1) + + all_expert_indices.append(expert_indices) + all_routing_weights.append(routing_weights) + all_expert_indices = torch.cat(all_expert_indices, dim=0) + all_routing_weights = torch.cat(all_routing_weights, dim=0) + + if attention_mask is None: + # Compute the percentage of tokens routed to each experts + all_expert_indices = all_expert_indices.view(-1) + tokens_per_expert = torch.zeros(num_experts, dtype=compute_dtype, device=compute_device) + pad = torch.ones_like(all_expert_indices, dtype=compute_dtype, device=compute_device) + tokens_per_expert = tokens_per_expert.scatter_add_(0, all_expert_indices, pad) / all_expert_indices.shape[0] + + # Compute the average probability of routing to these experts + router_prob_per_expert = torch.mean(all_routing_weights, dim=0) + else: + batch_size, sequence_length = attention_mask.shape + num_hidden_layers = len(gate_logits) + + # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask + expert_attention_mask = ( + attention_mask[None, :, :, None] + .expand((num_hidden_layers, batch_size, sequence_length, top_k)) + .reshape(-1) + .to(compute_device) + ) + all_expert_indices = all_expert_indices.view(-1)[expert_attention_mask.bool()] + + # Compute the percentage of tokens routed to each experts + tokens_per_expert = torch.zeros(num_experts, dtype=compute_dtype, device=compute_device) + pad = torch.ones_like(all_expert_indices, dtype=compute_dtype, device=compute_device) + tokens_per_expert = tokens_per_expert.scatter_add_(0, all_expert_indices, pad) / torch.sum( + expert_attention_mask + ) + + # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert + router_per_expert_attention_mask = ( + attention_mask[None, :, :, None] + .expand((num_hidden_layers, batch_size, sequence_length, num_experts)) + .reshape(-1, num_experts) + .to(compute_device) + ) + + # Compute the average probability of routing to these experts + router_prob_per_expert = torch.sum(all_routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum( + router_per_expert_attention_mask, dim=0 + ) + + overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert) + return overall_loss * num_experts + + +@auto_docstring +class DogeForCausalLM(DogePreTrainedModel, GenerationMixin): + _tied_weights_keys = ["lm_head.weight"] + _tp_plan = {"lm_head": "colwise_rep"} + _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} + + def __init__(self, config): + super().__init__(config) + self.model = DogeModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + self.router_aux_loss_coef = config.router_aux_loss_coef + 
self.num_experts = config.num_experts + self.num_experts_per_tok = config.num_experts_per_tok + + # Initialize weights and apply final processing + self.post_init() + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @can_return_tuple + @auto_docstring + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[list[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + logits_to_keep: Union[int, torch.Tensor] = 0, + output_router_logits: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], + ) -> MoeCausalLMOutputWithPast: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Example: + + ```python + >>> from transformers import AutoTokenizer, DogeForCausalLM + + >>> model = DogeForCausalLM.from_pretrained("SmallDoge/Doge-320M") + >>> tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-320M") + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
+ ```""" + output_router_logits = ( + output_router_logits if output_router_logits is not None else self.config.output_router_logits + ) + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs: MoeModelOutputWithPast = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + + hidden_states = outputs.last_hidden_state + # Only compute necessary logits, and do not upcast them to float if we are not computing the loss + slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep + logits = self.lm_head(hidden_states[:, slice_indices, :]) + + loss = None + if labels is not None: + loss = self.loss_function(logits, labels, self.vocab_size, **kwargs) + + aux_loss = None + if output_router_logits: + aux_loss = load_balancing_loss_func( + outputs.router_logits, + self.num_experts, + math.floor(math.sqrt(self.num_experts)), + self.num_experts_per_tok, + attention_mask, + ) + if labels is not None: + loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device + + return MoeCausalLMOutputWithPast( + loss=loss, + aux_loss=aux_loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + router_logits=outputs.router_logits, + ) + + +class DogeForSequenceClassification(GenericForSequenceClassification, DogePreTrainedModel): + pass + + +__all__ = ["DogeForCausalLM", "DogeModel", "DogePreTrainedModel", "DogeForSequenceClassification"]
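Usage note for reviewers: the snippet below is a minimal smoke test for these example files, not part of the generated code. It assumes `examples/modeling/` is importable as a package (e.g. it contains an `__init__.py` and Python is run from the repository root) and that `flash-dmattn` is installed; otherwise `DogeAttention.forward` raises the ImportError defined in the fallback above. The tiny hyperparameters are illustrative only; the documented defaults correspond to SmallDoge/Doge-320M.

```python
# Hypothetical smoke test; assumes examples/modeling is an importable package
# and that flash-dmattn (whose kernels typically target CUDA) is installed.
import torch

from examples.modeling.configuration_doge import DogeConfig
from examples.modeling.modeling_doge import DogeForCausalLM

# Deliberately tiny sizes so the forward pass is cheap; real checkpoints use
# the defaults documented in the config docstring (hidden_size=1024, etc.).
config = DogeConfig(
    vocab_size=128,
    hidden_size=64,
    intermediate_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=2,
    keep_window_size=16,
)
model = DogeForCausalLM(config).eval()  # move model and inputs to GPU if your flash-dmattn build requires it

input_ids = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    out = model(input_ids=input_ids)

print(out.logits.shape)  # expected: torch.Size([1, 8, 128])
```

With `is_moe=True` and `output_router_logits=True`, the same forward pass also returns `router_logits`, which `load_balancing_loss_func` consumes to form the auxiliary loss that is added to the language-modeling loss when labels are provided.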