From 5ec3789b82a2289dda73284e2bac25a0aec2092c Mon Sep 17 00:00:00 2001 From: raushan Date: Thu, 21 Aug 2025 12:03:08 +0200 Subject: [PATCH 01/19] allow `check_model_inputs` in core VLMs --- .../models/aya_vision/modeling_aya_vision.py | 9 -- .../models/got_ocr2/modeling_got_ocr2.py | 5 + .../models/internvl/modeling_internvl.py | 131 ++++++------------ .../models/internvl/modular_internvl.py | 113 +++++---------- .../models/llava/modeling_llava.py | 31 +---- .../models/mistral3/modeling_mistral3.py | 5 + .../perception_lm/modeling_perception_lm.py | 5 + .../models/vipllava/modeling_vipllava.py | 5 + src/transformers/utils/generic.py | 20 ++- 9 files changed, 113 insertions(+), 211 deletions(-) diff --git a/src/transformers/models/aya_vision/modeling_aya_vision.py b/src/transformers/models/aya_vision/modeling_aya_vision.py index 2e489c6f3a03..06942a282ab3 100644 --- a/src/transformers/models/aya_vision/modeling_aya_vision.py +++ b/src/transformers/models/aya_vision/modeling_aya_vision.py @@ -448,11 +448,6 @@ def forward( >>> gen_tokens = model.generate(**inputs, max_new_tokens=300, do_sample=True, temperature=0.3) >>> processor.tokenizer.decode(gen_tokens[0][inputs.input_ids.shape[1]:], skip_special_tokens=True) ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) @@ -471,10 +466,6 @@ def forward( inputs_embeds=inputs_embeds, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=True, cache_position=cache_position, image_sizes=image_sizes, **kwargs, diff --git a/src/transformers/models/got_ocr2/modeling_got_ocr2.py b/src/transformers/models/got_ocr2/modeling_got_ocr2.py index 394d501cf890..ee4ceeb897a6 100644 --- a/src/transformers/models/got_ocr2/modeling_got_ocr2.py +++ b/src/transformers/models/got_ocr2/modeling_got_ocr2.py @@ -287,6 +287,11 @@ class GotOcr2PreTrainedModel(PreTrainedModel): _supports_flex_attn = False _supports_attention_backend = True + _can_record_outputs = { + "hidden_states": "DecoderLayer", + "attentions": "Attention", + } + def _init_weights(self, module): super()._init_weights(module) if isinstance(module, GotOcr2VisionAttention): diff --git a/src/transformers/models/internvl/modeling_internvl.py b/src/transformers/models/internvl/modeling_internvl.py index 8e1d2709bc92..a95f980c4f2c 100644 --- a/src/transformers/models/internvl/modeling_internvl.py +++ b/src/transformers/models/internvl/modeling_internvl.py @@ -37,6 +37,7 @@ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, torch_int +from ...utils.generic import check_model_inputs from ..auto import AutoModel from .configuration_internvl import InternVLConfig, InternVLVisionConfig @@ -160,34 +161,7 @@ def forward( output = self.projection_layer(attn_output) output = self.projection_dropout(output) - outputs = (output, attn_weights) if output_attentions else (output, None) - return outputs - - 
-@auto_docstring -class InternVLVisionPreTrainedModel(PreTrainedModel): - config: InternVLVisionConfig - base_model_prefix = "internvl_vision" - main_input_name = "pixel_values" - supports_gradient_checkpointing = True - _no_split_modules = ["InternVLVisionLayer"] - _supports_sdpa = True - _supports_flash_attn = True - _supports_flex_attn = True - _supports_attention_backend = True - - def _init_weights(self, module): - """Initialize the weights""" - super()._init_weights(module) - if isinstance(module, InternVLVisionEmbeddings): - module.cls_token.data.zero_() - if module.mask_token is not None: - module.mask_token.data.zero_() - if module.position_embeddings is not None: - module.position_embeddings.data.zero_() - elif isinstance(module, InternVLVisionLayer): - module.lambda_1.data.fill_(self.config.layer_scale_init_value) - module.lambda_2.data.fill_(self.config.layer_scale_init_value) + return output, attn_weights @dataclass @@ -376,11 +350,9 @@ def __init__(self, config: InternVLVisionConfig) -> None: def forward( self, hidden_states: torch.Tensor, - output_attentions: bool = False, ) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]: - attention_output, attention_weights = self.attention( + attention_output, _ = self.attention( self.layernorm_before(hidden_states), # in InternVLVision, layernorm is applied before self-attention - output_attentions=output_attentions, ) attention_output = self.lambda_1 * attention_output @@ -400,7 +372,7 @@ def forward( # second residual connection layer_output = layer_output + hidden_states - return layer_output, attention_weights + return layer_output class InternVLVisionEncoder(nn.Module): @@ -410,35 +382,48 @@ def __init__(self, config: InternVLVisionConfig) -> None: self.layer = nn.ModuleList([InternVLVisionLayer(config) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False - @can_return_tuple + @check_model_inputs def forward( self, hidden_states: torch.Tensor, - output_attentions: bool = False, - output_hidden_states: bool = False, ) -> Union[tuple, BaseModelOutput]: - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) + hidden_states = layer_module(hidden_states) - layer_outputs = layer_module(hidden_states, output_attentions) + return BaseModelOutput( + last_hidden_state=hidden_states, + ) - hidden_states = layer_outputs[0] - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) +@auto_docstring +class InternVLVisionPreTrainedModel(PreTrainedModel): + config: InternVLVisionConfig + base_model_prefix = "internvl_vision" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + _no_split_modules = ["InternVLVisionLayer"] + _supports_sdpa = True + _supports_flash_attn = True + _supports_flex_attn = True + _supports_attention_backend = True - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) + _can_record_outputs = { + "hidden_states": InternVLVisionLayer, + "attentions": InternVLVisionAttention, + } - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - ) + def _init_weights(self, module): + """Initialize the weights""" + super()._init_weights(module) + if isinstance(module, InternVLVisionEmbeddings): + module.cls_token.data.zero_() + if 
module.mask_token is not None: + module.mask_token.data.zero_() + if module.position_embeddings is not None: + module.position_embeddings.data.zero_() + elif isinstance(module, InternVLVisionLayer): + module.lambda_1.data.fill_(self.config.layer_scale_init_value) + module.lambda_2.data.fill_(self.config.layer_scale_init_value) @auto_docstring @@ -466,25 +451,14 @@ def forward( self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, ) -> Union[tuple, InternVLVisionModelOutputWithPooling]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - embedding_output, _ = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) - encoder_outputs = self.encoder( - embedding_output, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - ) + encoder_outputs = self.encoder(embedding_output) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) @@ -509,6 +483,11 @@ class InternVLPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True + _can_record_outputs = { + "hidden_states": "DecoderLayer", + "attentions": "Attention", + } + class InternVLMultiModalProjector(nn.Module): def __init__(self, config: InternVLConfig): @@ -668,18 +647,9 @@ def forward( inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, InternVLModelOutputWithPast]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) @@ -712,10 +682,6 @@ def forward( position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=True, cache_position=cache_position, **kwargs, ) @@ -870,10 +836,6 @@ def forward( vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, image_sizes: Optional[torch.Tensor] = None, @@ -914,11 +876,6 @@ def forward( >>> print(processor.decode(generate_ids[0, 
inputs["input_ids"].shape[1] :], skip_special_tokens=True)) The images depict the Statue of Liberty and the Golden Gate Bridge. ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) @@ -937,10 +894,6 @@ def forward( inputs_embeds=inputs_embeds, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=True, cache_position=cache_position, image_sizes=image_sizes, **kwargs, diff --git a/src/transformers/models/internvl/modular_internvl.py b/src/transformers/models/internvl/modular_internvl.py index 26be41d629be..086e6d4ac526 100644 --- a/src/transformers/models/internvl/modular_internvl.py +++ b/src/transformers/models/internvl/modular_internvl.py @@ -30,6 +30,7 @@ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import auto_docstring, can_return_tuple, logging, torch_int +from ...utils.generic import check_model_inputs from ..clip.modeling_clip import CLIPMLP from ..janus.modeling_janus import JanusVisionAttention from ..llama.modeling_llama import LlamaRMSNorm @@ -129,34 +130,7 @@ def forward( output = self.projection_layer(attn_output) output = self.projection_dropout(output) - outputs = (output, attn_weights) if output_attentions else (output, None) - return outputs - - -@auto_docstring -class InternVLVisionPreTrainedModel(PreTrainedModel): - config: InternVLVisionConfig - base_model_prefix = "internvl_vision" - main_input_name = "pixel_values" - supports_gradient_checkpointing = True - _no_split_modules = ["InternVLVisionLayer"] - _supports_sdpa = True - _supports_flash_attn = True - _supports_flex_attn = True - _supports_attention_backend = True - - def _init_weights(self, module): - """Initialize the weights""" - super()._init_weights(module) - if isinstance(module, InternVLVisionEmbeddings): - module.cls_token.data.zero_() - if module.mask_token is not None: - module.mask_token.data.zero_() - if module.position_embeddings is not None: - module.position_embeddings.data.zero_() - elif isinstance(module, InternVLVisionLayer): - module.lambda_1.data.fill_(self.config.layer_scale_init_value) - module.lambda_2.data.fill_(self.config.layer_scale_init_value) + return output, attn_weights @dataclass @@ -334,11 +308,9 @@ def __init__(self, config: InternVLVisionConfig) -> None: def forward( self, hidden_states: torch.Tensor, - output_attentions: bool = False, ) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]: - attention_output, attention_weights = self.attention( + attention_output, _ = self.attention( self.layernorm_before(hidden_states), # in InternVLVision, layernorm is applied before self-attention - output_attentions=output_attentions, ) attention_output = self.lambda_1 * attention_output @@ -358,7 +330,7 @@ def forward( # second residual connection layer_output = layer_output + hidden_states - return layer_output, attention_weights + return layer_output class InternVLVisionEncoder(nn.Module): @@ -368,35 +340,48 @@ def __init__(self, 
config: InternVLVisionConfig) -> None: self.layer = nn.ModuleList([InternVLVisionLayer(config) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False - @can_return_tuple + @check_model_inputs def forward( self, hidden_states: torch.Tensor, - output_attentions: bool = False, - output_hidden_states: bool = False, ) -> Union[tuple, BaseModelOutput]: - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) + hidden_states = layer_module(hidden_states) - layer_outputs = layer_module(hidden_states, output_attentions) + return BaseModelOutput( + last_hidden_state=hidden_states, + ) - hidden_states = layer_outputs[0] - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) +@auto_docstring +class InternVLVisionPreTrainedModel(PreTrainedModel): + config: InternVLVisionConfig + base_model_prefix = "internvl_vision" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + _no_split_modules = ["InternVLVisionLayer"] + _supports_sdpa = True + _supports_flash_attn = True + _supports_flex_attn = True + _supports_attention_backend = True - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) + _can_record_outputs = { + "hidden_states": InternVLVisionLayer, + "attentions": InternVLVisionAttention, + } - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - ) + def _init_weights(self, module): + """Initialize the weights""" + super()._init_weights(module) + if isinstance(module, InternVLVisionEmbeddings): + module.cls_token.data.zero_() + if module.mask_token is not None: + module.mask_token.data.zero_() + if module.position_embeddings is not None: + module.position_embeddings.data.zero_() + elif isinstance(module, InternVLVisionLayer): + module.lambda_1.data.fill_(self.config.layer_scale_init_value) + module.lambda_2.data.fill_(self.config.layer_scale_init_value) @auto_docstring @@ -424,25 +409,14 @@ def forward( self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, ) -> Union[tuple, InternVLVisionModelOutputWithPooling]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - embedding_output, _ = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) - encoder_outputs = self.encoder( - embedding_output, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - ) + encoder_outputs = self.encoder(embedding_output) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) @@ -584,18 +558,9 @@ def forward( inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, InternVLModelOutputWithPast]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) @@ -628,10 +593,6 @@ def forward( position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=True, cache_position=cache_position, **kwargs, ) diff --git a/src/transformers/models/llava/modeling_llava.py b/src/transformers/models/llava/modeling_llava.py index f81555279410..aa81def3c963 100644 --- a/src/transformers/models/llava/modeling_llava.py +++ b/src/transformers/models/llava/modeling_llava.py @@ -125,6 +125,11 @@ class LlavaPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True + _can_record_outputs = { + "hidden_states": "DecoderLayer", + "attentions": "Attention", + } + @auto_docstring( custom_intro=""" @@ -254,19 +259,10 @@ def forward( inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, image_sizes: torch.Tensor = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, LlavaModelOutputWithPast]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) @@ -300,10 +296,6 @@ def forward( position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - 
output_hidden_states=output_hidden_states, - return_dict=True, cache_position=cache_position, **kwargs, ) @@ -392,10 +384,6 @@ def forward( vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, image_sizes: Optional[torch.Tensor] = None, @@ -428,11 +416,6 @@ def forward( >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "USER: \nWhat's the content of the image? ASSISTANT: The image features a busy city street with a stop sign prominently displayed" ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) @@ -451,10 +434,6 @@ def forward( inputs_embeds=inputs_embeds, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=True, cache_position=cache_position, image_sizes=image_sizes, **kwargs, diff --git a/src/transformers/models/mistral3/modeling_mistral3.py b/src/transformers/models/mistral3/modeling_mistral3.py index afa6bf44734c..bc95439cf25f 100644 --- a/src/transformers/models/mistral3/modeling_mistral3.py +++ b/src/transformers/models/mistral3/modeling_mistral3.py @@ -190,6 +190,11 @@ class Mistral3PreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True + _can_record_outputs = { + "hidden_states": "DecoderLayer", + "attentions": "Attention", + } + @auto_docstring( custom_intro=""" diff --git a/src/transformers/models/perception_lm/modeling_perception_lm.py b/src/transformers/models/perception_lm/modeling_perception_lm.py index 4210cd73e545..b3f40d778ae3 100644 --- a/src/transformers/models/perception_lm/modeling_perception_lm.py +++ b/src/transformers/models/perception_lm/modeling_perception_lm.py @@ -99,6 +99,11 @@ class PerceptionLMPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True + _can_record_outputs = { + "hidden_states": "DecoderLayer", + "attentions": "Attention", + } + @dataclass @auto_docstring( diff --git a/src/transformers/models/vipllava/modeling_vipllava.py b/src/transformers/models/vipllava/modeling_vipllava.py index 02c27d8be578..870c00d13de4 100644 --- a/src/transformers/models/vipllava/modeling_vipllava.py +++ b/src/transformers/models/vipllava/modeling_vipllava.py @@ -126,6 +126,11 @@ class VipLlavaPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True + _can_record_outputs = { + "hidden_states": "DecoderLayer", + "attentions": "Attention", + } + @auto_docstring( custom_intro=""" diff --git a/src/transformers/utils/generic.py b/src/transformers/utils/generic.py index 74197a3e7ac6..a2952da9d590 100644 --- a/src/transformers/utils/generic.py +++ 
b/src/transformers/utils/generic.py @@ -992,22 +992,20 @@ def check_model_inputs(func): @wraps(func) def wrapper(self, *args, **kwargs): - use_cache = kwargs.get("use_cache") - if use_cache is None: - use_cache = getattr(self.config, "use_cache", False) + use_cache = kwargs["use_cache"] if "use_cache" in kwargs else getattr(self.config, "use_cache", None) + if use_cache is not None: + if getattr(self, "gradient_checkpointing", False) and self.training and use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." + ) + use_cache = False + + kwargs["use_cache"] = use_cache return_dict = kwargs.pop("return_dict", None) if return_dict is None: return_dict = getattr(self.config, "return_dict", True) - if getattr(self, "gradient_checkpointing", False) and self.training and use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." - ) - use_cache = False - - kwargs["use_cache"] = use_cache - all_args = kwargs.copy() if "kwargs" in all_args: for k, v in all_args["kwargs"].items(): From 6db7a441e29e5ac031d109fdda10b2ca59228312 Mon Sep 17 00:00:00 2001 From: raushan Date: Fri, 22 Aug 2025 12:50:41 +0200 Subject: [PATCH 02/19] address comments --- src/transformers/models/aya_vision/modeling_aya_vision.py | 3 +-- src/transformers/models/aya_vision/modular_aya_vision.py | 2 +- src/transformers/models/got_ocr2/modeling_got_ocr2.py | 3 +-- src/transformers/models/got_ocr2/modular_got_ocr2.py | 2 +- src/transformers/models/internvl/modeling_internvl.py | 8 +++----- src/transformers/models/internvl/modular_internvl.py | 7 +++---- src/transformers/models/llava/modeling_llava.py | 3 +-- src/transformers/models/mistral3/modeling_mistral3.py | 3 +-- src/transformers/models/mistral3/modular_mistral3.py | 2 +- 9 files changed, 13 insertions(+), 20 deletions(-) diff --git a/src/transformers/models/aya_vision/modeling_aya_vision.py b/src/transformers/models/aya_vision/modeling_aya_vision.py index 06942a282ab3..dd295c115a66 100644 --- a/src/transformers/models/aya_vision/modeling_aya_vision.py +++ b/src/transformers/models/aya_vision/modeling_aya_vision.py @@ -28,7 +28,6 @@ from ...activations import ACT2FN from ...cache_utils import Cache from ...generation import GenerationMixin -from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack @@ -280,7 +279,7 @@ def forward( vision_feature_select_strategy: Optional[str] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, AyaVisionModelOutputWithPast]: vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer diff --git a/src/transformers/models/aya_vision/modular_aya_vision.py b/src/transformers/models/aya_vision/modular_aya_vision.py index 4d18b5806c1a..3b37bf77d920 100644 --- a/src/transformers/models/aya_vision/modular_aya_vision.py +++ b/src/transformers/models/aya_vision/modular_aya_vision.py @@ -177,7 +177,7 @@ def forward( vision_feature_select_strategy: Optional[str] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, 
AyaVisionModelOutputWithPast]: vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer diff --git a/src/transformers/models/got_ocr2/modeling_got_ocr2.py b/src/transformers/models/got_ocr2/modeling_got_ocr2.py index ee4ceeb897a6..f1dc6ca23c46 100644 --- a/src/transformers/models/got_ocr2/modeling_got_ocr2.py +++ b/src/transformers/models/got_ocr2/modeling_got_ocr2.py @@ -33,7 +33,6 @@ from ...activations import ACT2FN from ...cache_utils import Cache from ...generation import GenerationMixin -from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from ...modeling_utils import PreTrainedModel @@ -617,7 +616,7 @@ def forward( output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, GotOcr2ModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( diff --git a/src/transformers/models/got_ocr2/modular_got_ocr2.py b/src/transformers/models/got_ocr2/modular_got_ocr2.py index f1ec914bf4cd..eb9965516805 100644 --- a/src/transformers/models/got_ocr2/modular_got_ocr2.py +++ b/src/transformers/models/got_ocr2/modular_got_ocr2.py @@ -334,7 +334,7 @@ def forward( output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, GotOcr2ModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( diff --git a/src/transformers/models/internvl/modeling_internvl.py b/src/transformers/models/internvl/modeling_internvl.py index a95f980c4f2c..dcbbcae8226d 100644 --- a/src/transformers/models/internvl/modeling_internvl.py +++ b/src/transformers/models/internvl/modeling_internvl.py @@ -31,7 +31,6 @@ from ...cache_utils import Cache from ...generation import GenerationMixin from ...integrations import use_kernel_forward_from_hub -from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel @@ -125,8 +124,7 @@ def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[torch.Tensor] = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ): batch_size, seq_len, _ = hidden_states.size() @@ -387,7 +385,7 @@ def forward( self, hidden_states: torch.Tensor, ) -> Union[tuple, BaseModelOutput]: - for i, layer_module in enumerate(self.layer): + for layer_module in self.layer: hidden_states = layer_module(hidden_states) return BaseModelOutput( @@ -648,7 +646,7 @@ def forward( vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, cache_position: Optional[torch.LongTensor] = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, InternVLModelOutputWithPast]: 
vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer diff --git a/src/transformers/models/internvl/modular_internvl.py b/src/transformers/models/internvl/modular_internvl.py index 086e6d4ac526..e9bb20ac4434 100644 --- a/src/transformers/models/internvl/modular_internvl.py +++ b/src/transformers/models/internvl/modular_internvl.py @@ -94,8 +94,7 @@ def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[torch.Tensor] = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ): batch_size, seq_len, _ = hidden_states.size() @@ -345,7 +344,7 @@ def forward( self, hidden_states: torch.Tensor, ) -> Union[tuple, BaseModelOutput]: - for i, layer_module in enumerate(self.layer): + for layer_module in self.layer: hidden_states = layer_module(hidden_states) return BaseModelOutput( @@ -559,7 +558,7 @@ def forward( vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, cache_position: Optional[torch.LongTensor] = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, InternVLModelOutputWithPast]: vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer diff --git a/src/transformers/models/llava/modeling_llava.py b/src/transformers/models/llava/modeling_llava.py index aa81def3c963..a95fce412c1a 100644 --- a/src/transformers/models/llava/modeling_llava.py +++ b/src/transformers/models/llava/modeling_llava.py @@ -24,7 +24,6 @@ from ...activations import ACT2FN from ...cache_utils import Cache from ...generation import GenerationMixin -from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack @@ -261,7 +260,7 @@ def forward( vision_feature_select_strategy: Optional[str] = None, cache_position: Optional[torch.LongTensor] = None, image_sizes: torch.Tensor = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, LlavaModelOutputWithPast]: vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer diff --git a/src/transformers/models/mistral3/modeling_mistral3.py b/src/transformers/models/mistral3/modeling_mistral3.py index bc95439cf25f..4b04a88f56bc 100644 --- a/src/transformers/models/mistral3/modeling_mistral3.py +++ b/src/transformers/models/mistral3/modeling_mistral3.py @@ -29,7 +29,6 @@ from ...cache_utils import Cache from ...generation import GenerationMixin from ...integrations import use_kernel_forward_from_hub -from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack @@ -308,7 +307,7 @@ def forward( return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, image_sizes: torch.Tensor = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Mistral3ModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( diff --git 
a/src/transformers/models/mistral3/modular_mistral3.py b/src/transformers/models/mistral3/modular_mistral3.py index 454904c16b36..59fb64de8417 100644 --- a/src/transformers/models/mistral3/modular_mistral3.py +++ b/src/transformers/models/mistral3/modular_mistral3.py @@ -177,7 +177,7 @@ def forward( return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, image_sizes: torch.Tensor = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Mistral3ModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( From 7e1d1a58d66b70389e9de691db06358fcb8d183e Mon Sep 17 00:00:00 2001 From: raushan Date: Fri, 22 Aug 2025 13:05:21 +0200 Subject: [PATCH 03/19] fix style --- src/transformers/models/aya_vision/modular_aya_vision.py | 1 - src/transformers/models/got_ocr2/modular_got_ocr2.py | 1 - src/transformers/models/internvl/modular_internvl.py | 3 +-- src/transformers/models/mistral3/modular_mistral3.py | 1 - 4 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/transformers/models/aya_vision/modular_aya_vision.py b/src/transformers/models/aya_vision/modular_aya_vision.py index 3b37bf77d920..5348c2a7d728 100644 --- a/src/transformers/models/aya_vision/modular_aya_vision.py +++ b/src/transformers/models/aya_vision/modular_aya_vision.py @@ -30,7 +30,6 @@ from ...activations import ACT2FN from ...cache_utils import Cache -from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...processing_utils import Unpack from ...utils import auto_docstring, logging from ...utils.generic import check_model_inputs diff --git a/src/transformers/models/got_ocr2/modular_got_ocr2.py b/src/transformers/models/got_ocr2/modular_got_ocr2.py index eb9965516805..80f26ec5c49f 100644 --- a/src/transformers/models/got_ocr2/modular_got_ocr2.py +++ b/src/transformers/models/got_ocr2/modular_got_ocr2.py @@ -37,7 +37,6 @@ from ...cache_utils import Cache from ...configuration_utils import PretrainedConfig -from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...processing_utils import Unpack from ...utils import auto_docstring, can_return_tuple, logging from ..auto import CONFIG_MAPPING, AutoConfig diff --git a/src/transformers/models/internvl/modular_internvl.py b/src/transformers/models/internvl/modular_internvl.py index e9bb20ac4434..9fba9ed8ea9d 100644 --- a/src/transformers/models/internvl/modular_internvl.py +++ b/src/transformers/models/internvl/modular_internvl.py @@ -24,12 +24,11 @@ from ...activations import ACT2FN from ...cache_utils import Cache -from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack -from ...utils import auto_docstring, can_return_tuple, logging, torch_int +from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int from ...utils.generic import check_model_inputs from ..clip.modeling_clip import CLIPMLP from ..janus.modeling_janus import JanusVisionAttention diff --git a/src/transformers/models/mistral3/modular_mistral3.py b/src/transformers/models/mistral3/modular_mistral3.py index 59fb64de8417..0277568d2b00 100644 --- a/src/transformers/models/mistral3/modular_mistral3.py +++ 
b/src/transformers/models/mistral3/modular_mistral3.py @@ -20,7 +20,6 @@ from ...activations import ACT2FN from ...cache_utils import Cache -from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...processing_utils import Unpack from ...utils import logging from ..llava.modeling_llava import ( From 268bd4964e2e5fd19a03192a62ef8b95c71534b9 Mon Sep 17 00:00:00 2001 From: raushan Date: Fri, 22 Aug 2025 13:26:02 +0200 Subject: [PATCH 04/19] why this didnt fail prev? --- src/transformers/models/aya_vision/modeling_aya_vision.py | 4 ---- src/transformers/models/aya_vision/modular_aya_vision.py | 8 -------- 2 files changed, 12 deletions(-) diff --git a/src/transformers/models/aya_vision/modeling_aya_vision.py b/src/transformers/models/aya_vision/modeling_aya_vision.py index dd295c115a66..e426005b8b5f 100644 --- a/src/transformers/models/aya_vision/modeling_aya_vision.py +++ b/src/transformers/models/aya_vision/modeling_aya_vision.py @@ -402,10 +402,6 @@ def forward( vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, image_sizes: Optional[torch.Tensor] = None, diff --git a/src/transformers/models/aya_vision/modular_aya_vision.py b/src/transformers/models/aya_vision/modular_aya_vision.py index 5348c2a7d728..f3f75b3e2dbd 100644 --- a/src/transformers/models/aya_vision/modular_aya_vision.py +++ b/src/transformers/models/aya_vision/modular_aya_vision.py @@ -236,10 +236,6 @@ def forward( vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, image_sizes: Optional[torch.Tensor] = None, @@ -291,10 +287,6 @@ def forward( vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, labels=labels, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, cache_position=cache_position, logits_to_keep=logits_to_keep, image_sizes=image_sizes, From 0be7f5e026d857c97c0f0885e55c3a7b8bd34888 Mon Sep 17 00:00:00 2001 From: raushan Date: Fri, 22 Aug 2025 14:02:49 +0200 Subject: [PATCH 05/19] chec for Noneness instead --- src/transformers/utils/generic.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/transformers/utils/generic.py b/src/transformers/utils/generic.py index a2952da9d590..c49e8bcdd1b1 100644 --- a/src/transformers/utils/generic.py +++ b/src/transformers/utils/generic.py @@ -992,7 +992,9 @@ def check_model_inputs(func): @wraps(func) def wrapper(self, *args, **kwargs): - use_cache = kwargs["use_cache"] if "use_cache" in kwargs else getattr(self.config, "use_cache", None) + use_cache = ( + kwargs["use_cache"] if kwargs.get("use_cache") is not None else getattr(self.config, "use_cache", None) + ) if use_cache is not None: if getattr(self, "gradient_checkpointing", False) and self.training and use_cache: logger.warning_once( 
From ae9c66a111b454936ef063bc31e2e9ff05af6d19 Mon Sep 17 00:00:00 2001 From: raushan Date: Thu, 28 Aug 2025 16:18:11 +0200 Subject: [PATCH 06/19] batch update vlms --- .../models/aimv2/modeling_aimv2.py | 122 ++---- .../models/aimv2/modular_aimv2.py | 65 ++- src/transformers/models/blip/modeling_blip.py | 228 ++--------- .../models/blip_2/modeling_blip_2.py | 383 ++++-------------- .../models/got_ocr2/modeling_got_ocr2.py | 5 - .../models/got_ocr2/modular_got_ocr2.py | 6 - .../models/idefics/modeling_idefics.py | 290 ++----------- .../models/idefics2/modeling_idefics2.py | 111 +---- .../models/idefics3/modeling_idefics3.py | 99 +---- .../instructblip/modeling_instructblip.py | 306 +++----------- .../modeling_instructblipvideo.py | 254 +++--------- .../models/internvl/modeling_internvl.py | 5 - .../models/janus/modeling_janus.py | 222 ++++++---- .../models/llava/modeling_llava.py | 5 - .../models/mistral3/modeling_mistral3.py | 5 - .../models/ovis2/modeling_ovis2.py | 97 +---- .../models/ovis2/modular_ovis2.py | 46 ++- .../perception_lm/modeling_perception_lm.py | 5 - .../modeling_phi4_multimodal.py | 101 +---- .../modular_phi4_multimodal.py | 21 +- .../models/siglip/modeling_siglip.py | 187 ++------- .../models/siglip2/modeling_siglip2.py | 126 ++---- .../models/smolvlm/modeling_smolvlm.py | 99 +---- .../models/vipllava/modeling_vipllava.py | 5 - src/transformers/utils/generic.py | 5 + tests/models/blip_2/test_modeling_blip_2.py | 2 + tests/models/idefics/test_modeling_idefics.py | 7 +- 27 files changed, 666 insertions(+), 2141 deletions(-) diff --git a/src/transformers/models/aimv2/modeling_aimv2.py b/src/transformers/models/aimv2/modeling_aimv2.py index 472eccd0b575..6ca054fe3eac 100644 --- a/src/transformers/models/aimv2/modeling_aimv2.py +++ b/src/transformers/models/aimv2/modeling_aimv2.py @@ -34,7 +34,9 @@ from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel -from ...utils import ModelOutput, auto_docstring, can_return_tuple +from ...processing_utils import Unpack +from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple +from ...utils.generic import check_model_inputs from .configuration_aimv2 import Aimv2Config, Aimv2TextConfig, Aimv2VisionConfig @@ -300,17 +302,19 @@ def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: norm_hidden_states = self.rms_norm1(hidden_states) - attn_output, attn_weights = self.attention(hidden_states=norm_hidden_states, attention_mask=attention_mask) + attn_output, attn_weights = self.attention( + hidden_states=norm_hidden_states, attention_mask=attention_mask, **kwargs + ) hidden_states = hidden_states + attn_output norm_hidden_states = self.rms_norm2(hidden_states) mlp_output = self.ffn(norm_hidden_states) hidden_states = hidden_states + mlp_output - return (hidden_states, attn_weights) if output_attentions else (hidden_states, None) + return hidden_states class Aimv2Encoder(nn.Module): @@ -329,68 +333,22 @@ def __init__(self, config: Aimv2Config): self.gradient_checkpointing = False # Ignore copy - @can_return_tuple + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + 
**kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutput: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( + hidden_states = encoder_layer( hidden_states, attention_mask, - output_attentions=output_attentions, + **kwargs, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=encoder_states, - attentions=all_attentions, - ) + return BaseModelOutput(last_hidden_state=hidden_states) class Aimv2AttentionPoolingHead(nn.Module): @@ -464,6 +422,10 @@ def _init_weights(self, module): class Aimv2VisionModel(Aimv2PreTrainedModel): config: Aimv2VisionConfig main_input_name = "pixel_values" + _can_record_outputs = { + "hidden_states": Aimv2EncoderLayer, + "attentions": Aimv2Attention, + } def __init__(self, config: Aimv2VisionConfig): super().__init__(config) @@ -482,14 +444,13 @@ def __init__(self, config: Aimv2VisionConfig): def get_input_embeddings(self) -> nn.Module: return self.embeddings.patch_embed - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, pixel_values, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: r""" Examples: @@ -511,17 +472,11 @@ def forward( >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled features ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - hidden_states = self.embeddings(pixel_values) encoder_outputs = 
self.encoder( inputs_embeds=hidden_states, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) last_hidden_state = encoder_outputs[0] @@ -532,8 +487,6 @@ def forward( return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooler_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) @@ -545,6 +498,11 @@ def forward( class Aimv2TextModel(Aimv2PreTrainedModel): main_input_name = "input_ids" + _can_record_outputs = { + "hidden_states": Aimv2EncoderLayer, + "attentions": Aimv2Attention, + } + def __init__(self, config: Aimv2TextConfig): super().__init__(config) self.config = config @@ -562,20 +520,14 @@ def get_input_embeddings(self) -> nn.Module: def set_input_embeddings(self, value): self.embeddings.token_embedding = value - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, input_ids, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - hidden_states = self.embeddings(input_ids) batch_size, seq_len, _ = hidden_states.shape @@ -594,8 +546,7 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) last_hidden_state = encoder_outputs[0] @@ -610,8 +561,6 @@ def forward( return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) @@ -749,8 +698,7 @@ def forward( input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Aimv2Output: r""" Examples: @@ -774,23 +722,15 @@ def forward( >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - vision_outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) text_outputs: BaseModelOutputWithPooling = self.text_model( input_ids=input_ids, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) image_embeds = vision_outputs.pooler_output diff --git a/src/transformers/models/aimv2/modular_aimv2.py b/src/transformers/models/aimv2/modular_aimv2.py index 5991b928a2f0..60cf2be3293a 100644 --- a/src/transformers/models/aimv2/modular_aimv2.py +++ b/src/transformers/models/aimv2/modular_aimv2.py @@ -26,10 +26,13 @@ from ...modeling_layers import GradientCheckpointingLayer 
from ...modeling_outputs import BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel +from ...processing_utils import Unpack from ...utils import ( + TransformersKwargs, auto_docstring, can_return_tuple, ) +from ...utils.generic import check_model_inputs from ..clip.modeling_clip import CLIPModel, CLIPTextEmbeddings, _get_vector_norm from ..llama.modeling_llama import LlamaMLP, LlamaRMSNorm from ..siglip.configuration_siglip import SiglipConfig, SiglipTextConfig, SiglipVisionConfig @@ -373,17 +376,19 @@ def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: norm_hidden_states = self.rms_norm1(hidden_states) - attn_output, attn_weights = self.attention(hidden_states=norm_hidden_states, attention_mask=attention_mask) + attn_output, attn_weights = self.attention( + hidden_states=norm_hidden_states, attention_mask=attention_mask, **kwargs + ) hidden_states = hidden_states + attn_output norm_hidden_states = self.rms_norm2(hidden_states) mlp_output = self.ffn(norm_hidden_states) hidden_states = hidden_states + mlp_output - return (hidden_states, attn_weights) if output_attentions else (hidden_states, None) + return hidden_states class Aimv2Encoder(SiglipEncoder): @@ -461,6 +466,10 @@ def _init_weights(self, module): class Aimv2VisionModel(Aimv2PreTrainedModel): config: Aimv2VisionConfig main_input_name = "pixel_values" + _can_record_outputs = { + "hidden_states": Aimv2EncoderLayer, + "attentions": Aimv2Attention, + } def __init__(self, config: Aimv2VisionConfig): super().__init__(config) @@ -479,14 +488,13 @@ def __init__(self, config: Aimv2VisionConfig): def get_input_embeddings(self) -> nn.Module: return self.embeddings.patch_embed - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, pixel_values, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: r""" Examples: @@ -508,17 +516,11 @@ def forward( >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled features ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder( inputs_embeds=hidden_states, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) last_hidden_state = encoder_outputs[0] @@ -529,8 +531,6 @@ def forward( return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooler_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) @@ -542,6 +542,11 @@ def forward( class Aimv2TextModel(Aimv2PreTrainedModel): main_input_name = "input_ids" + _can_record_outputs = { + "hidden_states": Aimv2EncoderLayer, + "attentions": Aimv2Attention, + } + def __init__(self, config: Aimv2TextConfig): super().__init__(config) self.config = config @@ -559,20 +564,14 @@ def get_input_embeddings(self) -> nn.Module: def set_input_embeddings(self, value): self.embeddings.token_embedding = value - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, input_ids, 
attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - hidden_states = self.embeddings(input_ids) batch_size, seq_len, _ = hidden_states.shape @@ -591,8 +590,7 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) last_hidden_state = encoder_outputs[0] @@ -607,8 +605,6 @@ def forward( return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) @@ -641,8 +637,7 @@ def forward( input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Aimv2Output: r""" Examples: @@ -666,23 +661,15 @@ def forward( >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - vision_outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) text_outputs: BaseModelOutputWithPooling = self.text_model( input_ids=input_ids, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) image_embeds = vision_outputs.pooler_output diff --git a/src/transformers/models/blip/modeling_blip.py b/src/transformers/models/blip/modeling_blip.py index 4a58d70eb0c7..aac6fbd02a67 100644 --- a/src/transformers/models/blip/modeling_blip.py +++ b/src/transformers/models/blip/modeling_blip.py @@ -28,7 +28,9 @@ from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel -from ...utils import ModelOutput, auto_docstring, logging, torch_int +from ...processing_utils import Unpack +from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int +from ...utils.generic import check_model_inputs from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel @@ -327,7 +329,7 @@ def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" @@ -363,9 +365,7 @@ def forward( output = self.projection(context_layer) - outputs = (output, 
attention_probs) if output_attentions else (output, None) - - return outputs + return output, attention_probs # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Blip @@ -393,29 +393,20 @@ def __init__(self, config: BlipConfig): self.mlp = BlipMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + @auto_docstring def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - `(config.encoder_attention_heads,)`. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, head_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = hidden_states + residual residual = hidden_states @@ -424,12 +415,7 @@ def forward( hidden_states = hidden_states + residual - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states @auto_docstring @@ -486,72 +472,31 @@ def __init__(self, config: BlipConfig): self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutput]: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Embedded representation of the inputs. Should be float, not int tokens. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( + for encoder_layer in self.layers: + hidden_states = encoder_layer( hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) + return BaseModelOutput(last_hidden_state=hidden_states) class BlipVisionModel(BlipPreTrainedModel): main_input_name = "pixel_values" config: BlipVisionConfig + _can_record_outputs = { + "hidden_states": BlipEncoderLayer, + "attentions": BlipAttention, + } def __init__(self, config: BlipVisionConfig): super().__init__(config) @@ -564,21 +509,14 @@ def __init__(self, config: BlipVisionConfig): self.post_init() + @check_model_inputs @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutputWithPooling]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if pixel_values is None: raise ValueError("You have to specify pixel_values") @@ -586,9 +524,7 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) last_hidden_state = encoder_outputs[0] @@ -597,14 +533,9 @@ def forward( pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) - if not return_dict: - return (last_hidden_state, pooled_output) + encoder_outputs[1:] - return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): @@ -667,7 +598,6 @@ def get_text_features( input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, - return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: @@ -685,13 +615,10 @@ def get_text_features( >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = 
model.get_text_features(**inputs) ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, - return_dict=return_dict, ) pooled_output = text_outputs[1] @@ -703,7 +630,6 @@ def get_text_features( def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: r""" @@ -728,11 +654,9 @@ def get_image_features( >>> image_features = model.get_image_features(**inputs) ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, - return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) @@ -747,7 +671,6 @@ def get_multimodal_features( input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: r""" @@ -771,12 +694,8 @@ def get_multimodal_features( >>> multimodal_features = model.get_multimodal_features(**inputs) ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, - output_attentions=True, - output_hidden_states=True, - return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) @@ -788,7 +707,6 @@ def get_multimodal_features( attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, - return_dict=return_dict, ) pooled_output = text_outputs[1] # pooled_output @@ -796,6 +714,7 @@ def get_multimodal_features( return multimodal_features + @can_return_tuple @auto_docstring def forward( self, @@ -804,10 +723,8 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BlipOutput]: r""" return_loss (`bool`, *optional*): @@ -834,28 +751,17 @@ def forward( >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" - # Use BLIP model's config for some fields (if specified) instead of those of vision & text components. 
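Side note on the new call pattern (an editorial sketch, not part of the patch): with `@check_model_inputs` plus `_can_record_outputs` on `BlipVisionModel`, the per-call flags are no longer explicit parameters but travel through `**kwargs`, and the decorator fills `attentions`/`hidden_states` on the returned output by recording the listed module classes. A minimal caller-side sketch, reusing the checkpoint from the surrounding docstrings and a dummy image tensor (both are illustrative choices):

```python
import torch
from transformers import BlipModel

model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
size = model.config.vision_config.image_size
pixel_values = torch.randn(1, 3, size, size)  # dummy image batch instead of a real photo

with torch.no_grad():
    # The flags are picked up by @check_model_inputs, which records outputs from the
    # modules listed in BlipVisionModel._can_record_outputs.
    vision_outputs = model.vision_model(
        pixel_values, output_attentions=True, output_hidden_states=True
    )

print(len(vision_outputs.attentions))     # attention maps recorded from the BlipAttention modules
print(len(vision_outputs.hidden_states))  # hidden states recorded from the BlipEncoderLayer modules
```

Callers that never ask for these tensors should be unaffected; the corresponding output fields simply stay `None`, as before.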
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - vision_outputs = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) image_embeds = vision_outputs[1] @@ -878,10 +784,6 @@ def forward( if return_loss: loss = blip_loss(logits_per_text) - if not return_dict: - output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) - return ((loss,) + output) if loss is not None else output - return BlipOutput( loss=loss, logits_per_image=logits_per_image, @@ -925,17 +827,16 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.text_decoder.set_input_embeddings(value) + @can_return_tuple @auto_docstring def forward( self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BlipForConditionalGenerationModelOutput]: r""" Examples: @@ -957,18 +858,10 @@ def forward( >>> outputs = model(**inputs) ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - vision_outputs = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) image_embeds = vision_outputs[0] @@ -978,15 +871,10 @@ def forward( attention_mask=attention_mask, encoder_hidden_states=image_embeds, labels=labels, - return_dict=return_dict, reduction="mean", + **kwargs, ) - if not return_dict: - outputs = (outputs[0], outputs[1]) if labels is not None else (outputs[0],) - outputs += (image_embeds, vision_outputs[0]) + vision_outputs[2:] - return tuple(output for output in outputs if output is not None) - return BlipForConditionalGenerationModelOutput( loss=outputs.loss, logits=outputs.logits, @@ -1105,6 +993,7 @@ def get_input_embeddings(self): # This will return shared embeddings if they are shared else specific to encoder. 
return self.text_encoder.get_input_embeddings() + @can_return_tuple @auto_docstring def forward( self, @@ -1113,11 +1002,9 @@ def forward( decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BlipTextVisionModelOutput]: r""" Examples: @@ -1158,18 +1045,10 @@ def forward( " are using the model for inference make sure that `decoder_input_ids` is passed or call `generate`" ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - vision_outputs = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) image_embeds = vision_outputs[0] @@ -1180,14 +1059,14 @@ def forward( attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, - return_dict=return_dict, + **kwargs, ) if labels is not None and decoder_input_ids is None: # labels are already shifted right, see: https://github.com/huggingface/transformers/pull/23153 decoder_input_ids = labels - question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state + question_embeds = question_embeds[0] answer_output = self.text_decoder( input_ids=decoder_input_ids, @@ -1195,19 +1074,15 @@ def forward( encoder_hidden_states=question_embeds, encoder_attention_mask=attention_mask, labels=labels, - return_dict=return_dict, reduction="mean", + **kwargs, ) if labels is not None: - decoder_loss = answer_output.loss.mean() if return_dict else answer_output[0].mean() + decoder_loss = answer_output.loss.mean() else: decoder_loss = None - if not return_dict: - outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:] - return tuple(output for output in outputs if output is not None) - return BlipTextVisionModelOutput( loss=decoder_loss, image_embeds=image_embeds, @@ -1348,6 +1223,7 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.text_encoder.set_input_embeddings(value) + @can_return_tuple @auto_docstring def forward( self, @@ -1355,10 +1231,8 @@ def forward( pixel_values: torch.FloatTensor, use_itm_head: Optional[bool] = True, attention_mask: Optional[torch.LongTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BlipTextVisionModelOutput]: r""" use_itm_head (`bool`, *optional*, defaults to `True`): @@ -1382,18 +1256,10 @@ def forward( >>> outputs = model(**inputs) ``` """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is 
not None else self.config.output_hidden_states - ) - vision_outputs = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) image_embeds = vision_outputs[0] @@ -1405,28 +1271,24 @@ def forward( attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, - return_dict=return_dict, + **kwargs, ) - question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state + question_embeds = question_embeds[0] output = self.itm_head(question_embeds[:, 0, :]) else: question_embeds = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, - return_dict=return_dict, + **kwargs, ) - question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state + question_embeds = question_embeds[0] image_feat = normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1) text_feat = normalize(self.text_proj(question_embeds[:, 0, :]), dim=-1) output = image_feat @ text_feat.t() - if not return_dict: - outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,) - return tuple(output for output in outputs if output is not None) - return BlipImageTextMatchingModelOutput( itm_score=output, last_hidden_state=vision_outputs.last_hidden_state, diff --git a/src/transformers/models/blip_2/modeling_blip_2.py b/src/transformers/models/blip_2/modeling_blip_2.py index 2a9bda9f9a00..2ee9aac11274 100644 --- a/src/transformers/models/blip_2/modeling_blip_2.py +++ b/src/transformers/models/blip_2/modeling_blip_2.py @@ -35,7 +35,8 @@ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer -from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging, torch_int +from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int +from ...utils.generic import OutputRecorder, check_model_inputs from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig @@ -293,7 +294,6 @@ def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" @@ -310,13 +310,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
- ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, @@ -332,8 +326,7 @@ def forward( attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.projection(attn_output) - outputs = (attn_output, attn_weights) if output_attentions else (attn_output, None) - return outputs + return attn_output, attn_weights # Copied from transformers.models.blip.modeling_blip.BlipMLP @@ -362,29 +355,20 @@ def __init__(self, config: Blip2Config): self.mlp = Blip2MLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + @auto_docstring def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - `(config.encoder_attention_heads,)`. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, head_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = hidden_states + residual residual = hidden_states @@ -393,12 +377,7 @@ def forward( hidden_states = hidden_states + residual - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states @auto_docstring @@ -467,67 +446,22 @@ def __init__(self, config: Blip2Config): self.layers = nn.ModuleList([Blip2EncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutput]: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Embedded representation of the inputs. Should be float, not int tokens. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
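Reviewer note (not part of the diff itself): the hunk above drops `Blip2Attention`'s SDPA-specific fallback for `output_attentions`, so the attention backend now comes purely from `config._attn_implementation`. Since fused SDPA/FlashAttention kernels do not materialize per-head attention probabilities, loading with the eager backend is the reliable way to get real weight tensors; a one-line sketch, with the checkpoint name only as an example:

```python
from transformers import Blip2Model

# Eager attention computes the per-head probabilities that the output recorders capture;
# fused SDPA/Flash kernels return None for the weights instead.
model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", attn_implementation="eager")
```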
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( + for encoder_layer in self.layers: + hidden_states = encoder_layer( hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) + return BaseModelOutput(last_hidden_state=hidden_states) @auto_docstring @@ -535,6 +469,10 @@ def forward( class Blip2VisionModel(Blip2PreTrainedModel): main_input_name = "pixel_values" config: Blip2VisionConfig + _can_record_outputs = { + "hidden_states": Blip2EncoderLayer, + "attentions": Blip2Attention, + } def __init__(self, config: Blip2VisionConfig): super().__init__(config) @@ -547,21 +485,14 @@ def __init__(self, config: Blip2VisionConfig): self.post_init() + @check_model_inputs @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutputWithPooling]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if pixel_values is None: raise ValueError("You have to specify pixel_values") @@ -569,9 +500,7 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) last_hidden_state = encoder_outputs[0] @@ -580,14 +509,9 @@ def forward( pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) - if not return_dict: - return (last_hidden_state, pooled_output) + encoder_outputs[1:] - return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): @@ -647,7 +571,7 @@ def forward( head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, - output_attentions=False, + **kwargs: Unpack[TransformersKwargs], ): # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be @@ -712,15 +636,10 @@ def forward( new_context_layer_shape = 
context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) - outputs = ( - ( - context_layer, - attention_probs, - ) - if output_attentions - else (context_layer,) + return ( + context_layer, + attention_probs, ) - return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Blip2QFormer @@ -770,19 +689,18 @@ def forward( head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: - self_outputs = self.attention( + attn_output, _ = self.attention( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, + **kwargs, ) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs + attention_output = self.output(attn_output, hidden_states) + return attention_output # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Blip2QFormer @@ -845,17 +763,15 @@ def forward( head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, - output_attentions=False, query_length=0, + **kwargs: Unpack[TransformersKwargs], ): - self_attention_outputs = self.attention( + attention_output = self.attention( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, - output_attentions=output_attentions, + **kwargs, ) - attention_output = self_attention_outputs[0] - outputs = self_attention_outputs[1:] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] @@ -863,17 +779,14 @@ def forward( if self.has_cross_attention: if encoder_hidden_states is None: raise ValueError("encoder_hidden_states must be given for cross-attention layers") - cross_attention_outputs = self.crossattention( + query_attention_output = self.crossattention( hidden_states=query_attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, + **kwargs, ) - query_attention_output = cross_attention_outputs[0] - # add cross attentions if we output attention weights - outputs = outputs + cross_attention_outputs[1:] layer_output = apply_chunking_to_forward( self.feed_forward_chunk_query, @@ -897,9 +810,7 @@ def forward( self.seq_len_dim, attention_output, ) - outputs = (layer_output,) + outputs - - return outputs + return layer_output def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) @@ -921,6 +832,7 @@ def __init__(self, config): ) self.gradient_checkpointing = False + @can_return_tuple def forward( self, hidden_states, @@ -928,57 +840,25 @@ def forward( head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, query_length=0, + **kwargs: Unpack[TransformersKwargs], ): - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = () if output_attentions else None - for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] - if 
output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - layer_head_mask = head_mask[i] if head_mask is not None else None - layer_outputs = layer_module( + hidden_states = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, query_length=query_length, + **kwargs, ) - hidden_states = layer_outputs[0] - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - if query_length > 0 and layer_module.has_cross_attention: - all_cross_attentions = all_cross_attentions + (layer_outputs[2],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, ) @@ -1039,6 +919,16 @@ class Blip2QFormerModel(Blip2PreTrainedModel): _supports_sdpa = False _supports_flex_attn = False + _can_record_outputs = { + "hidden_states": Blip2QFormerLayer, + "attentions": [ + OutputRecorder(Blip2QFormerMultiHeadAttention, index=1, layer_name=".attention"), + ], + "cross_attentions": [ + OutputRecorder(Blip2QFormerMultiHeadAttention, index=1, layer_name=".crossattention"), + ], + } + def __init__(self, config: Blip2QFormerConfig): super().__init__(config) self.config = config @@ -1107,6 +997,7 @@ def get_extended_attention_mask( extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask + @check_model_inputs @auto_docstring def forward( self, @@ -1116,9 +1007,7 @@ def forward( head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" query_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): @@ -1128,12 +1017,6 @@ def forward( Length of the query, usually based on the number of query tokens. If no value is provided, query_length will be inferred by the query_embeds. 
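A sketch of what the `OutputRecorder` entries above provide (illustrative, not part of this change): the same `Blip2QFormerMultiHeadAttention` class backs both the self-attention (`.attention`) and the cross-attention (`.crossattention`) sub-modules, and `layer_name` routes their recorded weights (tuple index 1) into separate output fields. The tiny random-weight configuration below is purely for demonstration; every parameter value is an arbitrary assumption:

```python
import torch
from transformers import Blip2QFormerConfig, Blip2QFormerModel

config = Blip2QFormerConfig(
    hidden_size=64,
    num_attention_heads=4,
    intermediate_size=128,
    num_hidden_layers=2,
    encoder_hidden_size=64,  # width of the (dummy) image features used as cross-attention keys/values
)
model = Blip2QFormerModel(config).eval()

query_embeds = torch.randn(1, 8, config.hidden_size)           # stands in for the learned query tokens
image_embeds = torch.randn(1, 16, config.encoder_hidden_size)  # stands in for vision encoder output

with torch.no_grad():
    outputs = model(
        query_embeds=query_embeds,
        encoder_hidden_states=image_embeds,
        output_attentions=True,
    )

# Self-attention maps come from sub-modules named "...attention", cross-attention maps
# from the ones named "...crossattention", matching the recorder declarations above.
print(len(outputs.attentions), len(outputs.cross_attentions))
```

With the default `cross_attention_frequency` of 2, only every other layer has a cross-attention block, which is why the two printed lengths generally differ.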
""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - query_length = ( query_length if query_length is not None else query_embeds.shape[1] if query_embeds is not None else 0 ) @@ -1190,24 +1073,15 @@ def forward( head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, query_length=query_length, + **kwargs, ) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, ) @@ -1277,9 +1151,6 @@ def get_text_features( decoder_input_ids: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ): r""" @@ -1316,19 +1187,10 @@ def get_text_features( >>> inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if self.config.use_decoder_only_language_model: text_outputs = self.language_model( input_ids=input_ids, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, **kwargs, ) else: @@ -1339,9 +1201,6 @@ def get_text_features( attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, labels=labels, **kwargs, ) @@ -1352,10 +1211,8 @@ def get_text_features( def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ): r""" Returns: @@ -1378,18 +1235,10 @@ def get_image_features( >>> inputs = processor(images=image, return_tensors="pt") >>> image_outputs = model.get_image_features(**inputs) ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - vision_outputs = 
self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) return vision_outputs @@ -1398,10 +1247,8 @@ def get_image_features( def get_qformer_features( self, pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ): r""" Returns: @@ -1424,18 +1271,10 @@ def get_qformer_features( >>> inputs = processor(images=image, return_tensors="pt") >>> qformer_outputs = model.get_qformer_features(**inputs) ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - vision_outputs = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) image_embeds = vision_outputs[0] @@ -1448,9 +1287,7 @@ def get_qformer_features( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) return query_outputs @@ -1470,6 +1307,7 @@ def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) return special_image_mask + @can_return_tuple @auto_docstring def forward( self, @@ -1478,21 +1316,11 @@ def forward( attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Blip2ForConditionalGenerationModelOutput]: r""" - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be - provided to serve as text prompt, which the language model can continue. - - Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. 
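For context (a sketch outside the diff): at the composite level the same kwargs now fan out from `Blip2Model.forward` to the vision tower, the Q-Former and the language model, so the optional tensors land on the nested entries of `Blip2ForConditionalGenerationModelOutput` instead of needing explicit flags on every signature. A hedged sketch, assuming the `Salesforce/blip2-opt-2.7b` checkpoint and dummy inputs:

```python
import torch
from transformers import AutoTokenizer, Blip2Model

# Eager attention so the recorded attention weights are real tensors (see the note above).
model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", attn_implementation="eager")
tokenizer = AutoTokenizer.from_pretrained("Salesforce/blip2-opt-2.7b")

size = model.config.vision_config.image_size
pixel_values = torch.randn(1, 3, size, size)               # dummy image batch
text = tokenizer(["a photo of a cat"], return_tensors="pt")

with torch.no_grad():
    outputs = model(
        pixel_values=pixel_values,
        input_ids=text.input_ids,
        attention_mask=text.attention_mask,
        output_attentions=True,
        output_hidden_states=True,
    )

# The optional tensors live on the nested sub-outputs, filled in by the recorders:
print(len(outputs.vision_outputs.attentions))         # from the Blip2Attention modules
print(len(outputs.qformer_outputs.cross_attentions))  # from the ".crossattention" recorders
```

The `@can_return_tuple` decorators added in this file are what keep legacy `return_dict=False` callers on the old tuple format, so that branch no longer needs hand-written tuple assembly.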
@@ -1521,16 +1349,13 @@ def forward( >>> outputs = model(**inputs) ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict # step 1: forward the images through the vision encoder, # to get image embeddings of shape (batch_size, seq_len, hidden_size) vision_outputs = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) image_embeds = vision_outputs[0] @@ -1542,9 +1367,7 @@ def forward( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) query_output = query_outputs[0] @@ -1570,12 +1393,9 @@ def forward( outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, **kwargs, ) - logits = outputs.logits if return_dict else outputs[0] + logits = outputs[0] loss = None # we compute the loss here since we need to take into account the sequence length of the query embeds if labels is not None: @@ -1595,19 +1415,12 @@ def forward( attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=True, # toggle for easier access to loss/logits below labels=labels, + return_dict=True, **kwargs, ) loss = outputs.loss logits = outputs.logits - outputs = outputs.to_tuple() if not return_dict else outputs - - if not return_dict: - output = (logits, vision_outputs, query_outputs, outputs) - return ((loss,) + output) if loss is not None else output return Blip2ForConditionalGenerationModelOutput( loss=loss, @@ -1643,15 +1456,14 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.embeddings.word_embeddings = value + @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Blip2TextModelOutput]: r""" Examples: @@ -1677,7 +1489,6 @@ def forward( >>> print(text_embeds.shape) torch.Size([2, 7, 256]) ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict query_embeds = self.embeddings( input_ids=input_ids, @@ -1688,21 +1499,15 @@ def forward( query_embeds=query_embeds, query_length=0, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) - pooled_output = text_outputs[0] if not return_dict else text_outputs.last_hidden_state + pooled_output = text_outputs[0] pooled_output = pooled_output.to(dtype=self.text_projection.weight.dtype) text_embeds = self.text_projection(pooled_output) text_embeds = nn.functional.normalize(text_embeds, dim=-1) - if not return_dict: - outputs = (text_embeds, text_outputs[0]) + text_outputs[2:] - return tuple(output for output in outputs if output is not None) - return Blip2TextModelOutput( text_embeds=text_embeds, 
last_hidden_state=text_outputs.last_hidden_state, @@ -1734,13 +1539,12 @@ def __init__(self, config: Blip2Config): def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding + @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Blip2VisionModelOutput]: r""" Examples: @@ -1769,41 +1573,27 @@ def forward( >>> print(image_embeds.shape) torch.Size([1, 32, 256]) ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - vision_outputs = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) - pooled_output = vision_outputs[0] if not return_dict else vision_outputs.last_hidden_state - + pooled_output = vision_outputs[0] image_attention_mask = torch.ones(pooled_output.size()[:-1], dtype=torch.long, device=pooled_output.device) - query_tokens = self.query_tokens.expand(pooled_output.shape[0], -1, -1) query_outputs = self.qformer( query_embeds=query_tokens, encoder_hidden_states=pooled_output, encoder_attention_mask=image_attention_mask, - return_dict=return_dict, + **kwargs, ) - embeds = query_outputs[0] if not return_dict else query_outputs.last_hidden_state + embeds = query_outputs[0] embeds = embeds.to(dtype=self.vision_projection.weight.dtype) image_embeds = self.vision_projection(embeds) image_embeds = nn.functional.normalize(image_embeds, dim=-1) - if not return_dict: - outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:] - return tuple(output for output in outputs if output is not None) - return Blip2VisionModelOutput( image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, @@ -1960,6 +1750,7 @@ def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) return special_image_mask + @can_return_tuple @auto_docstring def forward( self, @@ -1969,12 +1760,8 @@ def forward( decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, - use_cache: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Blip2ForConditionalGenerationModelOutput]: r""" @@ -2050,14 +1837,10 @@ def forward( >>> print(generated_text) two ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict language_model_inputs, vision_outputs, query_outputs = self.get_image_features( pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True ) - vision_outputs = vision_outputs.to_tuple() if not return_dict else vision_outputs - query_outputs = query_outputs.to_tuple() if not return_dict else query_outputs - if inputs_embeds is 
None: inputs_embeds = self.get_input_embeddings()(input_ids) @@ -2074,13 +1857,9 @@ def forward( outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - use_cache=use_cache, **kwargs, ) - logits = outputs.logits if return_dict else outputs[0] + logits = outputs[0] loss = None # we compute the loss here since we need to take into account the sequence length of the query embeds if labels is not None: @@ -2095,25 +1874,17 @@ def forward( loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) else: + kwargs["return_dict"] = True outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=True, # toggle for easier access to loss/logits below labels=labels, - use_cache=use_cache, **kwargs, ) loss = outputs.loss logits = outputs.logits - outputs = outputs.to_tuple() if not return_dict else outputs - - if not return_dict: - output = (logits, vision_outputs, query_outputs, outputs) - return ((loss,) + output) if loss is not None else output return Blip2ForConditionalGenerationModelOutput( loss=loss, diff --git a/src/transformers/models/got_ocr2/modeling_got_ocr2.py b/src/transformers/models/got_ocr2/modeling_got_ocr2.py index cd1a1f3f94c4..98515b46532d 100644 --- a/src/transformers/models/got_ocr2/modeling_got_ocr2.py +++ b/src/transformers/models/got_ocr2/modeling_got_ocr2.py @@ -286,11 +286,6 @@ class GotOcr2PreTrainedModel(PreTrainedModel): _supports_flex_attn = False _supports_attention_backend = True - _can_record_outputs = { - "hidden_states": "DecoderLayer", - "attentions": "Attention", - } - def _init_weights(self, module): super()._init_weights(module) if isinstance(module, GotOcr2VisionAttention): diff --git a/src/transformers/models/got_ocr2/modular_got_ocr2.py b/src/transformers/models/got_ocr2/modular_got_ocr2.py index d36187a38d7c..551a72a987b1 100644 --- a/src/transformers/models/got_ocr2/modular_got_ocr2.py +++ b/src/transformers/models/got_ocr2/modular_got_ocr2.py @@ -21,7 +21,6 @@ from ...cache_utils import Cache from ...configuration_utils import PretrainedConfig -from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import auto_docstring, can_return_tuple, logging @@ -42,11 +41,6 @@ SamVisionLayer, ) -from ...cache_utils import Cache -from ...configuration_utils import PretrainedConfig -from ...processing_utils import Unpack -from ...utils import auto_docstring, can_return_tuple, logging -from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index 3da2259e0865..26f7f2fe90be 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -30,23 +30,21 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin -from ...modeling_attn_mask_utils import AttentionMaskConverter -from ...modeling_flash_attention_utils import FlashAttentionKwargs +from ...masking_utils import create_causal_mask from 
...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PretrainedConfig, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...utils.deprecation import deprecate_kwarg +from ...utils.generic import OutputRecorder, check_model_inputs from .configuration_idefics import IdeficsConfig from .perceiver import IdeficsPerceiverResampler from .vision import IdeficsVisionEmbeddings, IdeficsVisionTransformer if is_torch_flex_attn_available(): - from torch.nn.attention.flex_attention import BlockMask - - from ...integrations.flex_attention import make_flex_block_causal_mask + pass logger = logging.get_logger(__name__) @@ -582,10 +580,8 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, - output_attentions: bool = False, - use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, - **kwargs, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: # if key_value_states are provided this layer is used as a cross-attention layer is_cross_attention = self.is_cross_attention or key_value_states is not None @@ -624,13 +620,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, @@ -646,9 +636,6 @@ def forward( attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) - if output_attentions: - attn_weights = None - return attn_output, attn_weights @@ -674,43 +661,26 @@ def __init__(self, config: IdeficsConfig, layer_idx: Optional[int] = None): self.dropout = config.dropout @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") + @auto_docstring def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, - output_attentions: Optional[bool] = False, - use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, - **kwargs, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. 
- use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding - (see `past_key_values`). - past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states - """ - residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention - hidden_states, self_attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, - output_attentions=output_attentions, - use_cache=use_cache, cache_position=cache_position, **kwargs, ) @@ -724,12 +694,7 @@ def forward( hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states - outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - return outputs + return hidden_states class IdeficsGatedCrossAttentionLayer(GradientCheckpointingLayer): @@ -800,6 +765,7 @@ def __init__(self, config: IdeficsConfig, layer_idx: Optional[int] = None): raise ValueError("Alpha parameters not initialized correctly!") @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") + @auto_docstring def forward( self, hidden_states: torch.Tensor, @@ -807,28 +773,9 @@ def forward( image_hidden_states: Optional[torch.Tensor] = None, image_attention_mask: Optional[torch.Tensor] = None, cross_attention_gate: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, - use_cache: Optional[bool] = False, past_key_values: Optional[tuple[torch.Tensor]] = None, - **kwargs, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - image_attention_mask (`torch.FloatTensor`, *optional*): image attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - cross_attention_gate (`torch.FloatTensor`, *optional*): - gate of size `(batch, seq_len)` used to zero-out cross-attention output for tokens attending no images. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding - (see `past_key_values`). 
- past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states - """ if image_hidden_states is None: raise ValueError( "`image_hidden_states` is required for Idefics cross attention module which are visual features to be" @@ -848,11 +795,9 @@ def forward( hidden_states = self.input_layernorm(hidden_states) # Self Attention - hidden_states, self_attn_weights = self.cross_attn( + hidden_states, _ = self.cross_attn( hidden_states=hidden_states, key_value_states=image_hidden_states, - attention_mask=image_attention_mask, - output_attentions=output_attentions, **kwargs, ) hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training) @@ -867,12 +812,7 @@ def forward( hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training) hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states - outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - return outputs + return hidden_states @auto_docstring @@ -887,6 +827,11 @@ class IdeficsPreTrainedModel(PreTrainedModel): _can_compile_fullgraph = False # IDEFICS cannot compile due to dynamic control flow when checking inputs _supports_attention_backend = True + _can_record_outputs = { + "hidden_states": IdeficsDecoderLayer, + "attentions": OutputRecorder(IdeficsAttention, index=1, layer_name="self_attn"), + } + def _init_weights(self, module): # important: this ported version of Idefics isn't meant for training from scratch - only # inference and fine-tuning - so the proper init weights code has been removed - the m4 code @@ -997,7 +942,7 @@ def freeze_text_layers(self, module_exceptions=[]): def freeze_vision_layers(self, module_exceptions=[]): freeze_model(self.vision_model, module_exceptions=module_exceptions) - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, @@ -1011,12 +956,9 @@ def forward( perceiver_embeddings: Optional[torch.FloatTensor] = None, image_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, - return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, - **kwargs: Unpack[FlashAttentionKwargs], + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, IdeficsBaseModelOutputWithPast]: r""" image_encoder_embeddings (`torch.FloatTensor`, *optional*): @@ -1028,30 +970,12 @@ def forward( """ device = input_ids.device if input_ids is not None else inputs_embeds.device - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") - if self.gradient_checkpointing and self.training and use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
- ) - use_cache = False - if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache - if not isinstance(past_key_values, (type(None), Cache)): - raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.") - if use_cache and past_key_values is None: past_key_values = DynamicCache() @@ -1137,192 +1061,48 @@ def forward( (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device ) - attention_mask = self._update_causal_mask( - attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions + attention_mask = create_causal_mask( + config=self.config, + input_embeds=inputs_embeds, + attention_mask=attention_mask, + cache_position=cache_position, + past_key_values=past_key_values, ) hidden_states = inputs_embeds - # decoder layers - all_hidden_states = () if output_hidden_states else None - all_self_attns = () if output_attentions else None - for idx, decoder_layer in enumerate(self.layers): - if output_hidden_states: - all_hidden_states += (hidden_states,) - # TODO(ls): Add cross attention values to respective lists if idx % self.cross_layer_interval == 0: cross_attn_block = self.gated_cross_attn_layers[idx // self.cross_layer_interval] - outputs = cross_attn_block( + hidden_states = cross_attn_block( hidden_states, attention_mask, image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, - output_attentions=output_attentions, - use_cache=use_cache, past_key_values=None, # not implemented **kwargs, ) - hidden_states = outputs[0] - layer_outputs = decoder_layer( + hidden_states = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, - output_attentions=output_attentions, - use_cache=use_cache, cache_position=cache_position, **kwargs, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) - - # add hidden states from the last decoder layer - if output_hidden_states: - all_hidden_states += (hidden_states,) - image_hidden_states = image_hidden_states.view(batch_size, num_images, image_seq_len, image_hidden_size) return IdeficsBaseModelOutputWithPast( last_hidden_state=hidden_states, - past_key_values=past_key_values, - hidden_states=all_hidden_states, - attentions=all_self_attns, image_hidden_states=image_hidden_states, + past_key_values=past_key_values, ) - # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask - def _update_causal_mask( - self, - attention_mask: Union[torch.Tensor, "BlockMask"], - input_tensor: torch.Tensor, - cache_position: torch.Tensor, - past_key_values: Cache, - output_attentions: bool = False, - ): - if self.config._attn_implementation == "flash_attention_2": - if attention_mask is not None and (attention_mask == 0.0).any(): - return attention_mask - return None - if self.config._attn_implementation == "flex_attention": - if isinstance(attention_mask, torch.Tensor): - attention_mask = make_flex_block_causal_mask(attention_mask) - return attention_mask - - # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in - # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail - # to infer the attention mask. 
- past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 - using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False - - # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward - if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: - if AttentionMaskConverter._ignore_causal_mask_sdpa( - attention_mask, - inputs_embeds=input_tensor, - past_key_values_length=past_seen_tokens, - is_training=self.training, - ): - return None - - dtype = input_tensor.dtype - sequence_length = input_tensor.shape[1] - if using_compilable_cache: - target_length = past_key_values.get_max_cache_shape() - else: - target_length = ( - attention_mask.shape[-1] - if isinstance(attention_mask, torch.Tensor) - else past_seen_tokens + sequence_length + 1 - ) - - # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). - causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( - attention_mask, - sequence_length=sequence_length, - target_length=target_length, - dtype=dtype, - cache_position=cache_position, - batch_size=input_tensor.shape[0], - ) - - if ( - self.config._attn_implementation == "sdpa" - and attention_mask is not None - and attention_mask.device.type in ["cuda", "xpu", "npu"] - and not output_attentions - ): - # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when - # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. - # Details: https://github.com/pytorch/pytorch/issues/110213 - min_dtype = torch.finfo(dtype).min - causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) - - return causal_mask - - @staticmethod - # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position - def _prepare_4d_causal_attention_mask_with_cache_position( - attention_mask: torch.Tensor, - sequence_length: int, - target_length: int, - dtype: torch.dtype, - cache_position: torch.Tensor, - batch_size: int, - **kwargs, - ): - """ - Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape - `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. - - Args: - attention_mask (`torch.Tensor`): - A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape - `(batch_size, 1, query_length, key_value_length)`. - sequence_length (`int`): - The sequence length being processed. - target_length (`int`): - The target length: when generating with static cache, the mask should be as long as the static cache, - to account for the 0 padding, the part of the cache that is not filled yet. - dtype (`torch.dtype`): - The dtype to use for the 4D attention mask. - cache_position (`torch.Tensor`): - Indices depicting the position of the input sequence tokens in the sequence. - batch_size (`torch.Tensor`): - Batch size. - """ - if attention_mask is not None and attention_mask.dim() == 4: - # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
- causal_mask = attention_mask - else: - min_dtype = torch.finfo(dtype).min - causal_mask = torch.full( - (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device - ) - if sequence_length != 1: - causal_mask = torch.triu(causal_mask, diagonal=1) - causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) - causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) - if attention_mask is not None: - causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit - mask_length = attention_mask.shape[-1] - padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( - causal_mask.device - ) - padding_mask = padding_mask == 0 - causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( - padding_mask, min_dtype - ) - - return causal_mask - class IdeficsForVisionText2Text(IdeficsPreTrainedModel, GenerationMixin): _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"] @@ -1378,10 +1158,7 @@ def forward( image_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, - return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, IdeficsCausalLMOutputWithPast]: @@ -1422,13 +1199,6 @@ def forward( >>> generate_ids = model.generate(**inputs, max_new_tokens=6) >>> processor.batch_decode(generate_ids, skip_special_tokens=True) ```""" - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, @@ -1441,8 +1211,6 @@ def forward( perceiver_embeddings=perceiver_embeddings, image_attention_mask=image_attention_mask, use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, cache_position=cache_position, diff --git a/src/transformers/models/idefics2/modeling_idefics2.py b/src/transformers/models/idefics2/modeling_idefics2.py index 00b8b18cd3d1..65577551a68b 100644 --- a/src/transformers/models/idefics2/modeling_idefics2.py +++ b/src/transformers/models/idefics2/modeling_idefics2.py @@ -32,6 +32,7 @@ from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...utils.deprecation import deprecate_kwarg +from ...utils.generic import check_model_inputs from ..auto import AutoModel from .configuration_idefics2 import Idefics2Config, Idefics2PerceiverConfig, Idefics2VisionConfig @@ -335,30 +336,21 @@ def __init__(self, config: Idefics2VisionConfig): self.mlp = Idefics2VisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + @auto_docstring # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoderLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + 
**kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): - Input to the layer of shape `(batch, seq_len, embed_dim)`. - attention_mask (`torch.FloatTensor`): - Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*, defaults to `False`): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = residual + hidden_states @@ -367,12 +359,7 @@ def forward( hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoder with Siglip->Idefics2 @@ -392,68 +379,23 @@ def __init__(self, config: Idefics2Config): self.gradient_checkpointing = False # Ignore copy + @can_return_tuple + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[tuple, BaseModelOutput]: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - + **kwargs: Unpack[TransformersKwargs], + ) -> BaseModelOutput: hidden_states = inputs_embeds for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - layer_outputs = encoder_layer( + hidden_states = encoder_layer( hidden_states, attention_mask, - output_attentions=output_attentions, + **kwargs, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) + return BaseModelOutput(last_hidden_state=hidden_states) @auto_docstring @@ -503,6 +445,10 @@ class Idefics2VisionTransformer(Idefics2PreTrainedModel): _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True + _can_record_outputs = { + "hidden_states": Idefics2EncoderLayer, + "attentions": Idefics2VisionAttention, + } def __init__(self, config: Idefics2VisionConfig): super().__init__(config) @@ -520,25 +466,17 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.embeddings = value + @check_model_inputs @auto_docstring def forward( self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" patch_attention_mask (`torch.BoolTensor` of shape `(batch_size, num_patches_height, num_patches_width)`, *optional*): The attention mask for the patches. 
""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - batch_size = pixel_values.size(0) if patch_attention_mask is None: patch_size = self.config.patch_size @@ -565,22 +503,11 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=patch_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] - last_hidden_state = self.post_layernorm(last_hidden_state) - - if not return_dict: - return (last_hidden_state,) + encoder_outputs[1:] - return BaseModelOutput( - last_hidden_state=last_hidden_state, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - ) + return BaseModelOutput(last_hidden_state=last_hidden_state) # Copied from transformers.models.llama.modeling_llama.repeat_kv diff --git a/src/transformers/models/idefics3/modeling_idefics3.py b/src/transformers/models/idefics3/modeling_idefics3.py index 3eafc992540c..3858d1cb9f27 100644 --- a/src/transformers/models/idefics3/modeling_idefics3.py +++ b/src/transformers/models/idefics3/modeling_idefics3.py @@ -31,6 +31,7 @@ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging +from ...utils.generic import check_model_inputs from ..auto import AutoModel from .configuration_idefics3 import Idefics3Config, Idefics3VisionConfig @@ -294,30 +295,21 @@ def __init__(self, config: Idefics3VisionConfig): self.mlp = Idefics3VisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + @auto_docstring # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoderLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): - Input to the layer of shape `(batch, seq_len, embed_dim)`. - attention_mask (`torch.FloatTensor`): - Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*, defaults to `False`): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. 
- """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = residual + hidden_states @@ -326,12 +318,7 @@ def forward( hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoder with Siglip->Idefics3 @@ -351,68 +338,22 @@ def __init__(self, config: Idefics3Config): self.gradient_checkpointing = False # Ignore copy + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, - output_attentions=output_attentions, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) + hidden_states = layer_outputs - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) + return BaseModelOutput(last_hidden_state=hidden_states) # Copied from transformers.models.llama.modeling_llama.repeat_kv @@ -513,6 +454,10 @@ class Idefics3VisionTransformer(Idefics3PreTrainedModel): _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True + _can_record_outputs = { + "hidden_states": Idefics3EncoderLayer, + "attentions": Idefics3VisionAttention, + } def __init__(self, config: Idefics3VisionConfig): super().__init__(config) @@ -532,20 +477,12 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.embeddings = value + @check_model_inputs def forward( self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - batch_size = pixel_values.size(0) if patch_attention_mask is None: patch_size = self.patch_size @@ -572,21 +509,13 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=patch_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) - if not return_dict: - return (last_hidden_state,) + encoder_outputs[1:] - return BaseModelOutput( last_hidden_state=last_hidden_state, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py index 49943140fee3..cb6436f9b99f 100644 --- a/src/transformers/models/instructblip/modeling_instructblip.py +++ b/src/transformers/models/instructblip/modeling_instructblip.py @@ -36,6 +36,7 @@ from ...processing_utils import Unpack from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, TransformersKwargs, 
auto_docstring, can_return_tuple, logging, torch_int +from ...utils.generic import OutputRecorder, check_model_inputs from ..auto import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig @@ -220,7 +221,6 @@ def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" @@ -237,13 +237,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, @@ -259,8 +253,7 @@ def forward( attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.projection(attn_output) - outputs = (attn_output, attn_weights) if output_attentions else (attn_output, None) - return outputs + return attn_output, attn_weights # Copied from transformers.models.blip.modeling_blip.BlipMLP @@ -289,29 +282,20 @@ def __init__(self, config: InstructBlipConfig): self.mlp = InstructBlipMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + @auto_docstring def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - `(config.encoder_attention_heads,)`. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. 
- """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, head_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = hidden_states + residual residual = hidden_states @@ -320,12 +304,7 @@ def forward( hidden_states = hidden_states + residual - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states @auto_docstring @@ -384,67 +363,22 @@ def __init__(self, config: InstructBlipConfig): self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutput]: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Embedded representation of the inputs. Should be float, not int tokens. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( + for encoder_layer in self.layers: + hidden_states = encoder_layer( hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) + return BaseModelOutput(last_hidden_state=hidden_states) # Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->InstructBlip, BLIP->INSTRUCTBLIP @@ -452,6 +386,11 @@ class InstructBlipVisionModel(InstructBlipPreTrainedModel): main_input_name = "pixel_values" config: InstructBlipVisionConfig + _can_record_outputs = { + "hidden_states": InstructBlipEncoderLayer, + "attentions": InstructBlipAttention, + } + def __init__(self, config: InstructBlipVisionConfig): super().__init__(config) self.config = config @@ -463,21 +402,14 @@ def __init__(self, config: InstructBlipVisionConfig): self.post_init() + @check_model_inputs @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutputWithPooling]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if pixel_values is None: raise ValueError("You have to specify pixel_values") @@ -485,9 +417,7 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) last_hidden_state = encoder_outputs[0] @@ -496,14 +426,9 @@ def forward( pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) - if not return_dict: - return (last_hidden_state, pooled_output) + encoder_outputs[1:] - return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): @@ -563,7 +488,7 @@ def forward( head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, - output_attentions=False, + **kwargs: Unpack[TransformersKwargs], ): # If this is instantiated as a 
cross-attention module, the keys # and values come from an encoder; the attention mask needs to be @@ -629,9 +554,7 @@ def forward( new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - - return outputs + return context_layer, attention_probs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->InstructBlipQFormer @@ -682,19 +605,18 @@ def forward( head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: - self_outputs = self.attention( + attn_output, _ = self.attention( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, + **kwargs, ) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs + attention_output = self.output(attn_output, hidden_states) + return attention_output # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->InstructBlipQFormer @@ -756,17 +678,15 @@ def forward( head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, - output_attentions=False, query_length=0, + **kwargs: Unpack[TransformersKwargs], ): - self_attention_outputs = self.attention( + attention_output = self.attention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, - output_attentions=output_attentions, + **kwargs, ) - attention_output = self_attention_outputs[0] - outputs = self_attention_outputs[1:] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] @@ -774,17 +694,14 @@ def forward( if self.has_cross_attention: if encoder_hidden_states is None: raise ValueError("encoder_hidden_states must be given for cross-attention layers") - cross_attention_outputs = self.crossattention( + query_attention_output = self.crossattention( query_attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, + **kwargs, ) - query_attention_output = cross_attention_outputs[0] - # add cross attentions if we output attention weights - outputs = outputs + cross_attention_outputs[1:] layer_output = apply_chunking_to_forward( self.feed_forward_chunk_query, @@ -808,9 +725,7 @@ def forward( self.seq_len_dim, attention_output, ) - outputs = (layer_output,) + outputs - - return outputs + return layer_output def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) @@ -833,6 +748,7 @@ def __init__(self, config): ) self.gradient_checkpointing = False + @can_return_tuple def forward( self, hidden_states, @@ -840,57 +756,25 @@ def forward( head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, query_length=0, + **kwargs: Unpack[TransformersKwargs], ): - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = 
() if output_attentions else None - for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - layer_head_mask = head_mask[i] if head_mask is not None else None - layer_outputs = layer_module( + hidden_states = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, query_length=query_length, + **kwargs, ) - hidden_states = layer_outputs[0] - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - if query_length > 0 and layer_module.has_cross_attention: - all_cross_attentions = all_cross_attentions + (layer_outputs[2],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, ) @@ -956,6 +840,16 @@ class InstructBlipQFormerModel(InstructBlipPreTrainedModel): _supports_sdpa = False _supports_flex_attn = False + _can_record_outputs = { + "hidden_states": InstructBlipQFormerLayer, + "attentions": [ + OutputRecorder(InstructBlipQFormerMultiHeadAttention, index=1, layer_name=".attention"), + ], + "cross_attentions": [ + OutputRecorder(InstructBlipQFormerMultiHeadAttention, index=1, layer_name=".crossattention"), + ], + } + def __init__(self, config: InstructBlipQFormerConfig): super().__init__(config) self.config = config @@ -1023,6 +917,8 @@ def get_extended_attention_mask( extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask + @check_model_inputs + @auto_docstring def forward( self, input_ids: torch.LongTensor, @@ -1032,35 +928,13 @@ def forward( head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" - encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - past_key_values (`Cache` of length `config.n_layers` with each tuple having 4 tensors of: - shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and - value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are - used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key - value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape - `(batch_size, sequence_length)`. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). + query_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Hidden states to be used in the attention computation. If cross-attention, + will be used for the query (i.e., key and value will use the encoder_hidden_states). """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if input_ids is None and query_embeds is None: raise ValueError("You have to specify query_embeds when input_ids is None") @@ -1115,24 +989,15 @@ def forward( head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, query_length=query_length, + **kwargs, ) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, ) @@ -1222,11 +1087,7 @@ def forward( decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, - use_cache: Optional[bool] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, InstructBlipForConditionalGenerationModelOutput]: r""" @@ -1245,30 +1106,19 @@ def forward( - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be - provided to serve as text prompt, which the language model can continue. - - Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for - details. - - [What are input IDs?](../glossary#input-ids) decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. Only relevant in case an encoder-decoder language model (like T5) is used. 
""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict # step 1: forward the images through the vision encoder, # to get image embeddings of shape (batch_size, seq_len, hidden_size) vision_outputs = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) image_embeds = vision_outputs[0] @@ -1287,9 +1137,7 @@ def forward( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) query_output = query_outputs[0][:, : query_tokens.size(1), :] @@ -1308,10 +1156,6 @@ def forward( outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - use_cache=use_cache, **kwargs, ) else: @@ -1320,10 +1164,6 @@ def forward( attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - use_cache=use_cache, **kwargs, ) @@ -1497,12 +1337,8 @@ def forward( decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, - use_cache: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, InstructBlipForConditionalGenerationModelOutput]: r""" @@ -1521,14 +1357,6 @@ def forward( - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be - provided to serve as text prompt, which the language model can continue. - - Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for - details. - - [What are input IDs?](../glossary#input-ids) decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. @@ -1573,7 +1401,6 @@ def forward( >>> print(generated_text) The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV, which is parked in the middle of a busy city street. This is an unconventional approach to ironing clothes, as it requires the man to balance himself and his ironing equipment on top of the vehicle while navigating through traffic. Additionally, the presence of taxis and other vehicles in the scene further emphasizes the unusual nature of this situation. 
```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict language_model_inputs, vision_outputs, query_outputs = self.get_image_features( pixel_values, @@ -1582,8 +1409,6 @@ def forward( interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) - vision_outputs = vision_outputs.to_tuple() if not return_dict else vision_outputs - query_outputs = query_outputs.to_tuple() if not return_dict else query_outputs if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) @@ -1599,13 +1424,9 @@ def forward( outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - use_cache=use_cache, **kwargs, ) - logits = outputs.logits if return_dict else outputs[0] + logits = outputs[0] loss = None if labels is not None: loss = self.loss_function( @@ -1613,20 +1434,17 @@ def forward( ) else: + kwargs["return_dict"] = True outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, labels=labels, - use_cache=use_cache, **kwargs, ) - loss = outputs.loss if return_dict else outputs[0] - logits = outputs.logits if return_dict else outputs[1] + loss = outputs.loss + logits = outputs.logits return InstructBlipForConditionalGenerationModelOutput( loss=loss, diff --git a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py index adf9a9ced698..d66d8af2e8a5 100644 --- a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py @@ -40,6 +40,7 @@ from ...processing_utils import Unpack from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int +from ...utils.generic import OutputRecorder, check_model_inputs from ..auto import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM from .configuration_instructblipvideo import ( InstructBlipVideoConfig, @@ -190,7 +191,6 @@ def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" @@ -207,13 +207,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
- ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, @@ -229,8 +223,7 @@ def forward( attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.projection(attn_output) - outputs = (attn_output, attn_weights) if output_attentions else (attn_output, None) - return outputs + return attn_output, attn_weights class InstructBlipVideoMLP(nn.Module): @@ -257,29 +250,20 @@ def __init__(self, config: InstructBlipVideoConfig): self.mlp = InstructBlipVideoMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + @auto_docstring def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - `(config.encoder_attention_heads,)`. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, head_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = hidden_states + residual residual = hidden_states @@ -288,12 +272,7 @@ def forward( hidden_states = hidden_states + residual - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states class InstructBlipVideoEncoder(nn.Module): @@ -312,67 +291,22 @@ def __init__(self, config: InstructBlipVideoConfig): self.layers = nn.ModuleList([InstructBlipVideoEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutput]: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Embedded representation of the inputs. Should be float, not int tokens. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( + for encoder_layer in self.layers: + hidden_states = encoder_layer( hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) + return BaseModelOutput(last_hidden_state=hidden_states) class InstructBlipVideoQFormerMultiHeadAttention(nn.Module): @@ -428,7 +362,7 @@ def forward( head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, - output_attentions=False, + **kwargs: Unpack[TransformersKwargs], ): # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be @@ -494,9 +428,7 @@ def forward( new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - - return outputs + return context_layer, attention_probs class InstructBlipVideoQFormerSelfOutput(nn.Module): @@ -545,19 +477,18 @@ def forward( head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: - self_outputs = self.attention( + attn_output, _ = self.attention( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, + **kwargs, ) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs + attention_output = self.output(attn_output, hidden_states) + return attention_output class InstructBlipVideoQFormerIntermediate(nn.Module): @@ -617,17 +548,15 @@ def forward( head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, - output_attentions=False, query_length=0, + **kwargs: Unpack[TransformersKwargs], ): - self_attention_outputs = self.attention( + attention_output = self.attention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, - output_attentions=output_attentions, + **kwargs, ) - attention_output = self_attention_outputs[0] - outputs = self_attention_outputs[1:] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] @@ -635,17 +564,14 @@ def forward( 
if self.has_cross_attention: if encoder_hidden_states is None: raise ValueError("encoder_hidden_states must be given for cross-attention layers") - cross_attention_outputs = self.crossattention( + query_attention_output = self.crossattention( query_attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, + **kwargs, ) - query_attention_output = cross_attention_outputs[0] - # add cross attentions if we output attention weights - outputs = outputs + cross_attention_outputs[1:] layer_output = apply_chunking_to_forward( self.feed_forward_chunk_query, @@ -669,9 +595,7 @@ def forward( self.seq_len_dim, attention_output, ) - outputs = (layer_output,) + outputs - - return outputs + return layer_output def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) @@ -693,6 +617,7 @@ def __init__(self, config): ) self.gradient_checkpointing = False + @can_return_tuple def forward( self, hidden_states, @@ -700,57 +625,25 @@ def forward( head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, query_length=0, + **kwargs: Unpack[TransformersKwargs], ): - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = () if output_attentions else None - for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - layer_head_mask = head_mask[i] if head_mask is not None else None - layer_outputs = layer_module( + hidden_states = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, query_length=query_length, + **kwargs, ) - hidden_states = layer_outputs[0] - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - if query_length > 0 and layer_module.has_cross_attention: - all_cross_attentions = all_cross_attentions + (layer_outputs[2],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, ) @@ -848,6 +741,11 @@ class InstructBlipVideoVisionModel(InstructBlipVideoPreTrainedModel): main_input_name = "pixel_values" config: InstructBlipVideoVisionConfig + _can_record_outputs = { + "hidden_states": InstructBlipVideoEncoderLayer, + "attentions": InstructBlipVideoAttention, + } + def __init__(self, config: InstructBlipVideoVisionConfig): super().__init__(config) self.config = config @@ -859,21 +757,14 @@ def __init__(self, config: InstructBlipVideoVisionConfig): self.post_init() + @check_model_inputs @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: 
Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutputWithPooling]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if pixel_values is None: raise ValueError("You have to specify pixel_values") @@ -881,9 +772,7 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) last_hidden_state = encoder_outputs[0] @@ -892,14 +781,9 @@ def forward( pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) - if not return_dict: - return (last_hidden_state, pooled_output) + encoder_outputs[1:] - return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): @@ -917,6 +801,16 @@ class InstructBlipVideoQFormerModel(InstructBlipVideoPreTrainedModel): _supports_sdpa = False _supports_flex_attn = False + _can_record_outputs = { + "hidden_states": InstructBlipVideoQFormerLayer, + "attentions": [ + OutputRecorder(InstructBlipVideoQFormerMultiHeadAttention, index=1, layer_name=".attention"), + ], + "cross_attentions": [ + OutputRecorder(InstructBlipVideoQFormerMultiHeadAttention, index=1, layer_name=".crossattention"), + ], + } + def __init__(self, config: InstructBlipVideoQFormerConfig): super().__init__(config) self.config = config @@ -984,6 +878,8 @@ def get_extended_attention_mask( extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask + @check_model_inputs + @auto_docstring def forward( self, input_ids: torch.LongTensor, @@ -993,35 +889,12 @@ def forward( head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" - encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - past_key_values (`Cache` of length `config.n_layers` with each tuple having 4 tensors of: - shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and - value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are - used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key - value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape - `(batch_size, sequence_length)`. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). + query_embeds (): + """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if input_ids is None and query_embeds is None: raise ValueError("You have to specify query_embeds when input_ids is None") @@ -1076,24 +949,15 @@ def forward( head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, query_length=query_length, + **kwargs, ) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, ) @@ -1241,14 +1105,6 @@ def forward( - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be - provided to serve as text prompt, which the language model can continue. - - Indices can be obtained using [`InstructBlipVideoProcessor`]. See [`InstructBlipVideoProcessor.__call__`] for - details. - - [What are input IDs?](../glossary#input-ids) decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. 
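Every file touched here follows the same recipe: the per-call `output_attentions` / `output_hidden_states` / `return_dict` plumbing disappears from the layer and encoder forwards, and each model instead declares which submodules emit those tensors via `_can_record_outputs`, with the `@check_model_inputs` decorator doing the collection. The snippet below is a minimal, self-contained sketch of that recording idea using plain forward hooks; it only illustrates the mechanism, it is not the actual `check_model_inputs` implementation, and `ToyLayer` / `ToyEncoder` are invented names for the example.

```python
import torch
from torch import nn


class ToyLayer(nn.Module):
    """Stands in for a real encoder layer: it now returns only `hidden_states`."""

    def __init__(self, dim: int):
        super().__init__()
        self.proj = nn.Linear(dim, dim)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return torch.relu(self.proj(hidden_states))


class ToyEncoder(nn.Module):
    # Declarative mapping, mirroring `_can_record_outputs` in the hunks above.
    _can_record_outputs = {"hidden_states": ToyLayer}

    def __init__(self, dim: int = 8, depth: int = 3):
        super().__init__()
        self.layers = nn.ModuleList(ToyLayer(dim) for _ in range(depth))

    def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool = False):
        recorded, handles = [], []
        if output_hidden_states:
            target = self._can_record_outputs["hidden_states"]
            for module in self.modules():
                if isinstance(module, target):
                    # A forward hook replaces the old `all_hidden_states += (hidden_states,)` loop.
                    handles.append(module.register_forward_hook(lambda mod, args, out: recorded.append(out)))
        for layer in self.layers:
            hidden_states = layer(hidden_states)
        for handle in handles:
            handle.remove()
        return hidden_states, tuple(recorded) if recorded else None


last, per_layer = ToyEncoder()(torch.randn(2, 4, 8), output_hidden_states=True)
print(last.shape, len(per_layer))  # torch.Size([2, 4, 8]) 3
```

The real decorator additionally resolves unspecified flags against the model config and writes the recorded tensors into the returned `ModelOutput`, which is why the explicit `hidden_states=...` / `attentions=...` arguments vanish from the `BaseModelOutput*` constructors throughout these diffs.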
diff --git a/src/transformers/models/internvl/modeling_internvl.py b/src/transformers/models/internvl/modeling_internvl.py index e4f98ec6e664..f545d70fc9fd 100644 --- a/src/transformers/models/internvl/modeling_internvl.py +++ b/src/transformers/models/internvl/modeling_internvl.py @@ -481,11 +481,6 @@ class InternVLPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True - _can_record_outputs = { - "hidden_states": "DecoderLayer", - "attentions": "Attention", - } - class InternVLMultiModalProjector(nn.Module): def __init__(self, config: InternVLConfig): diff --git a/src/transformers/models/janus/modeling_janus.py b/src/transformers/models/janus/modeling_janus.py index 8ee43bd29184..3da38dde14ba 100644 --- a/src/transformers/models/janus/modeling_janus.py +++ b/src/transformers/models/janus/modeling_janus.py @@ -42,6 +42,7 @@ logging, torch_int, ) +from ...utils.generic import check_model_inputs from ..auto import AutoModel from .configuration_janus import JanusConfig, JanusVisionConfig, JanusVQVAEConfig @@ -373,29 +374,20 @@ def __init__(self, config: JanusVisionConfig): self.mlp = JanusVisionMLP(config) self.config = config + @auto_docstring def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): - Input to the layer of shape `(batch, seq_len, embed_dim)`. - attention_mask (`torch.FloatTensor`): - Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*, defaults to `False`): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = residual + hidden_states @@ -404,12 +396,7 @@ def forward( hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states class JanusVisionEncoder(nn.Module): @@ -428,74 +415,157 @@ def __init__(self, config: JanusVisionConfig): self.gradient_checkpointing = False # Ignore copy - @can_return_tuple + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutput: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. 
- - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( + hidden_states = encoder_layer( hidden_states, attention_mask, - output_attentions=output_attentions, + **kwargs, ) - hidden_states = layer_outputs[0] + return BaseModelOutput(last_hidden_state=hidden_states) - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) +class JanusAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." 
+ ) + self.scale = self.head_dim**-0.5 + self.is_causal = False + self.attention_dropout = config.attention_dropout + + # small tweak here compared to CLIP, no bias here + self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False) + + if config.qkv_bias: + q_bias = nn.Parameter(torch.zeros(self.embed_dim)) + v_bias = nn.Parameter(torch.zeros(self.embed_dim)) + else: + q_bias = None + v_bias = None + + if q_bias is not None: + qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) + self.qkv.bias = nn.Parameter(qkv_bias) + + self.projection = nn.Linear(self.embed_dim, self.embed_dim) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + **kwargs, + ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + bsz, tgt_len, embed_dim = hidden_states.size() + + mixed_qkv = self.qkv(hidden_states) + + mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute( + 2, 0, 3, 1, 4 + ) + query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2] + + attention_interface: Callable = eager_attention_forward + + if self.config._attn_implementation != "eager": + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + + attn_output, attn_weights = attention_interface( + self, + query_states, + key_states, + value_states, + attention_mask=None, + dropout=0.0 if not self.training else self.attention_dropout, + scaling=self.scale, + **kwargs, + ) + + attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() + attn_output = self.projection(attn_output) + + return attn_output, attn_weights + + +class JanusMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +class JanusEncoderLayer(GradientCheckpointingLayer): + def __init__(self, config: JanusConfig): + super().__init__() + self.embed_dim = config.hidden_size + self.self_attn = JanusAttention(config) + self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + self.mlp = JanusMLP(config) + self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + + @auto_docstring + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + **kwargs: Unpack[TransformersKwargs], + ) -> tuple[torch.FloatTensor]: + residual = hidden_states - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=encoder_states, - attentions=all_attentions, + hidden_states = self.layer_norm1(hidden_states) + hidden_states, _ = self.self_attn( + hidden_states=hidden_states, + head_mask=attention_mask, + **kwargs, ) + hidden_states = hidden_states + residual + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + + hidden_states = hidden_states + residual + + return hidden_states @auto_docstring 
class JanusVisionModel(JanusPreTrainedModel): main_input_name = "pixel_values" config: JanusVisionConfig + _can_record_outputs = { + "hidden_states": JanusEncoderLayer, + "attentions": JanusAttention, + } def __init__(self, config: JanusVisionConfig): super().__init__(config) @@ -508,21 +578,14 @@ def __init__(self, config: JanusVisionConfig): self.post_init() + @check_model_inputs @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutputWithPooling]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if pixel_values is None: raise ValueError("You have to specify pixel_values") @@ -530,9 +593,7 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + **kwargs, ) last_hidden_state = encoder_outputs[0] @@ -541,14 +602,9 @@ def forward( pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) - if not return_dict: - return (last_hidden_state, pooled_output) + encoder_outputs[1:] - return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): diff --git a/src/transformers/models/llava/modeling_llava.py b/src/transformers/models/llava/modeling_llava.py index a95fce412c1a..b3628f2a1642 100644 --- a/src/transformers/models/llava/modeling_llava.py +++ b/src/transformers/models/llava/modeling_llava.py @@ -124,11 +124,6 @@ class LlavaPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True - _can_record_outputs = { - "hidden_states": "DecoderLayer", - "attentions": "Attention", - } - @auto_docstring( custom_intro=""" diff --git a/src/transformers/models/mistral3/modeling_mistral3.py b/src/transformers/models/mistral3/modeling_mistral3.py index 4b04a88f56bc..975654915aab 100644 --- a/src/transformers/models/mistral3/modeling_mistral3.py +++ b/src/transformers/models/mistral3/modeling_mistral3.py @@ -189,11 +189,6 @@ class Mistral3PreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True - _can_record_outputs = { - "hidden_states": "DecoderLayer", - "attentions": "Attention", - } - @auto_docstring( custom_intro=""" diff --git a/src/transformers/models/ovis2/modeling_ovis2.py b/src/transformers/models/ovis2/modeling_ovis2.py index b415bc810a8c..185065f34327 100644 --- a/src/transformers/models/ovis2/modeling_ovis2.py +++ b/src/transformers/models/ovis2/modeling_ovis2.py @@ -32,7 +32,9 @@ from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel -from ...utils import ModelOutput, auto_docstring, can_return_tuple +from ...processing_utils import Unpack +from ...utils import ModelOutput, TransformersKwargs, auto_docstring, 
can_return_tuple +from ...utils.generic import check_model_inputs from ..auto import AutoModel from .configuration_ovis2 import Ovis2Config, Ovis2VisionConfig @@ -333,17 +335,19 @@ def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: norm_hidden_states = self.rms_norm1(hidden_states) - attn_output, attn_weights = self.attention(hidden_states=norm_hidden_states, attention_mask=attention_mask) + attn_output, attn_weights = self.attention( + hidden_states=norm_hidden_states, attention_mask=attention_mask, **kwargs + ) hidden_states = hidden_states + attn_output norm_hidden_states = self.rms_norm2(hidden_states) mlp_output = self.ffn(norm_hidden_states) hidden_states = hidden_states + mlp_output - return (hidden_states, attn_weights) if output_attentions else (hidden_states, None) + return hidden_states class Ovis2VisionEncoder(nn.Module): @@ -363,67 +367,18 @@ def __init__(self, config: Ovis2VisionConfig): # Ignore copy @can_return_tuple + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutput: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) + hidden_states = encoder_layer(hidden_states, attention_mask, **kwargs) - layer_outputs = encoder_layer( - hidden_states, - attention_mask, - output_attentions=output_attentions, - ) - - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=encoder_states, - attentions=all_attentions, - ) + return BaseModelOutput(last_hidden_state=hidden_states) class Ovis2VisionTransformer(nn.Module): @@ -440,32 +395,20 @@ def forward( self, pixel_values, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs, ): - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=True, + **kwargs, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.rms_norm(last_hidden_state) - return BaseModelOutput( - last_hidden_state=last_hidden_state, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - ) + return BaseModelOutput(last_hidden_state=last_hidden_state) class Ovis2VisualEmbeddingTable(nn.Embedding): @@ -502,6 +445,10 @@ def hard_softmax(logits: torch.Tensor, dim: int): class Ovis2VisionModel(Ovis2PreTrainedModel): config: Ovis2VisionConfig + _can_record_outputs = { + "hidden_states": Ovis2VisionEncoderLayer, + "attentions": Ovis2VisionAttention, + } def __init__(self, config: Ovis2VisionConfig): super().__init__(config) @@ -516,10 +463,10 @@ def __init__(self, config: Ovis2VisionConfig): ) self.head_norm = nn.LayerNorm(self.vocab_size - self.num_visual_indicator_tokens) - def forward(self, pixel_values: torch.FloatTensor) -> tuple[torch.Tensor, torch.Tensor]: - outputs = self.transformer(pixel_values) + @check_model_inputs + def forward(self, pixel_values: torch.FloatTensor, **kwargs) -> tuple[torch.Tensor, torch.Tensor]: + outputs = self.transformer(pixel_values, **kwargs) last_hidden_state = outputs.last_hidden_state - if self.config.hidden_stride > 1: num_images, seq_len, hidden_dim = last_hidden_state.shape hidden_stride = self.config.hidden_stride diff --git a/src/transformers/models/ovis2/modular_ovis2.py b/src/transformers/models/ovis2/modular_ovis2.py index fee26273d1ed..0539d4a93bed 100644 --- a/src/transformers/models/ovis2/modular_ovis2.py +++ b/src/transformers/models/ovis2/modular_ovis2.py @@ -22,7 +22,9 @@ from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel 
-from ...utils import auto_docstring, can_return_tuple +from ...processing_utils import Unpack +from ...utils import TransformersKwargs, auto_docstring, can_return_tuple +from ...utils.generic import check_model_inputs from ..aimv2.modeling_aimv2 import Aimv2Attention, Aimv2EncoderLayer from ..auto import AutoModel from ..llama.modeling_llama import LlamaMLP, LlamaRMSNorm @@ -90,6 +92,20 @@ def __init__(self, config: Ovis2VisionConfig): super().__init__() self.layers = nn.ModuleList([Ovis2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) + @can_return_tuple + @auto_docstring + def forward( + self, + inputs_embeds, + attention_mask: Optional[torch.Tensor] = None, + **kwargs: Unpack[TransformersKwargs], + ) -> BaseModelOutput: + hidden_states = inputs_embeds + for encoder_layer in self.layers: + hidden_states = encoder_layer(hidden_states, attention_mask, **kwargs) + + return BaseModelOutput(last_hidden_state=hidden_states) + class Ovis2VisionTransformer(nn.Module): def __init__(self, config: Ovis2VisionConfig): @@ -105,32 +121,20 @@ def forward( self, pixel_values, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs, ): - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=True, + **kwargs, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.rms_norm(last_hidden_state) - return BaseModelOutput( - last_hidden_state=last_hidden_state, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - ) + return BaseModelOutput(last_hidden_state=last_hidden_state) class Ovis2VisualEmbeddingTable(nn.Embedding): @@ -157,6 +161,10 @@ class Ovis2PreTrainedModel(PreTrainedModel): class Ovis2VisionModel(Ovis2PreTrainedModel): config: Ovis2VisionConfig + _can_record_outputs = { + "hidden_states": Ovis2VisionEncoderLayer, + "attentions": Ovis2VisionAttention, + } def __init__(self, config: Ovis2VisionConfig): super().__init__(config) @@ -171,10 +179,10 @@ def __init__(self, config: Ovis2VisionConfig): ) self.head_norm = nn.LayerNorm(self.vocab_size - self.num_visual_indicator_tokens) - def forward(self, pixel_values: torch.FloatTensor) -> tuple[torch.Tensor, torch.Tensor]: - outputs = self.transformer(pixel_values) + @check_model_inputs + def forward(self, pixel_values: torch.FloatTensor, **kwargs) -> tuple[torch.Tensor, torch.Tensor]: + outputs = self.transformer(pixel_values, **kwargs) last_hidden_state = outputs.last_hidden_state - if self.config.hidden_stride > 1: num_images, seq_len, hidden_dim = last_hidden_state.shape hidden_stride = self.config.hidden_stride diff --git a/src/transformers/models/perception_lm/modeling_perception_lm.py b/src/transformers/models/perception_lm/modeling_perception_lm.py index b3f40d778ae3..4210cd73e545 100644 --- a/src/transformers/models/perception_lm/modeling_perception_lm.py +++ b/src/transformers/models/perception_lm/modeling_perception_lm.py @@ -99,11 +99,6 @@ class PerceptionLMPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True - _can_record_outputs = { 
- "hidden_states": "DecoderLayer", - "attentions": "Attention", - } - @dataclass @auto_docstring( diff --git a/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py b/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py index 494f1fb038bd..54fd2b2e3371 100644 --- a/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +++ b/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py @@ -109,7 +109,7 @@ def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, - **kwargs, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" input_shape = hidden_states.shape[:-1] @@ -148,29 +148,20 @@ def __init__(self, config: Phi4MultimodalVisionConfig): self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = Phi4MultimodalVisionMLP(config) + @auto_docstring def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): - Input to the layer of shape `(batch, seq_len, embed_dim)`. - attention_mask (`torch.FloatTensor`): - Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*, defaults to `False`): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = residual + hidden_states @@ -179,12 +170,7 @@ def forward( hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states class Phi4MultimodalVisionEncoder(nn.Module): @@ -205,68 +191,22 @@ def __init__(self, config: Phi4MultimodalVisionConfig): self.gradient_checkpointing = False # Ignore copy - @can_return_tuple + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutput: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. 
- output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( + hidden_states = encoder_layer( hidden_states, attention_mask, - output_attentions=output_attentions, + **kwargs, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=encoder_states, - attentions=all_attentions, - ) + return BaseModelOutput(last_hidden_state=hidden_states) def _trunc_normal_(tensor, mean, std, a, b): @@ -376,6 +316,11 @@ class Phi4MultimodalVisionPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True + _can_record_outputs = { + "hidden_states": Phi4MultimodalVisionEncoderLayer, + "attentions": Phi4MultimodalVisionAttention, + } + def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Phi4MultimodalVisionEmbeddings): @@ -543,18 +488,13 @@ def __init__(self, config: Phi4MultimodalVisionConfig): def get_input_embeddings(self) -> nn.Module: return self.embeddings.patch_embedding + @check_model_inputs def forward( self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - batch_size = pixel_values.size(0) if patch_attention_mask is None: patch_attention_mask = torch.ones( @@ -585,8 +525,7 @@ def forward( encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) last_hidden_state = encoder_outputs.last_hidden_state @@ -600,8 +539,6 @@ def forward( return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) diff --git a/src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py b/src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py index c6bdc0cc6c34..404fdff1168a 100644 --- a/src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py +++ b/src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py @@ -489,7 +489,7 @@ def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, - **kwargs, + **kwargs: Unpack[TransformersKwargs], ) 
-> tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" input_shape = hidden_states.shape[:-1] @@ -544,6 +544,11 @@ class Phi4MultimodalVisionPreTrainedModel(SiglipPreTrainedModel): _supports_sdpa = True _supports_flex_attn = True + _can_record_outputs = { + "hidden_states": Phi4MultimodalVisionEncoderLayer, + "attentions": Phi4MultimodalVisionAttention, + } + def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Phi4MultimodalVisionEmbeddings): @@ -667,18 +672,13 @@ def __init__(self, config: Phi4MultimodalVisionConfig): def get_input_embeddings(self) -> nn.Module: return self.embeddings.patch_embedding + @check_model_inputs def forward( self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - batch_size = pixel_values.size(0) if patch_attention_mask is None: patch_attention_mask = torch.ones( @@ -709,8 +709,7 @@ def forward( encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) last_hidden_state = encoder_outputs.last_hidden_state @@ -724,8 +723,6 @@ def forward( return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) diff --git a/src/transformers/models/siglip/modeling_siglip.py b/src/transformers/models/siglip/modeling_siglip.py index 74e5d7fd5a6f..dfb9daf81262 100644 --- a/src/transformers/models/siglip/modeling_siglip.py +++ b/src/transformers/models/siglip/modeling_siglip.py @@ -30,7 +30,9 @@ from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel -from ...utils import ModelOutput, auto_docstring, can_return_tuple, torch_int +from ...processing_utils import Unpack +from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, torch_int +from ...utils.generic import check_model_inputs from .configuration_siglip import SiglipConfig, SiglipTextConfig, SiglipVisionConfig @@ -428,29 +430,20 @@ def __init__(self, config: Union[SiglipVisionConfig, SiglipTextConfig]): self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = SiglipMLP(config) + @auto_docstring def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): - Input to the layer of shape `(batch, seq_len, embed_dim)`. - attention_mask (`torch.FloatTensor`): - Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*, defaults to `False`): - Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under - returned tensors for more detail. - """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = residual + hidden_states @@ -459,12 +452,7 @@ def forward( hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states @auto_docstring @@ -484,6 +472,11 @@ class SiglipPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True + _can_record_outputs = { + "hidden_states": SiglipEncoderLayer, + "attentions": SiglipAttention, + } + def _init_weights(self, module): """Initialize the weights""" if isinstance(module, SiglipVisionEmbeddings): @@ -548,68 +541,22 @@ def __init__(self, config: SiglipConfig): self.gradient_checkpointing = False # Ignore copy - @can_return_tuple + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutput: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( + hidden_states = encoder_layer( hidden_states, attention_mask, - output_attentions=output_attentions, + **kwargs, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=encoder_states, - attentions=all_attentions, - ) + return BaseModelOutput(last_hidden_state=hidden_states) class SiglipTextTransformer(nn.Module): @@ -630,14 +577,8 @@ def forward( input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - if input_ids is None: raise ValueError("You have to specify input_ids") @@ -658,11 +599,10 @@ def forward( encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) - last_hidden_state = encoder_outputs.last_hidden_state + last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) # The model uses the last token's hidden state, which may be padding. 
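With `_can_record_outputs` declared on `SiglipPreTrainedModel` and the encoder reduced to the bare layer loop above, callers keep asking for attentions and hidden states exactly as before, now as plain keyword arguments on the user-facing models (`SiglipVisionModel`, converted a few hunks further down, is the shortest example). A usage sketch, assuming the public `google/siglip-base-patch16-224` checkpoint is available and that the decorator falls back to the config defaults the deleted boilerplate used to apply; the random tensor stands in for `AutoProcessor` output purely for illustration:

```python
import torch
from transformers import SiglipVisionModel

model = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224")
pixel_values = torch.randn(1, 3, 224, 224)  # dummy batch; real inputs come from AutoProcessor

with torch.no_grad():
    out = model(pixel_values, output_attentions=True, output_hidden_states=True)

# Both tuples are recorded through `_can_record_outputs` (SiglipAttention / SiglipEncoderLayer)
# instead of being threaded through every layer's forward signature.
print(len(out.attentions), len(out.hidden_states), out.pooler_output.shape)
```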
@@ -672,8 +612,6 @@ def forward( return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) @@ -704,8 +642,7 @@ def forward( input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: r""" Examples: @@ -728,8 +665,7 @@ def forward( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) @@ -751,24 +687,17 @@ def __init__(self, config: SiglipVisionConfig): def forward( self, pixel_values, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) - last_hidden_state = encoder_outputs.last_hidden_state + last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooler_output = self.head(last_hidden_state) if self.use_head else None @@ -776,8 +705,6 @@ def forward( return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooler_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) @@ -825,14 +752,13 @@ def __init__(self, config: SiglipVisionConfig): def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, pixel_values, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: r""" Examples: @@ -857,9 +783,8 @@ def forward( return self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) @@ -905,8 +830,7 @@ def get_text_features( input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: r""" Returns: @@ -927,18 +851,11 @@ def get_text_features( >>> with torch.no_grad(): ... text_features = model.get_text_features(**inputs) ```""" - # Use SigLIP model's config for some fields (if specified) instead of those of vision & text components. 
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - text_outputs: BaseModelOutputWithPooling = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) pooled_output = text_outputs.pooler_output @@ -949,9 +866,8 @@ def get_text_features( def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: r""" Returns: @@ -977,23 +893,17 @@ def get_image_features( >>> with torch.no_grad(): ... image_features = model.get_image_features(**inputs) ```""" - # Use SiglipModel's config for some fields (if specified) instead of those of vision & text components. - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - vision_outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) pooled_output = vision_outputs.pooler_output return pooled_output + # NOTE: SiglipModel uses Pretrained backbones, so we don't need to add `check_model_inputs` here @can_return_tuple @auto_docstring def forward( @@ -1003,9 +913,8 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> SiglipOutput: r""" return_loss (`bool`, *optional*): @@ -1037,25 +946,17 @@ def forward( >>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'") 31.9% that image 0 is 'a photo of 2 cats' ```""" - # Use SigLIP model's config for some fields (if specified) instead of those of vision & text components. 
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - vision_outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) text_outputs: BaseModelOutputWithPooling = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) image_embeds = vision_outputs.pooler_output @@ -1120,15 +1021,15 @@ def __init__(self, config: SiglipConfig) -> None: # Initialize weights and apply final processing self.post_init() + # NOTE: SiglipModel uses Pretrained backbones, so we don't need to add `check_model_inputs` here @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], ) -> ImageClassifierOutput: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): @@ -1161,16 +1062,10 @@ def forward( >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) Predicted class: LABEL_1 ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, + **kwargs, ) sequence_output = outputs.last_hidden_state @@ -1208,8 +1103,6 @@ def forward( return ImageClassifierOutput( loss=loss, logits=logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, ) diff --git a/src/transformers/models/siglip2/modeling_siglip2.py b/src/transformers/models/siglip2/modeling_siglip2.py index 40c03dc5980f..1e992f5533a0 100644 --- a/src/transformers/models/siglip2/modeling_siglip2.py +++ b/src/transformers/models/siglip2/modeling_siglip2.py @@ -35,7 +35,9 @@ from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel -from ...utils import ModelOutput, auto_docstring, can_return_tuple +from ...processing_utils import Unpack +from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple +from ...utils.generic import check_model_inputs from .configuration_siglip2 import Siglip2Config, Siglip2TextConfig, Siglip2VisionConfig @@ -322,29 +324,20 @@ def __init__(self, config: Union[Siglip2VisionConfig, Siglip2TextConfig]): self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = Siglip2MLP(config) + @auto_docstring def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): - Input to the 
layer of shape `(batch, seq_len, embed_dim)`. - attention_mask (`torch.FloatTensor`): - Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*, defaults to `False`): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = residual + hidden_states @@ -353,12 +346,7 @@ def forward( hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states class Siglip2Encoder(nn.Module): @@ -377,68 +365,22 @@ def __init__(self, config: Siglip2Config): self.gradient_checkpointing = False # Ignore copy - @can_return_tuple + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutput: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( + hidden_states = encoder_layer( hidden_states, attention_mask, - output_attentions=output_attentions, + **kwargs, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=encoder_states, - attentions=all_attentions, - ) + return BaseModelOutput(last_hidden_state=hidden_states) class Siglip2VisionTransformer(nn.Module): @@ -655,14 +597,8 @@ def forward( input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - if input_ids is None: raise ValueError("You have to specify input_ids") @@ -683,11 +619,10 @@ def forward( encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) - last_hidden_state = encoder_outputs.last_hidden_state + last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) # The model uses the last token's hidden state, which may be padding. 
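One small point on the `last_hidden_state = encoder_outputs[0]` rewrites in the text-transformer hunks: `ModelOutput` subclasses act as ordered containers over their populated fields, so positional indexing, key access, and attribute access are interchangeable and nothing changes for downstream code. A quick self-contained check, using `BaseModelOutput` directly for illustration:

```python
import torch
from transformers.modeling_outputs import BaseModelOutput

out = BaseModelOutput(last_hidden_state=torch.zeros(1, 4, 8))
assert out[0] is out.last_hidden_state       # positional access
assert out["last_hidden_state"] is out[0]    # key access
assert len(out.to_tuple()) == 1              # unset fields (hidden_states, attentions) are skipped
```

This is also why dropping the explicit `hidden_states=` / `attentions=` constructor arguments stays backward compatible for tuple-style callers: fields that were never requested simply do not appear, just as when the old flags were left at `False`.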
@@ -697,8 +632,6 @@ def forward( return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) @@ -719,6 +652,11 @@ class Siglip2PreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True + _can_record_outputs = { + "hidden_states": Siglip2EncoderLayer, + "attentions": Siglip2Attention, + } + def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Siglip2VisionEmbeddings): @@ -793,8 +731,7 @@ def forward( input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: r""" Examples: @@ -817,8 +754,7 @@ def forward( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) @@ -873,7 +809,7 @@ def __init__(self, config: Siglip2VisionConfig): def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, @@ -959,8 +895,7 @@ def get_text_features( input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: r""" Returns: @@ -981,18 +916,11 @@ def get_text_features( >>> with torch.no_grad(): ... text_features = model.get_text_features(**inputs) ```""" - # Use Siglip2 model's config for some fields (if specified) instead of those of vision & text components. 
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - text_outputs: BaseModelOutputWithPooling = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) pooled_output = text_outputs.pooler_output @@ -1056,6 +984,7 @@ def get_image_features( return pooled_output + # NOTE: Siglip2Model uses Pretrained backbones, so we don't need to add `check_model_inputs` here @can_return_tuple @auto_docstring def forward( @@ -1189,6 +1118,7 @@ def __init__(self, config: Siglip2Config) -> None: # Initialize weights and apply final processing self.post_init() + # NOTE: Siglip2Model uses Pretrained backbones, so we don't need to add `check_model_inputs` here @can_return_tuple @auto_docstring def forward( diff --git a/src/transformers/models/smolvlm/modeling_smolvlm.py b/src/transformers/models/smolvlm/modeling_smolvlm.py index 5722dd018b57..3c57939c52b4 100644 --- a/src/transformers/models/smolvlm/modeling_smolvlm.py +++ b/src/transformers/models/smolvlm/modeling_smolvlm.py @@ -40,6 +40,7 @@ can_return_tuple, logging, ) +from ...utils.generic import check_model_inputs from ..auto import AutoModel from .configuration_smolvlm import SmolVLMConfig, SmolVLMVisionConfig @@ -273,29 +274,20 @@ def __init__(self, config: SmolVLMVisionConfig): self.mlp = SmolVLMVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + @auto_docstring def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, + **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): - Input to the layer of shape `(batch, seq_len, embed_dim)`. - attention_mask (`torch.FloatTensor`): - Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*, defaults to `False`): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( + hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, - output_attentions=output_attentions, + **kwargs, ) hidden_states = residual + hidden_states @@ -304,12 +296,7 @@ def forward( hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs + return hidden_states class SmolVLMEncoder(nn.Module): @@ -328,68 +315,22 @@ def __init__(self, config: SmolVLMConfig): self.gradient_checkpointing = False # Ignore copy + @auto_docstring def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
- This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - hidden_states = inputs_embeds for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, - output_attentions=output_attentions, ) - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) + hidden_states = layer_outputs - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) + return BaseModelOutput(last_hidden_state=hidden_states) @auto_docstring( @@ -402,6 +343,10 @@ class SmolVLMVisionTransformer(SmolVLMPreTrainedModel): _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True + _can_record_outputs = { + "hidden_states": SmolVLMEncoderLayer, + "attentions": SmolVLMVisionAttention, + } def __init__(self, config: SmolVLMVisionConfig): super().__init__(config) @@ -419,20 +364,12 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.embeddings = value + @check_model_inputs def forward( self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - batch_size = pixel_values.size(0) if patch_attention_mask is None: patch_size = self.patch_size @@ -459,21 +396,13 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=patch_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - 
return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) - if not return_dict: - return (last_hidden_state,) + encoder_outputs[1:] - return BaseModelOutput( last_hidden_state=last_hidden_state, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, ) diff --git a/src/transformers/models/vipllava/modeling_vipllava.py b/src/transformers/models/vipllava/modeling_vipllava.py index b65d5a46f78b..41c3553988d2 100644 --- a/src/transformers/models/vipllava/modeling_vipllava.py +++ b/src/transformers/models/vipllava/modeling_vipllava.py @@ -126,11 +126,6 @@ class VipLlavaPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_attention_backend = True - _can_record_outputs = { - "hidden_states": "DecoderLayer", - "attentions": "Attention", - } - @auto_docstring( custom_intro=""" diff --git a/src/transformers/utils/generic.py b/src/transformers/utils/generic.py index 890df968bd0a..af4d5c392cd7 100644 --- a/src/transformers/utils/generic.py +++ b/src/transformers/utils/generic.py @@ -1007,6 +1007,11 @@ def wrapper(self, *args, **kwargs): ) for k in capture_flags } + + # We let cross attentions to be saved separately because some models add `cross-attn` layer + # when certain condtions are met. Let's output cross attention if attentions are requested (for BC) + recordable_keys["output_cross_attentions"] = recordable_keys["output_attentions"] + collected_outputs = defaultdict(tuple) monkey_patched_layers = [] diff --git a/tests/models/blip_2/test_modeling_blip_2.py b/tests/models/blip_2/test_modeling_blip_2.py index 7947b617941b..adc63c536b28 100644 --- a/tests/models/blip_2/test_modeling_blip_2.py +++ b/tests/models/blip_2/test_modeling_blip_2.py @@ -1132,6 +1132,7 @@ def prepare_config_and_inputs_for_common(self): def create_and_check_model(self, config, input_ids, attention_mask): model = Blip2TextModelWithProjection(config=config) + model.set_attn_implementation("eager") model.to(torch_device) model.eval() with torch.no_grad(): @@ -1289,6 +1290,7 @@ def prepare_config_and_inputs_for_common(self): def create_and_check_model(self, config, pixel_values): model = Blip2VisionModelWithProjection(config=config) model.to(torch_device) + model.set_attn_implementation("eager") model.eval() with torch.no_grad(): result = model(pixel_values, output_attentions=True, output_hidden_states=True) diff --git a/tests/models/idefics/test_modeling_idefics.py b/tests/models/idefics/test_modeling_idefics.py index 454b38975cdd..407f1dd6b99b 100644 --- a/tests/models/idefics/test_modeling_idefics.py +++ b/tests/models/idefics/test_modeling_idefics.py @@ -538,8 +538,8 @@ def test_attention_outputs(self): with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions - # IDEFICS does not support outputting attention score because it uses SDPA under the hood - self.assertTrue(attentions[0] is None) + self.assertFalse(attentions[0] is None) + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) out_len = len(outputs) # Check attention is always last and order is fine @@ -556,8 +556,7 @@ def test_attention_outputs(self): self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) - # IDEFICS does not support outputting attention score because it uses SDPA under the hood - self.assertTrue(self_attentions[0] is None) + 
self.assertFalse(self_attentions[0] is None) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): From c6ee4594cf70c003e7b0080f27c2e1446b1a8b82 Mon Sep 17 00:00:00 2001 From: raushan Date: Thu, 28 Aug 2025 17:38:36 +0200 Subject: [PATCH 07/19] fix some tests --- .../models/idefics/modeling_idefics.py | 10 ++++++++++ .../modeling_instructblipvideo.py | 5 +++-- .../models/janus/image_processing_janus.py | 7 +------ src/transformers/models/ovis2/modeling_ovis2.py | 8 +------- .../models/siglip/modeling_siglip.py | 5 ++--- .../models/siglip2/modeling_siglip2.py | 17 ++++------------- .../models/tvp/image_processing_tvp_fast.py | 3 --- src/transformers/utils/generic.py | 6 ++++-- 8 files changed, 25 insertions(+), 36 deletions(-) diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index 26f7f2fe90be..8a10f6a2e5ec 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -776,6 +776,14 @@ def forward( past_key_values: Optional[tuple[torch.Tensor]] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: + r""" + image_hidden_states (): + + image_attention_mask (): + + cross_attention_gate (): + + """ if image_hidden_states is None: raise ValueError( "`image_hidden_states` is required for Idefics cross attention module which are visual features to be" @@ -1067,7 +1075,9 @@ def forward( attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, + position_ids=position_ids, ) + print(self.config._attn_implementation, attention_mask[0, 0, :32, :32]) hidden_states = inputs_embeds diff --git a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py index d66d8af2e8a5..ef1b8b5b9a9b 100644 --- a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py @@ -892,8 +892,9 @@ def forward( **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" - query_embeds (): - + query_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Hidden states to be used in the attention computation. If cross-attention, + will be used for the query (i.e., key and value will use the encoder_hidden_states). 
""" if input_ids is None and query_embeds is None: raise ValueError("You have to specify query_embeds when input_ids is None") diff --git a/src/transformers/models/janus/image_processing_janus.py b/src/transformers/models/janus/image_processing_janus.py index ac2012c62b04..0c4056f5ba00 100644 --- a/src/transformers/models/janus/image_processing_janus.py +++ b/src/transformers/models/janus/image_processing_janus.py @@ -41,12 +41,7 @@ valid_images, validate_preprocess_arguments, ) -from ...utils import ( - TensorType, - filter_out_non_signature_kwargs, - is_vision_available, - logging, -) +from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_vision_available(): diff --git a/src/transformers/models/ovis2/modeling_ovis2.py b/src/transformers/models/ovis2/modeling_ovis2.py index 185065f34327..888733e05b1c 100644 --- a/src/transformers/models/ovis2/modeling_ovis2.py +++ b/src/transformers/models/ovis2/modeling_ovis2.py @@ -34,7 +34,6 @@ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple -from ...utils.generic import check_model_inputs from ..auto import AutoModel from .configuration_ovis2 import Ovis2Config, Ovis2VisionConfig @@ -445,10 +444,6 @@ def hard_softmax(logits: torch.Tensor, dim: int): class Ovis2VisionModel(Ovis2PreTrainedModel): config: Ovis2VisionConfig - _can_record_outputs = { - "hidden_states": Ovis2VisionEncoderLayer, - "attentions": Ovis2VisionAttention, - } def __init__(self, config: Ovis2VisionConfig): super().__init__(config) @@ -463,10 +458,9 @@ def __init__(self, config: Ovis2VisionConfig): ) self.head_norm = nn.LayerNorm(self.vocab_size - self.num_visual_indicator_tokens) - @check_model_inputs def forward(self, pixel_values: torch.FloatTensor, **kwargs) -> tuple[torch.Tensor, torch.Tensor]: outputs = self.transformer(pixel_values, **kwargs) - last_hidden_state = outputs.last_hidden_state + last_hidden_state = outputs[0] if self.config.hidden_stride > 1: num_images, seq_len, hidden_dim = last_hidden_state.shape hidden_stride = self.config.hidden_stride diff --git a/src/transformers/models/siglip/modeling_siglip.py b/src/transformers/models/siglip/modeling_siglip.py index dfb9daf81262..cbb44845666d 100644 --- a/src/transformers/models/siglip/modeling_siglip.py +++ b/src/transformers/models/siglip/modeling_siglip.py @@ -635,7 +635,7 @@ def get_input_embeddings(self) -> nn.Module: def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, @@ -1021,8 +1021,7 @@ def __init__(self, config: SiglipConfig) -> None: # Initialize weights and apply final processing self.post_init() - # NOTE: SiglipModel uses Pretrained backbones, so we don't need to add `check_model_inputs` here - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, diff --git a/src/transformers/models/siglip2/modeling_siglip2.py b/src/transformers/models/siglip2/modeling_siglip2.py index 1e992f5533a0..4ba582c27fda 100644 --- a/src/transformers/models/siglip2/modeling_siglip2.py +++ b/src/transformers/models/siglip2/modeling_siglip2.py @@ -724,7 +724,7 @@ def get_input_embeddings(self) -> nn.Module: def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, @@ -1118,8 +1118,7 @@ def 
__init__(self, config: Siglip2Config) -> None: # Initialize weights and apply final processing self.post_init() - # NOTE: Siglip2Model uses Pretrained backbones, so we don't need to add `check_model_inputs` here - @can_return_tuple + @check_model_inputs @auto_docstring def forward( self, @@ -1127,8 +1126,7 @@ def forward( pixel_attention_mask: Optional[torch.Tensor] = None, spatial_shapes: Optional[torch.LongTensor] = None, labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, + **kwargs: Unpack[TransformersKwargs], ) -> ImageClassifierOutput: r""" pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*): @@ -1166,17 +1164,12 @@ def forward( Predicted class: LABEL_1 ``` """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values, attention_mask=pixel_attention_mask, spatial_shapes=spatial_shapes, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, + **kwargs, ) sequence_output = outputs.last_hidden_state @@ -1219,8 +1212,6 @@ def forward( return ImageClassifierOutput( loss=loss, logits=logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, ) diff --git a/src/transformers/models/tvp/image_processing_tvp_fast.py b/src/transformers/models/tvp/image_processing_tvp_fast.py index 3eec7e2d9c3d..90c39ea49fb0 100644 --- a/src/transformers/models/tvp/image_processing_tvp_fast.py +++ b/src/transformers/models/tvp/image_processing_tvp_fast.py @@ -101,9 +101,6 @@ def preprocess( videos: Union[ImageInput, list[ImageInput], list[list[ImageInput]]], **kwargs: Unpack[TvpFastImageProcessorKwargs], ) -> BatchFeature: - """ - Preprocess videos using the fast image processor. - """ return super().preprocess(videos, **kwargs) def _further_process_kwargs( diff --git a/src/transformers/utils/generic.py b/src/transformers/utils/generic.py index af4d5c392cd7..6f0464a0a290 100644 --- a/src/transformers/utils/generic.py +++ b/src/transformers/utils/generic.py @@ -1010,7 +1010,8 @@ def wrapper(self, *args, **kwargs): # We let cross attentions to be saved separately because some models add `cross-attn` layer # when certain condtions are met. 
Let's output cross attention if attentions are requested (for BC) - recordable_keys["output_cross_attentions"] = recordable_keys["output_attentions"] + if "output_attentions" in recordable_keys: + recordable_keys["output_cross_attentions"] = recordable_keys["output_attentions"] collected_outputs = defaultdict(tuple) monkey_patched_layers = [] @@ -1074,10 +1075,11 @@ def wrapped_forward(*args, **kwargs): # Inject collected outputs into model output for key in collected_outputs: if key == "hidden_states": - collected_outputs[key] = collected_outputs[key][:-1] if hasattr(outputs, "vision_hidden_states"): + collected_outputs[key] = collected_outputs[key][:-1] collected_outputs[key] += (outputs.vision_hidden_states,) elif hasattr(outputs, "last_hidden_state"): + collected_outputs[key] = collected_outputs[key][:-1] collected_outputs[key] += (outputs.last_hidden_state,) outputs[key] = collected_outputs[key] From 9a3d9bd954ddd644e050294d391959ee8289c743 Mon Sep 17 00:00:00 2001 From: raushan Date: Thu, 28 Aug 2025 17:43:34 +0200 Subject: [PATCH 08/19] fix copies --- .../modeling_instructblipvideo.py | 184 +++++++++--------- .../models/siglip2/modeling_siglip2.py | 150 +++++++------- 2 files changed, 171 insertions(+), 163 deletions(-) diff --git a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py index ef1b8b5b9a9b..71625453a026 100644 --- a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py @@ -126,6 +126,45 @@ def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: boo return embeddings +@auto_docstring +class InstructBlipVideoPreTrainedModel(PreTrainedModel): + config: InstructBlipVideoConfig + base_model_prefix = "blip" + supports_gradient_checkpointing = True + _supports_attention_backend = True + _supports_flash_attn = True + _supports_sdpa = True + _supports_flex_attn = True + + _can_compile_fullgraph = True + + _no_split_modules = [ + "InstructBlipVideoQFormerEmbeddings", + "InstructBlipVideoAttention", + "InstructBlipVideoQFormerMultiHeadAttention", + "InstructBlipVideoQFormerSelfOutput", + ] + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_range + + if isinstance(module, (nn.Linear, nn.Conv2d)): + module.weight.data.normal_(mean=0.0, std=factor) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=factor) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, InstructBlipVideoVisionEmbeddings): + nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor) + nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor) + elif isinstance(module, (InstructBlipVideoForConditionalGeneration, InstructBlipVideoModel)): + module.query_tokens.data.zero_() + + # Adapted from transformers.models.siglip.modeling_siglip.eager_attention_forward -> InstructBlipVideo doesn't cast attn weights to fp32 def eager_attention_forward( module: nn.Module, @@ -309,6 +348,59 @@ def forward( return BaseModelOutput(last_hidden_state=hidden_states) +class InstructBlipVideoVisionModel(InstructBlipVideoPreTrainedModel): + main_input_name = "pixel_values" + config: InstructBlipVideoVisionConfig + + _can_record_outputs = { + "hidden_states": InstructBlipVideoEncoderLayer, + 
"attentions": InstructBlipVideoAttention, + } + + def __init__(self, config: InstructBlipVideoVisionConfig): + super().__init__(config) + self.config = config + embed_dim = config.hidden_size + + self.embeddings = InstructBlipVideoVisionEmbeddings(config) + self.encoder = InstructBlipVideoEncoder(config) + self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + + self.post_init() + + @check_model_inputs + @auto_docstring + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + interpolate_pos_encoding: bool = False, + **kwargs: Unpack[TransformersKwargs], + ) -> Union[tuple, BaseModelOutputWithPooling]: + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + **kwargs, + ) + + last_hidden_state = encoder_outputs[0] + last_hidden_state = self.post_layernorm(last_hidden_state) + + pooled_output = last_hidden_state[:, 0, :] + pooled_output = self.post_layernorm(pooled_output) + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + ) + + def get_input_embeddings(self): + return self.embeddings + + class InstructBlipVideoQFormerMultiHeadAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() @@ -698,98 +790,6 @@ def forward( return embeddings -@auto_docstring -class InstructBlipVideoPreTrainedModel(PreTrainedModel): - config: InstructBlipVideoConfig - base_model_prefix = "blip" - supports_gradient_checkpointing = True - _supports_attention_backend = True - _supports_flash_attn = True - _supports_sdpa = True - _supports_flex_attn = True - - _can_compile_fullgraph = True - - _no_split_modules = [ - "InstructBlipVideoQFormerEmbeddings", - "InstructBlipVideoAttention", - "InstructBlipVideoQFormerMultiHeadAttention", - "InstructBlipVideoQFormerSelfOutput", - ] - - def _init_weights(self, module): - """Initialize the weights""" - factor = self.config.initializer_range - - if isinstance(module, (nn.Linear, nn.Conv2d)): - module.weight.data.normal_(mean=0.0, std=factor) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=factor) - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - elif isinstance(module, InstructBlipVideoVisionEmbeddings): - nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor) - nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor) - elif isinstance(module, (InstructBlipVideoForConditionalGeneration, InstructBlipVideoModel)): - module.query_tokens.data.zero_() - - -class InstructBlipVideoVisionModel(InstructBlipVideoPreTrainedModel): - main_input_name = "pixel_values" - config: InstructBlipVideoVisionConfig - - _can_record_outputs = { - "hidden_states": InstructBlipVideoEncoderLayer, - "attentions": InstructBlipVideoAttention, - } - - def __init__(self, config: InstructBlipVideoVisionConfig): - super().__init__(config) - self.config = config - embed_dim = config.hidden_size - - self.embeddings = InstructBlipVideoVisionEmbeddings(config) - self.encoder = InstructBlipVideoEncoder(config) - self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) - - self.post_init() - - @check_model_inputs - @auto_docstring - def forward( - self, - pixel_values: Optional[torch.FloatTensor] = None, - 
interpolate_pos_encoding: bool = False, - **kwargs: Unpack[TransformersKwargs], - ) -> Union[tuple, BaseModelOutputWithPooling]: - if pixel_values is None: - raise ValueError("You have to specify pixel_values") - - hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) - - encoder_outputs = self.encoder( - inputs_embeds=hidden_states, - **kwargs, - ) - - last_hidden_state = encoder_outputs[0] - last_hidden_state = self.post_layernorm(last_hidden_state) - - pooled_output = last_hidden_state[:, 0, :] - pooled_output = self.post_layernorm(pooled_output) - - return BaseModelOutputWithPooling( - last_hidden_state=last_hidden_state, - pooler_output=pooled_output, - ) - - def get_input_embeddings(self): - return self.embeddings - - class InstructBlipVideoQFormerModel(InstructBlipVideoPreTrainedModel): """ Querying Transformer (Q-Former), used in InstructBlipVideo. Slightly modified from BLIP-2 as it also takes the diff --git a/src/transformers/models/siglip2/modeling_siglip2.py b/src/transformers/models/siglip2/modeling_siglip2.py index 062e43e04861..f94d805bed1a 100644 --- a/src/transformers/models/siglip2/modeling_siglip2.py +++ b/src/transformers/models/siglip2/modeling_siglip2.py @@ -539,6 +539,75 @@ def default_flax_embed_init(tensor): variance_scaling_(tensor, mode="fan_in", distribution="normal") +@auto_docstring +class Siglip2PreTrainedModel(PreTrainedModel): + config: Siglip2Config + base_model_prefix = "siglip2" + supports_gradient_checkpointing = True + + _no_split_modules = [ + "Siglip2TextEmbeddings", + "Siglip2VisionEmbeddings", + "Siglip2EncoderLayer", + "Siglip2MultiheadAttentionPoolingHead", + ] + _supports_flash_attn = True + _supports_sdpa = True + _supports_flex_attn = True + _supports_attention_backend = True + + _can_record_outputs = { + "hidden_states": Siglip2EncoderLayer, + "attentions": Siglip2Attention, + } + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, Siglip2VisionEmbeddings): + width = ( + self.config.vision_config.hidden_size + if isinstance(self.config, Siglip2Config) + else self.config.hidden_size + ) + nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width)) + elif isinstance(module, nn.Embedding): + default_flax_embed_init(module.weight) + elif isinstance(module, Siglip2Attention): + nn.init.xavier_uniform_(module.q_proj.weight) + nn.init.xavier_uniform_(module.k_proj.weight) + nn.init.xavier_uniform_(module.v_proj.weight) + nn.init.xavier_uniform_(module.out_proj.weight) + nn.init.zeros_(module.q_proj.bias) + nn.init.zeros_(module.k_proj.bias) + nn.init.zeros_(module.v_proj.bias) + nn.init.zeros_(module.out_proj.bias) + elif isinstance(module, Siglip2MLP): + nn.init.xavier_uniform_(module.fc1.weight) + nn.init.xavier_uniform_(module.fc2.weight) + nn.init.normal_(module.fc1.bias, std=1e-6) + nn.init.normal_(module.fc2.bias, std=1e-6) + elif isinstance(module, Siglip2MultiheadAttentionPoolingHead): + nn.init.xavier_uniform_(module.probe.data) + nn.init.xavier_uniform_(module.attention.in_proj_weight.data) + nn.init.zeros_(module.attention.in_proj_bias.data) + elif isinstance(module, Siglip2Model): + logit_scale_init = torch.log(torch.tensor(1.0)) + module.logit_scale.data.fill_(logit_scale_init) + module.logit_bias.data.zero_() + elif isinstance(module, Siglip2ForImageClassification): + nn.init.normal_( + module.classifier.weight, + std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor, + ) + elif isinstance(module, (nn.Linear, 
nn.Conv2d)): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + class Siglip2TextEmbeddings(nn.Module): def __init__(self, config: Siglip2TextConfig): super().__init__() @@ -635,75 +704,6 @@ def forward( ) -@auto_docstring -class Siglip2PreTrainedModel(PreTrainedModel): - config: Siglip2Config - base_model_prefix = "siglip2" - supports_gradient_checkpointing = True - - _no_split_modules = [ - "Siglip2TextEmbeddings", - "Siglip2VisionEmbeddings", - "Siglip2EncoderLayer", - "Siglip2MultiheadAttentionPoolingHead", - ] - _supports_flash_attn = True - _supports_sdpa = True - _supports_flex_attn = True - _supports_attention_backend = True - - _can_record_outputs = { - "hidden_states": Siglip2EncoderLayer, - "attentions": Siglip2Attention, - } - - def _init_weights(self, module): - """Initialize the weights""" - if isinstance(module, Siglip2VisionEmbeddings): - width = ( - self.config.vision_config.hidden_size - if isinstance(self.config, Siglip2Config) - else self.config.hidden_size - ) - nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width)) - elif isinstance(module, nn.Embedding): - default_flax_embed_init(module.weight) - elif isinstance(module, Siglip2Attention): - nn.init.xavier_uniform_(module.q_proj.weight) - nn.init.xavier_uniform_(module.k_proj.weight) - nn.init.xavier_uniform_(module.v_proj.weight) - nn.init.xavier_uniform_(module.out_proj.weight) - nn.init.zeros_(module.q_proj.bias) - nn.init.zeros_(module.k_proj.bias) - nn.init.zeros_(module.v_proj.bias) - nn.init.zeros_(module.out_proj.bias) - elif isinstance(module, Siglip2MLP): - nn.init.xavier_uniform_(module.fc1.weight) - nn.init.xavier_uniform_(module.fc2.weight) - nn.init.normal_(module.fc1.bias, std=1e-6) - nn.init.normal_(module.fc2.bias, std=1e-6) - elif isinstance(module, Siglip2MultiheadAttentionPoolingHead): - nn.init.xavier_uniform_(module.probe.data) - nn.init.xavier_uniform_(module.attention.in_proj_weight.data) - nn.init.zeros_(module.attention.in_proj_bias.data) - elif isinstance(module, Siglip2Model): - logit_scale_init = torch.log(torch.tensor(1.0)) - module.logit_scale.data.fill_(logit_scale_init) - module.logit_bias.data.zero_() - elif isinstance(module, Siglip2ForImageClassification): - nn.init.normal_( - module.classifier.weight, - std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor, - ) - elif isinstance(module, (nn.Linear, nn.Conv2d)): - lecun_normal_(module.weight) - if module.bias is not None: - nn.init.zeros_(module.bias) - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - @auto_docstring( custom_intro=""" The text model from Siglip2 without any head or projection on top. 
@@ -1126,7 +1126,8 @@ def forward( pixel_attention_mask: Optional[torch.Tensor] = None, spatial_shapes: Optional[torch.LongTensor] = None, labels: Optional[torch.Tensor] = None, - **kwargs: Unpack[TransformersKwargs], + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, ) -> ImageClassifierOutput: r""" pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*): @@ -1164,12 +1165,17 @@ def forward( Predicted class: LABEL_1 ``` """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values, attention_mask=pixel_attention_mask, spatial_shapes=spatial_shapes, - **kwargs, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, ) sequence_output = outputs.last_hidden_state @@ -1212,6 +1218,8 @@ def forward( return ImageClassifierOutput( loss=loss, logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, ) From fb59341637823253adfea46d3488a5972df643d1 Mon Sep 17 00:00:00 2001 From: raushan Date: Thu, 28 Aug 2025 17:54:03 +0200 Subject: [PATCH 09/19] oops delete --- src/transformers/models/idefics/modeling_idefics.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index 0f2b569a50c3..a1601797bb1d 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -1077,7 +1077,6 @@ def forward( past_key_values=past_key_values, position_ids=position_ids, ) - print(self.config._attn_implementation, attention_mask[0, 0, :32, :32]) hidden_states = inputs_embeds From 05104d973857e9689b36dfd2f7984e0e095bcb58 Mon Sep 17 00:00:00 2001 From: raushan Date: Fri, 29 Aug 2025 11:54:23 +0200 Subject: [PATCH 10/19] fix efficientloftr --- src/transformers/models/idefics/modeling_idefics.py | 6 +++--- tests/models/efficientloftr/test_modeling_efficientloftr.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index a1601797bb1d..cb4ea1e405bc 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -1069,7 +1069,7 @@ def forward( (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device ) - attention_mask = create_causal_mask( + causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, @@ -1086,7 +1086,7 @@ def forward( cross_attn_block = self.gated_cross_attn_layers[idx // self.cross_layer_interval] hidden_states = cross_attn_block( hidden_states, - attention_mask, + causal_mask, image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, @@ -1096,7 +1096,7 @@ def forward( hidden_states = decoder_layer( hidden_states, - attention_mask=attention_mask, + attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, diff --git a/tests/models/efficientloftr/test_modeling_efficientloftr.py b/tests/models/efficientloftr/test_modeling_efficientloftr.py index 35b3452a1f93..bbc0c3980618 100644 --- 
a/tests/models/efficientloftr/test_modeling_efficientloftr.py +++ b/tests/models/efficientloftr/test_modeling_efficientloftr.py @@ -211,7 +211,7 @@ def check_hidden_states_output(inputs_dict, config, model_class): hidden_states = outputs.hidden_states - expected_num_hidden_states = len(self.model_tester.stage_num_blocks) + expected_num_hidden_states = len(self.model_tester.stage_num_blocks) + 1 self.assertEqual(len(hidden_states), expected_num_hidden_states) self.assertListEqual( From 202bf6b22c5b349f814defb2ba92812498cc535c Mon Sep 17 00:00:00 2001 From: raushan Date: Fri, 29 Aug 2025 12:32:03 +0200 Subject: [PATCH 11/19] fix copies --- src/transformers/models/ovis2/modular_ovis2.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/transformers/models/ovis2/modular_ovis2.py b/src/transformers/models/ovis2/modular_ovis2.py index de5b03b3ecc4..3c3790b23f10 100644 --- a/src/transformers/models/ovis2/modular_ovis2.py +++ b/src/transformers/models/ovis2/modular_ovis2.py @@ -24,7 +24,6 @@ from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple -from ...utils.generic import check_model_inputs from ..aimv2.modeling_aimv2 import Aimv2Attention, Aimv2EncoderLayer from ..auto import AutoModel from ..llama.modeling_llama import LlamaMLP, LlamaRMSNorm @@ -161,10 +160,6 @@ class Ovis2PreTrainedModel(PreTrainedModel): class Ovis2VisionModel(Ovis2PreTrainedModel): config: Ovis2VisionConfig - _can_record_outputs = { - "hidden_states": Ovis2VisionEncoderLayer, - "attentions": Ovis2VisionAttention, - } def __init__(self, config: Ovis2VisionConfig): super().__init__(config) @@ -179,10 +174,9 @@ def __init__(self, config: Ovis2VisionConfig): ) self.head_norm = nn.LayerNorm(self.vocab_size - self.num_visual_indicator_tokens) - @check_model_inputs def forward(self, pixel_values: torch.FloatTensor, **kwargs) -> tuple[torch.Tensor, torch.Tensor]: outputs = self.transformer(pixel_values, **kwargs) - last_hidden_state = outputs.last_hidden_state + last_hidden_state = outputs[0] if self.config.hidden_stride > 1: num_images, seq_len, hidden_dim = last_hidden_state.shape hidden_stride = self.config.hidden_stride From 59895d8d2e17ec8b1d0db99e6fc5d81f439c6ec5 Mon Sep 17 00:00:00 2001 From: raushan Date: Fri, 29 Aug 2025 12:32:11 +0200 Subject: [PATCH 12/19] i am stupid, fix idefics --- .../models/idefics/modeling_idefics.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index cb4ea1e405bc..55c70c051d73 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -776,13 +776,14 @@ def forward( past_key_values: Optional[tuple[torch.Tensor]] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: - r""" - image_hidden_states (): - - image_attention_mask (): - - cross_attention_gate (): - + """ + Args: + image_hidden_states (`torch.FloatTensor`): + Input to the layer of shape `(batch, seq_len, embed_dim)` + image_attention_mask (`torch.FloatTensor`, *optional*): image attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. 
+ cross_attention_gate (`torch.FloatTensor`, *optional*): + gate of size `(batch, seq_len)` used to zero-out cross-attention output for tokens attending no images. """ if image_hidden_states is None: raise ValueError( @@ -806,6 +807,7 @@ def forward( hidden_states, _ = self.cross_attn( hidden_states=hidden_states, key_value_states=image_hidden_states, + attention_mask=image_attention_mask, **kwargs, ) hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training) From 20ae443765df5f5717d5798d661ede46ad861735 Mon Sep 17 00:00:00 2001 From: raushan Date: Fri, 29 Aug 2025 13:12:05 +0200 Subject: [PATCH 13/19] fix GC --- .../models/idefics2/modeling_idefics2.py | 68 +++++-------------- 1 file changed, 17 insertions(+), 51 deletions(-) diff --git a/src/transformers/models/idefics2/modeling_idefics2.py b/src/transformers/models/idefics2/modeling_idefics2.py index 50a57bb4d823..f6b9b9bda7ff 100644 --- a/src/transformers/models/idefics2/modeling_idefics2.py +++ b/src/transformers/models/idefics2/modeling_idefics2.py @@ -379,7 +379,6 @@ def __init__(self, config: Idefics2Config): self.gradient_checkpointing = False # Ignore copy - @can_return_tuple @auto_docstring def forward( self, @@ -472,6 +471,7 @@ def forward( self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor] = None, + **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutput]: r""" patch_attention_mask (`torch.BoolTensor` of shape `(batch_size, num_patches_height, num_patches_width)`, *optional*): @@ -503,9 +503,11 @@ def forward( encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=patch_attention_mask, + **kwargs, ) last_hidden_state = encoder_outputs[0] + last_hidden_state = self.post_layernorm(last_hidden_state) return BaseModelOutput(last_hidden_state=last_hidden_state) @@ -573,9 +575,8 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, - output_attentions: bool = False, - use_cache: bool = False, - ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: + **kwargs: Unpack[TransformersKwargs], + ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """ Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension! @@ -608,13 +609,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
- ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, @@ -625,15 +620,13 @@ def forward( is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, + **kwargs, ) attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim) attn_output = self.o_proj(attn_output) - if not output_attentions: - attn_weights = None - - return attn_output, attn_weights, past_key_values + return attn_output, attn_weights class Idefics2PerceiverLayer(nn.Module): @@ -663,10 +656,8 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, - output_attentions: Optional[bool] = False, - use_cache: Optional[bool] = False, - **kwargs, - ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: + **kwargs: Unpack[TransformersKwargs], + ) -> torch.FloatTensor: """ Args: latents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` @@ -686,10 +677,11 @@ def forward( latents = self.input_latents_norm(latents) context = self.input_context_norm(context) - latents, self_attn_weights, present_key_value = self.self_attn( + latents, _ = self.self_attn( latents=latents, context=context, attention_mask=attention_mask, + **kwargs, ) latents = residual + latents residual = latents @@ -698,15 +690,7 @@ def forward( latents = self.mlp(latents) latents = residual + latents - outputs = (latents,) - - if output_attentions: - outputs += (self_attn_weights,) - - if use_cache: - outputs += (present_key_value,) - - return outputs + return latents @auto_docstring( @@ -742,6 +726,7 @@ def forward( self, context: torch.Tensor, attention_mask: torch.Tensor, + **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: r""" context (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`): @@ -762,18 +747,14 @@ def forward( compressed_context = latents for perceiver_layer in self.layers: - layer_outputs = perceiver_layer( + compressed_context = perceiver_layer( compressed_context, context, attention_mask=attention_mask, position_ids=None, - past_key_values=None, - output_attentions=False, - use_cache=False, + **kwargs, ) - compressed_context = layer_outputs[0] - compressed_context = self.norm(compressed_context) return compressed_context @@ -952,10 +933,7 @@ def forward( pixel_attention_mask: Optional[torch.BoolTensor] = None, image_hidden_states: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, - return_dict: Optional[bool] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, Idefics2BaseModelOutputWithPast]: r""" @@ -964,12 +942,6 @@ def forward( image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The hidden states of the image encoder after modality projection and perceiver resampling. 
""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.training and self.text_model.gradient_checkpointing and use_cache: logger.warning_once( @@ -985,10 +957,6 @@ def forward( else: raise ValueError("You have to specify either input_ids or inputs_embeds") - # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache - if not isinstance(past_key_values, (type(None), Cache)): - raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.") - if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) @@ -1012,16 +980,14 @@ def forward( image_hidden_states=image_hidden_states, ) + kwargs["return_dict"] = True outputs = self.text_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, cache_position=cache_position, - return_dict=True, **kwargs, ) From 7e64094f1d852d177076c2595fc4c114045e7fc6 Mon Sep 17 00:00:00 2001 From: raushan Date: Mon, 1 Sep 2025 14:55:44 +0200 Subject: [PATCH 14/19] return type and other comments --- .../models/aimv2/modeling_aimv2.py | 13 +++++------ .../models/aimv2/modular_aimv2.py | 15 ++++++------- src/transformers/models/blip/modeling_blip.py | 22 +++++++++---------- .../models/blip_2/modeling_blip_2.py | 12 +++++----- .../models/idefics/modeling_idefics.py | 12 ++++------ .../models/idefics2/modeling_idefics2.py | 2 +- .../models/idefics3/modeling_idefics3.py | 2 +- .../instructblip/modeling_instructblip.py | 9 ++++---- .../modeling_instructblipvideo.py | 9 ++++---- .../models/janus/modeling_janus.py | 8 +++---- .../models/ovis2/modeling_ovis2.py | 6 ++--- .../modeling_phi4_multimodal.py | 2 +- .../models/siglip/modeling_siglip.py | 2 +- .../models/siglip2/modeling_siglip2.py | 2 +- .../models/smolvlm/modeling_smolvlm.py | 2 +- 15 files changed, 54 insertions(+), 64 deletions(-) diff --git a/src/transformers/models/aimv2/modeling_aimv2.py b/src/transformers/models/aimv2/modeling_aimv2.py index 6ca054fe3eac..7bd1c5e5a0c3 100644 --- a/src/transformers/models/aimv2/modeling_aimv2.py +++ b/src/transformers/models/aimv2/modeling_aimv2.py @@ -303,11 +303,9 @@ def forward( hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.Tensor, torch.Tensor]: + ) -> torch.Tensor: norm_hidden_states = self.rms_norm1(hidden_states) - attn_output, attn_weights = self.attention( - hidden_states=norm_hidden_states, attention_mask=attention_mask, **kwargs - ) + attn_output, _ = self.attention(hidden_states=norm_hidden_states, attention_mask=attention_mask, **kwargs) hidden_states = hidden_states + attn_output norm_hidden_states = self.rms_norm2(hidden_states) @@ -474,12 +472,13 @@ def forward( ```""" hidden_states = self.embeddings(pixel_values) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, + attention_mask=attention_mask, **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = 
encoder_outputs.last_hidden_state last_hidden_state = self.rms_norm(last_hidden_state) pooler_output = self.head(last_hidden_state) if self.use_head else None @@ -549,7 +548,7 @@ def forward( **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.rms_norm(last_hidden_state) # Get pooled output diff --git a/src/transformers/models/aimv2/modular_aimv2.py b/src/transformers/models/aimv2/modular_aimv2.py index 60cf2be3293a..8365137ddfd8 100644 --- a/src/transformers/models/aimv2/modular_aimv2.py +++ b/src/transformers/models/aimv2/modular_aimv2.py @@ -24,7 +24,7 @@ from ...masking_utils import create_causal_mask from ...modeling_layers import GradientCheckpointingLayer -from ...modeling_outputs import BaseModelOutputWithPooling +from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import ( @@ -377,11 +377,9 @@ def forward( hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.Tensor, torch.Tensor]: + ) -> torch.Tensor: norm_hidden_states = self.rms_norm1(hidden_states) - attn_output, attn_weights = self.attention( - hidden_states=norm_hidden_states, attention_mask=attention_mask, **kwargs - ) + attn_output, _ = self.attention(hidden_states=norm_hidden_states, attention_mask=attention_mask, **kwargs) hidden_states = hidden_states + attn_output norm_hidden_states = self.rms_norm2(hidden_states) @@ -518,12 +516,13 @@ def forward( ```""" hidden_states = self.embeddings(pixel_values) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, + attention_mask=attention_mask, **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.rms_norm(last_hidden_state) pooler_output = self.head(last_hidden_state) if self.use_head else None @@ -593,7 +592,7 @@ def forward( **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.rms_norm(last_hidden_state) # Get pooled output diff --git a/src/transformers/models/blip/modeling_blip.py b/src/transformers/models/blip/modeling_blip.py index aac6fbd02a67..c3b5821601fb 100644 --- a/src/transformers/models/blip/modeling_blip.py +++ b/src/transformers/models/blip/modeling_blip.py @@ -330,7 +330,7 @@ def forward( hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: + ) -> tuple[torch.Tensor, torch.Tensor]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() @@ -399,7 +399,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) @@ -522,12 +522,12 @@ def forward( hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = 
self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] @@ -764,10 +764,10 @@ def forward( **kwargs, ) - image_embeds = vision_outputs[1] + image_embeds = vision_outputs.pooler_output image_embeds = self.visual_projection(image_embeds) - text_embeds = text_outputs[1] + text_embeds = text_outputs.pooler_output text_embeds = self.text_projection(text_embeds) # normalized features @@ -864,7 +864,7 @@ def forward( **kwargs, ) - image_embeds = vision_outputs[0] + image_embeds = vision_outputs.last_hidden_state outputs = self.text_decoder( input_ids=input_ids, @@ -1051,7 +1051,7 @@ def forward( **kwargs, ) - image_embeds = vision_outputs[0] + image_embeds = vision_outputs.last_hidden_state image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long) question_embeds = self.text_encoder( @@ -1262,7 +1262,7 @@ def forward( **kwargs, ) - image_embeds = vision_outputs[0] + image_embeds = vision_outputs.last_hidden_state image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long) if use_itm_head: @@ -1273,7 +1273,7 @@ def forward( encoder_attention_mask=image_atts, **kwargs, ) - question_embeds = question_embeds[0] + question_embeds = question_embeds.last_hidden_state output = self.itm_head(question_embeds[:, 0, :]) else: @@ -1282,7 +1282,7 @@ def forward( attention_mask=attention_mask, **kwargs, ) - question_embeds = question_embeds[0] + question_embeds = question_embeds.last_hidden_state image_feat = normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1) text_feat = normalize(self.text_proj(question_embeds[:, 0, :]), dim=-1) diff --git a/src/transformers/models/blip_2/modeling_blip_2.py b/src/transformers/models/blip_2/modeling_blip_2.py index 2ee9aac11274..1dd5ffcb3a6d 100644 --- a/src/transformers/models/blip_2/modeling_blip_2.py +++ b/src/transformers/models/blip_2/modeling_blip_2.py @@ -361,7 +361,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) @@ -498,12 +498,12 @@ def forward( hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] @@ -690,7 +690,7 @@ def forward( encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.Tensor]: + ) -> torch.Tensor: attn_output, _ = self.attention( hidden_states=hidden_states, attention_mask=attention_mask, @@ -1067,7 +1067,7 @@ def forward( # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, @@ -1076,7 +1076,7 @@ def forward( query_length=query_length, **kwargs, ) - sequence_output = encoder_outputs[0] + sequence_output = encoder_outputs.last_hidden_state pooled_output = sequence_output[:, 0, :] return BaseModelOutputWithPoolingAndCrossAttentions( 
diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index 55c70c051d73..2a9af9f5a58d 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -35,7 +35,7 @@ from ...modeling_outputs import ModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PretrainedConfig, PreTrainedModel from ...processing_utils import Unpack -from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging +from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...utils.deprecation import deprecate_kwarg from ...utils.generic import OutputRecorder, check_model_inputs from .configuration_idefics import IdeficsConfig @@ -43,10 +43,6 @@ from .vision import IdeficsVisionEmbeddings, IdeficsVisionTransformer -if is_torch_flex_attn_available(): - pass - - logger = logging.get_logger(__name__) @@ -582,7 +578,7 @@ def forward( past_key_values: Optional[tuple[torch.Tensor]] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: + ) -> tuple[torch.Tensor, torch.Tensor]: # if key_value_states are provided this layer is used as a cross-attention layer is_cross_attention = self.is_cross_attention or key_value_states is not None @@ -670,7 +666,7 @@ def forward( past_key_values: Optional[tuple[torch.Tensor]] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) @@ -775,7 +771,7 @@ def forward( cross_attention_gate: Optional[torch.Tensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: + ) -> torch.FloatTensor: """ Args: image_hidden_states (`torch.FloatTensor`): diff --git a/src/transformers/models/idefics2/modeling_idefics2.py b/src/transformers/models/idefics2/modeling_idefics2.py index f6b9b9bda7ff..49ba0172db10 100644 --- a/src/transformers/models/idefics2/modeling_idefics2.py +++ b/src/transformers/models/idefics2/modeling_idefics2.py @@ -343,7 +343,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) diff --git a/src/transformers/models/idefics3/modeling_idefics3.py b/src/transformers/models/idefics3/modeling_idefics3.py index 13a32f2ddfa1..5e80c13a03e0 100644 --- a/src/transformers/models/idefics3/modeling_idefics3.py +++ b/src/transformers/models/idefics3/modeling_idefics3.py @@ -302,7 +302,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py index cb6436f9b99f..ef6c8db0bdd7 100644 --- a/src/transformers/models/instructblip/modeling_instructblip.py +++ 
b/src/transformers/models/instructblip/modeling_instructblip.py @@ -288,7 +288,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) @@ -385,7 +385,6 @@ def forward( class InstructBlipVisionModel(InstructBlipPreTrainedModel): main_input_name = "pixel_values" config: InstructBlipVisionConfig - _can_record_outputs = { "hidden_states": InstructBlipEncoderLayer, "attentions": InstructBlipAttention, @@ -415,12 +414,12 @@ def forward( hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] @@ -606,7 +605,7 @@ def forward( encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.Tensor]: + ) -> torch.Tensor: attn_output, _ = self.attention( hidden_states=hidden_states, attention_mask=attention_mask, diff --git a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py index 71625453a026..9182a1e92fee 100644 --- a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py @@ -295,7 +295,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) @@ -351,7 +351,6 @@ def forward( class InstructBlipVideoVisionModel(InstructBlipVideoPreTrainedModel): main_input_name = "pixel_values" config: InstructBlipVideoVisionConfig - _can_record_outputs = { "hidden_states": InstructBlipVideoEncoderLayer, "attentions": InstructBlipVideoAttention, @@ -381,12 +380,12 @@ def forward( hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] @@ -570,7 +569,7 @@ def forward( encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.Tensor]: + ) -> torch.Tensor: attn_output, _ = self.attention( hidden_states=hidden_states, attention_mask=attention_mask, diff --git a/src/transformers/models/janus/modeling_janus.py b/src/transformers/models/janus/modeling_janus.py index 3da38dde14ba..c8887b7bee2a 100644 --- a/src/transformers/models/janus/modeling_janus.py +++ b/src/transformers/models/janus/modeling_janus.py @@ -380,7 +380,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states 
hidden_states = self.layer_norm1(hidden_states) @@ -539,7 +539,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) @@ -591,12 +591,12 @@ def forward( hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] diff --git a/src/transformers/models/ovis2/modeling_ovis2.py b/src/transformers/models/ovis2/modeling_ovis2.py index 888733e05b1c..2aad062eafd3 100644 --- a/src/transformers/models/ovis2/modeling_ovis2.py +++ b/src/transformers/models/ovis2/modeling_ovis2.py @@ -335,11 +335,9 @@ def forward( hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.Tensor, torch.Tensor]: + ) -> torch.Tensor: norm_hidden_states = self.rms_norm1(hidden_states) - attn_output, attn_weights = self.attention( - hidden_states=norm_hidden_states, attention_mask=attention_mask, **kwargs - ) + attn_output, _ = self.attention(hidden_states=norm_hidden_states, attention_mask=attention_mask, **kwargs) hidden_states = hidden_states + attn_output norm_hidden_states = self.rms_norm2(hidden_states) diff --git a/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py b/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py index 54fd2b2e3371..349f2e02e2f2 100644 --- a/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +++ b/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py @@ -154,7 +154,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) diff --git a/src/transformers/models/siglip/modeling_siglip.py b/src/transformers/models/siglip/modeling_siglip.py index cbb44845666d..3be327e8e828 100644 --- a/src/transformers/models/siglip/modeling_siglip.py +++ b/src/transformers/models/siglip/modeling_siglip.py @@ -436,7 +436,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) diff --git a/src/transformers/models/siglip2/modeling_siglip2.py b/src/transformers/models/siglip2/modeling_siglip2.py index f94d805bed1a..cc24ae850c21 100644 --- a/src/transformers/models/siglip2/modeling_siglip2.py +++ b/src/transformers/models/siglip2/modeling_siglip2.py @@ -330,7 +330,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) diff --git a/src/transformers/models/smolvlm/modeling_smolvlm.py b/src/transformers/models/smolvlm/modeling_smolvlm.py index f1bb30a34605..f25019382688 100644 --- a/src/transformers/models/smolvlm/modeling_smolvlm.py +++ 
b/src/transformers/models/smolvlm/modeling_smolvlm.py @@ -280,7 +280,7 @@ def forward( hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], - ) -> tuple[torch.FloatTensor]: + ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) From 5929681c605f60c0c21ec99f11b73146058cee92 Mon Sep 17 00:00:00 2001 From: raushan Date: Mon, 1 Sep 2025 15:07:48 +0200 Subject: [PATCH 15/19] we shouldn't manually change attention anymore --- .../models/altclip/modeling_altclip.py | 8 +------ src/transformers/models/clip/modeling_clip.py | 8 +------ .../models/clipseg/modeling_clipseg.py | 8 +------ src/transformers/models/git/modeling_git.py | 8 +------ src/transformers/models/idefics/vision.py | 8 +------ .../models/kosmos2/modeling_kosmos2.py | 16 ++----------- .../models/metaclip_2/modeling_metaclip_2.py | 8 +------ src/transformers/models/opt/modeling_opt.py | 8 +------ .../models/pixtral/modeling_pixtral.py | 8 +------ .../models/vjepa2/modeling_vjepa2.py | 24 +++---------------- .../models/x_clip/modeling_x_clip.py | 8 +------ 11 files changed, 14 insertions(+), 98 deletions(-) diff --git a/src/transformers/models/altclip/modeling_altclip.py b/src/transformers/models/altclip/modeling_altclip.py index cdb0c3dcd032..4d3a542540c8 100755 --- a/src/transformers/models/altclip/modeling_altclip.py +++ b/src/transformers/models/altclip/modeling_altclip.py @@ -545,13 +545,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index e73294ff05c0..1ccca4bdedbe 100644 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -329,13 +329,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
- ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, diff --git a/src/transformers/models/clipseg/modeling_clipseg.py b/src/transformers/models/clipseg/modeling_clipseg.py index 34d19ffaf387..082f123eeb78 100644 --- a/src/transformers/models/clipseg/modeling_clipseg.py +++ b/src/transformers/models/clipseg/modeling_clipseg.py @@ -332,13 +332,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, diff --git a/src/transformers/models/git/modeling_git.py b/src/transformers/models/git/modeling_git.py index 2b69cf07a046..0f126353e980 100644 --- a/src/transformers/models/git/modeling_git.py +++ b/src/transformers/models/git/modeling_git.py @@ -658,13 +658,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, diff --git a/src/transformers/models/idefics/vision.py b/src/transformers/models/idefics/vision.py index 8682ff047a8d..72521761d9d1 100644 --- a/src/transformers/models/idefics/vision.py +++ b/src/transformers/models/idefics/vision.py @@ -243,13 +243,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
- ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, diff --git a/src/transformers/models/kosmos2/modeling_kosmos2.py b/src/transformers/models/kosmos2/modeling_kosmos2.py index 6739022a3977..9093273b700f 100644 --- a/src/transformers/models/kosmos2/modeling_kosmos2.py +++ b/src/transformers/models/kosmos2/modeling_kosmos2.py @@ -344,13 +344,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, @@ -767,13 +761,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, diff --git a/src/transformers/models/metaclip_2/modeling_metaclip_2.py b/src/transformers/models/metaclip_2/modeling_metaclip_2.py index 56ab43c03010..9e20380d7ac6 100644 --- a/src/transformers/models/metaclip_2/modeling_metaclip_2.py +++ b/src/transformers/models/metaclip_2/modeling_metaclip_2.py @@ -225,13 +225,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index 92a395ec5438..6f06a2214768 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -175,13 +175,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, diff --git a/src/transformers/models/pixtral/modeling_pixtral.py b/src/transformers/models/pixtral/modeling_pixtral.py index b46a399e095b..564c118fccb9 100644 --- a/src/transformers/models/pixtral/modeling_pixtral.py +++ b/src/transformers/models/pixtral/modeling_pixtral.py @@ -206,13 +206,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] # Since we use packing, if flash_attention_2 is selected we rely on position_ids if self.config._attn_implementation == "flash_attention_2": diff --git a/src/transformers/models/vjepa2/modeling_vjepa2.py b/src/transformers/models/vjepa2/modeling_vjepa2.py index fb27ffdb6a5a..bde505b4ea54 100644 --- a/src/transformers/models/vjepa2/modeling_vjepa2.py +++ b/src/transformers/models/vjepa2/modeling_vjepa2.py @@ -324,13 +324,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] context_layer, attention_probs = attention_interface( self, @@ -771,13 +765,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, @@ -846,13 +834,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
- ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, diff --git a/src/transformers/models/x_clip/modeling_x_clip.py b/src/transformers/models/x_clip/modeling_x_clip.py index f6c5c51a27c5..6f9d4438825d 100644 --- a/src/transformers/models/x_clip/modeling_x_clip.py +++ b/src/transformers/models/x_clip/modeling_x_clip.py @@ -298,13 +298,7 @@ def forward( attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": - if self.config._attn_implementation == "sdpa" and output_attentions: - logger.warning_once( - "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " - 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - else: - attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, From b8648c31855c75ab730c64335cb469037cc2951c Mon Sep 17 00:00:00 2001 From: raushan Date: Mon, 1 Sep 2025 15:10:16 +0200 Subject: [PATCH 16/19] fix style --- src/transformers/models/blip_2/modeling_blip_2.py | 10 +++++++++- src/transformers/models/siglip/modeling_siglip.py | 9 ++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/blip_2/modeling_blip_2.py b/src/transformers/models/blip_2/modeling_blip_2.py index 5b3987b2d99c..6a488fba8f5a 100644 --- a/src/transformers/models/blip_2/modeling_blip_2.py +++ b/src/transformers/models/blip_2/modeling_blip_2.py @@ -38,7 +38,15 @@ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer -from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int, filter_out_non_signature_kwargs +from ...utils import ( + ModelOutput, + TransformersKwargs, + auto_docstring, + can_return_tuple, + filter_out_non_signature_kwargs, + logging, + torch_int, +) from ...utils.generic import OutputRecorder, check_model_inputs from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig diff --git a/src/transformers/models/siglip/modeling_siglip.py b/src/transformers/models/siglip/modeling_siglip.py index f2d312a0b52f..3c9ebf9245b5 100644 --- a/src/transformers/models/siglip/modeling_siglip.py +++ b/src/transformers/models/siglip/modeling_siglip.py @@ -31,7 +31,14 @@ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack -from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, torch_int, filter_out_non_signature_kwargs +from ...utils import ( + ModelOutput, + TransformersKwargs, + auto_docstring, + can_return_tuple, + filter_out_non_signature_kwargs, + torch_int, +) from ...utils.generic import check_model_inputs from .configuration_siglip import SiglipConfig, SiglipTextConfig, SiglipVisionConfig From 59b74c8810c5728c5caf2cc47afa050bbf8160ff Mon Sep 17 00:00:00 2001 From: raushan 
Date: Mon, 1 Sep 2025 15:25:37 +0200 Subject: [PATCH 17/19] fix copies --- .../models/idefics/modeling_idefics.py | 16 ++++++++-------- .../models/idefics2/modeling_idefics2.py | 4 ++-- .../models/idefics3/modeling_idefics3.py | 4 ++-- .../models/instructblip/modeling_instructblip.py | 4 ++-- .../modeling_instructblipvideo.py | 4 ++-- .../models/metaclip_2/modeling_metaclip_2.py | 4 ---- src/transformers/models/ovis2/modeling_ovis2.py | 4 ++-- src/transformers/models/ovis2/modular_ovis2.py | 4 ++-- .../models/siglip/modeling_siglip.py | 4 ++-- .../models/siglip2/modeling_siglip2.py | 2 +- .../models/smolvlm/modeling_smolvlm.py | 4 ++-- 11 files changed, 25 insertions(+), 29 deletions(-) diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index 2a9af9f5a58d..c86262b95b7a 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -772,14 +772,14 @@ def forward( past_key_values: Optional[tuple[torch.Tensor]] = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: - """ - Args: - image_hidden_states (`torch.FloatTensor`): - Input to the layer of shape `(batch, seq_len, embed_dim)` - image_attention_mask (`torch.FloatTensor`, *optional*): image attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - cross_attention_gate (`torch.FloatTensor`, *optional*): - gate of size `(batch, seq_len)` used to zero-out cross-attention output for tokens attending no images. + r""" + image_hidden_states (`torch.FloatTensor`): + Input to the layer of shape `(batch, seq_len, embed_dim)` + image_attention_mask (`torch.FloatTensor`, *optional*): + image attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + cross_attention_gate (`torch.FloatTensor`, *optional*): + gate of size `(batch, seq_len)` used to zero-out cross-attention output for tokens attending no images. 
""" if image_hidden_states is None: raise ValueError( diff --git a/src/transformers/models/idefics2/modeling_idefics2.py b/src/transformers/models/idefics2/modeling_idefics2.py index 49ba0172db10..e38424345d0f 100644 --- a/src/transformers/models/idefics2/modeling_idefics2.py +++ b/src/transformers/models/idefics2/modeling_idefics2.py @@ -500,13 +500,13 @@ def forward( elif not self._use_flash_attention_2: patch_attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=patch_attention_mask, **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.post_layernorm(last_hidden_state) return BaseModelOutput(last_hidden_state=last_hidden_state) diff --git a/src/transformers/models/idefics3/modeling_idefics3.py b/src/transformers/models/idefics3/modeling_idefics3.py index 5e80c13a03e0..eb1678ee66ec 100644 --- a/src/transformers/models/idefics3/modeling_idefics3.py +++ b/src/transformers/models/idefics3/modeling_idefics3.py @@ -506,12 +506,12 @@ def forward( elif not torch.any(~patch_attention_mask): patch_attention_mask = None - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=patch_attention_mask, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.post_layernorm(last_hidden_state) return BaseModelOutput( diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py index 69f27237e8f5..af039a508c5d 100644 --- a/src/transformers/models/instructblip/modeling_instructblip.py +++ b/src/transformers/models/instructblip/modeling_instructblip.py @@ -982,7 +982,7 @@ def forward( # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, @@ -991,7 +991,7 @@ def forward( query_length=query_length, **kwargs, ) - sequence_output = encoder_outputs[0] + sequence_output = encoder_outputs.last_hidden_state pooled_output = sequence_output[:, 0, :] return BaseModelOutputWithPoolingAndCrossAttentions( diff --git a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py index 0bf669797310..863e22e82b17 100644 --- a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py @@ -943,7 +943,7 @@ def forward( # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, @@ -952,7 +952,7 @@ def forward( query_length=query_length, **kwargs, ) - sequence_output = encoder_outputs[0] + sequence_output = encoder_outputs.last_hidden_state pooled_output = sequence_output[:, 0, :] return BaseModelOutputWithPoolingAndCrossAttentions( diff --git 
a/src/transformers/models/metaclip_2/modeling_metaclip_2.py b/src/transformers/models/metaclip_2/modeling_metaclip_2.py index 0cea7779fe6a..da85d132ee26 100644 --- a/src/transformers/models/metaclip_2/modeling_metaclip_2.py +++ b/src/transformers/models/metaclip_2/modeling_metaclip_2.py @@ -23,16 +23,12 @@ auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, - logging, torch_int, ) from ...utils.generic import check_model_inputs from .configuration_metaclip_2 import MetaClip2Config, MetaClip2TextConfig, MetaClip2VisionConfig -logger = logging.get_logger(__name__) - - class MetaClip2TextEmbeddings(nn.Module): def __init__(self, config: MetaClip2TextConfig): super().__init__() diff --git a/src/transformers/models/ovis2/modeling_ovis2.py b/src/transformers/models/ovis2/modeling_ovis2.py index b60e7b46a83e..8390f66ff39b 100644 --- a/src/transformers/models/ovis2/modeling_ovis2.py +++ b/src/transformers/models/ovis2/modeling_ovis2.py @@ -396,13 +396,13 @@ def forward( ): hidden_states = self.embeddings(pixel_values) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.rms_norm(last_hidden_state) return BaseModelOutput(last_hidden_state=last_hidden_state) diff --git a/src/transformers/models/ovis2/modular_ovis2.py b/src/transformers/models/ovis2/modular_ovis2.py index 3c3790b23f10..4163d62be5e5 100644 --- a/src/transformers/models/ovis2/modular_ovis2.py +++ b/src/transformers/models/ovis2/modular_ovis2.py @@ -124,13 +124,13 @@ def forward( ): hidden_states = self.embeddings(pixel_values) - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.rms_norm(last_hidden_state) return BaseModelOutput(last_hidden_state=last_hidden_state) diff --git a/src/transformers/models/siglip/modeling_siglip.py b/src/transformers/models/siglip/modeling_siglip.py index 3c9ebf9245b5..0720560a87a9 100644 --- a/src/transformers/models/siglip/modeling_siglip.py +++ b/src/transformers/models/siglip/modeling_siglip.py @@ -609,7 +609,7 @@ def forward( **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.final_layer_norm(last_hidden_state) # The model uses the last token's hidden state, which may be padding. @@ -704,7 +704,7 @@ def forward( **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.post_layernorm(last_hidden_state) pooler_output = self.head(last_hidden_state) if self.use_head else None diff --git a/src/transformers/models/siglip2/modeling_siglip2.py b/src/transformers/models/siglip2/modeling_siglip2.py index ac380bfde644..f8ac70476522 100644 --- a/src/transformers/models/siglip2/modeling_siglip2.py +++ b/src/transformers/models/siglip2/modeling_siglip2.py @@ -691,7 +691,7 @@ def forward( **kwargs, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.final_layer_norm(last_hidden_state) # The model uses the last token's hidden state, which may be padding. 
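
From the caller's side these changes are meant to be transparent: attention and hidden-state recording is requested through keyword arguments collected into `**kwargs` rather than explicit `output_*` parameters threaded through every forward. A usage sketch with a SigLIP vision tower, assuming a public checkpoint such as google/siglip-base-patch16-224:

import torch
from transformers import SiglipVisionModel

model = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224")
pixel_values = torch.randn(1, 3, 224, 224)

# The flags travel through **kwargs and are recorded from the layer/attention classes
# registered in `_can_record_outputs`; the returned ModelOutput exposes them as named fields.
outputs = model(pixel_values, output_attentions=True, output_hidden_states=True)
print(len(outputs.hidden_states), outputs.attentions[0].shape)
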
diff --git a/src/transformers/models/smolvlm/modeling_smolvlm.py b/src/transformers/models/smolvlm/modeling_smolvlm.py index f25019382688..bce7b45441b9 100644 --- a/src/transformers/models/smolvlm/modeling_smolvlm.py +++ b/src/transformers/models/smolvlm/modeling_smolvlm.py @@ -393,12 +393,12 @@ def forward( elif not torch.any(~patch_attention_mask): patch_attention_mask = None - encoder_outputs = self.encoder( + encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=patch_attention_mask, ) - last_hidden_state = encoder_outputs[0] + last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.post_layernorm(last_hidden_state) return BaseModelOutput( From fe5b52244dbbc9d2a73983e2cdac23b1c63aae7c Mon Sep 17 00:00:00 2001 From: raushan Date: Mon, 1 Sep 2025 17:13:33 +0200 Subject: [PATCH 18/19] fix the test --- .../test_modeling_vision_text_dual_encoder.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py b/tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py index 0ebaae4428da..aef0d98bbdfa 100644 --- a/tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py +++ b/tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py @@ -134,6 +134,9 @@ def check_save_load(self, text_config, input_ids, attention_mask, vision_config, def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): + # The backbones don't support dynamic attention setting, so we manually change it. FIXME; when bert is refactored + vision_config._attn_implementation = "eager" + text_config._attn_implementation = "eager" vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) From a753e86e8cddf782597155cb50efc1c08ecf2c52 Mon Sep 17 00:00:00 2001 From: raushan Date: Thu, 4 Sep 2025 17:06:40 +0200 Subject: [PATCH 19/19] vision model shouldn't need attention, see e.g. 
CLIP/Siglip --- src/transformers/models/aimv2/modeling_aimv2.py | 3 ++- src/transformers/models/aimv2/modular_aimv2.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/aimv2/modeling_aimv2.py b/src/transformers/models/aimv2/modeling_aimv2.py index d2486fda7632..e20797feead9 100644 --- a/src/transformers/models/aimv2/modeling_aimv2.py +++ b/src/transformers/models/aimv2/modeling_aimv2.py @@ -36,6 +36,7 @@ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs +from ...utils.deprecation import deprecate_kwarg from ...utils.generic import check_model_inputs from .configuration_aimv2 import Aimv2Config, Aimv2TextConfig, Aimv2VisionConfig @@ -442,6 +443,7 @@ def __init__(self, config: Aimv2VisionConfig): def get_input_embeddings(self) -> nn.Module: return self.embeddings.patch_embed + @deprecate_kwarg("attention_mask", version="v4.58.0") @check_model_inputs @auto_docstring def forward( @@ -474,7 +476,6 @@ def forward( encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, - attention_mask=attention_mask, **kwargs, ) diff --git a/src/transformers/models/aimv2/modular_aimv2.py b/src/transformers/models/aimv2/modular_aimv2.py index 8365137ddfd8..0bf1cdd346a0 100644 --- a/src/transformers/models/aimv2/modular_aimv2.py +++ b/src/transformers/models/aimv2/modular_aimv2.py @@ -32,6 +32,7 @@ auto_docstring, can_return_tuple, ) +from ...utils.deprecation import deprecate_kwarg from ...utils.generic import check_model_inputs from ..clip.modeling_clip import CLIPModel, CLIPTextEmbeddings, _get_vector_norm from ..llama.modeling_llama import LlamaMLP, LlamaRMSNorm @@ -486,6 +487,7 @@ def __init__(self, config: Aimv2VisionConfig): def get_input_embeddings(self) -> nn.Module: return self.embeddings.patch_embed + @deprecate_kwarg("attention_mask", version="v4.58.0") @check_model_inputs @auto_docstring def forward( @@ -518,7 +520,6 @@ def forward( encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, - attention_mask=attention_mask, **kwargs, )
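
The last patch keeps `attention_mask` accepted on `Aimv2VisionModel.forward` but marks it as deprecated and stops forwarding it to the encoder. A self-contained sketch of that decorator pattern, using a simplified stand-in for the real helper in `transformers.utils.deprecation` (which additionally supports renaming a kwarg):

import warnings
from functools import wraps


def deprecate_kwarg(name, version):
    """Warn when `name` is passed; the kwarg is still accepted until `version`."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if name in kwargs:
                warnings.warn(
                    f"`{name}` is deprecated and will be removed in {version}.",
                    FutureWarning,
                )
            return fn(*args, **kwargs)
        return wrapper
    return decorator


@deprecate_kwarg("attention_mask", version="v4.58.0")
def forward(pixel_values, attention_mask=None):
    # attention_mask is accepted for backward compatibility but no longer used,
    # matching the change to Aimv2VisionModel.forward above.
    return pixel_values


forward([1, 2, 3], attention_mask=[1, 1, 1])  # emits a FutureWarning
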