Deprecate vocab_size in other two VLMs (#31681)
* deprecate `vocab_size` in other two VLMs

* Update src/transformers/models/fuyu/configuration_fuyu.py

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* deprecate until 4.44

---------

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
zucchini-nlp and amyeroberts committed Jul 9, 2024
1 parent 594c161 commit 952dfd4
Showing 5 changed files with 56 additions and 7 deletions.
4 changes: 2 additions & 2 deletions src/transformers/modeling_utils.py
@@ -1942,8 +1942,8 @@ def resize_token_embeddings(
         # Update base model and current model config
         if hasattr(self.config, "text_config"):
             self.config.text_config.vocab_size = model_embeds.weight.shape[0]
-        # TODO: to be removed after v4.42, config.vocab_size is deprecated for models that have a config.text_config
-        self.config.vocab_size = model_embeds.weight.shape[0]
+        else:
+            self.config.vocab_size = model_embeds.weight.shape[0]
         self.vocab_size = model_embeds.weight.shape[0]

         # Tie weights again if needed
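For readers skimming the hunk above: composite VLM configs now treat `text_config.vocab_size` as the single source of truth, and only text-only configs keep a top-level `vocab_size`. A minimal standalone sketch of that branching, using hypothetical `SimpleNamespace` stand-ins rather than real transformers configs:

```python
# A minimal sketch of the branching in resize_token_embeddings, with toy configs.
from types import SimpleNamespace

def sync_vocab_size(config, new_size):
    if hasattr(config, "text_config"):
        # Composite (multimodal) configs keep the vocabulary on the text sub-config.
        config.text_config.vocab_size = new_size
    else:
        # Plain text-only configs keep it at the top level.
        config.vocab_size = new_size

vlm_cfg = SimpleNamespace(text_config=SimpleNamespace(vocab_size=32000))
txt_cfg = SimpleNamespace(vocab_size=32000)
sync_vocab_size(vlm_cfg, 32128)
sync_vocab_size(txt_cfg, 32128)
print(vlm_cfg.text_config.vocab_size, txt_cfg.vocab_size)  # 32128 32128
```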
21 changes: 20 additions & 1 deletion src/transformers/models/fuyu/configuration_fuyu.py
@@ -14,6 +14,8 @@
 # limitations under the License.
 """Fuyu model configuration"""

+import warnings
+
 from ...configuration_utils import PretrainedConfig
 from ...utils import logging
 from ..auto import CONFIG_MAPPING
@@ -157,7 +159,7 @@ def __init__(
         text_model_type = text_config["model_type"] if "model_type" in text_config else "persimmon"
         self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

-        self.vocab_size = vocab_size
+        self._vocab_size = vocab_size
         self.max_position_embeddings = max_position_embeddings
         self.image_size = image_size
         self.patch_size = patch_size
@@ -206,3 +208,20 @@ def _rope_scaling_validation(self):
             )
         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
             raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
+
+    @property
+    def vocab_size(self):
+        warnings.warn(
+            "The `vocab_size` attribute is deprecated and will be removed in v4.44. Please use `text_config.vocab_size` instead.",
+            FutureWarning,
+        )
+        return self._vocab_size
+
+    @vocab_size.setter
+    def vocab_size(self, value):
+        self._vocab_size = value
+
+    def to_dict(self):
+        output = super().to_dict()
+        output.pop("_vocab_size", None)
+        return output
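A quick sanity check of the new behavior: reading the deprecated attribute should emit a `FutureWarning`, while the text sub-config stays silent. A hedged usage sketch (default construction only, no weights needed):

```python
import warnings

from transformers import FuyuConfig

config = FuyuConfig()  # builds a default Persimmon text sub-config

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = config.vocab_size  # deprecated read path
assert any(issubclass(w.category, FutureWarning) for w in caught)

# The supported read path goes through the text sub-config and does not warn.
assert config.text_config.vocab_size == legacy
```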
11 changes: 10 additions & 1 deletion src/transformers/models/fuyu/modeling_fuyu.py
@@ -149,7 +149,7 @@ class FuyuForCausalLM(FuyuPreTrainedModel):
     def __init__(self, config: FuyuConfig):
         super().__init__(config)
         self.padding_idx = config.pad_token_id
-        self.vocab_size = config.vocab_size
+        self.vocab_size = config.text_config.vocab_size
         self.language_model = AutoModelForCausalLM.from_config(
             config.text_config, attn_implementation=config._attn_implementation
         )
@@ -168,6 +168,15 @@ def get_input_embeddings(self):
     def set_input_embeddings(self, value):
         self.language_model.set_input_embeddings(value)

+    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
+        # TODO: config.vocab_size is deprecated and will be removed in v4.44.
+        # `resize_token_embeddings` should work from `modeling_utils.py`
+        model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+        self.config.text_config.vocab_size = model_embeds.num_embeddings
+        self.config.vocab_size = model_embeds.num_embeddings
+        self.vocab_size = model_embeds.num_embeddings
+        return model_embeds
+
     def gather_continuous_embeddings(
         self,
         word_embeddings: torch.Tensor,
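The override is pure delegation plus bookkeeping: resize the inner language model, then mirror the new size onto the wrapper and both config levels. A compact sketch of that pattern with a hypothetical toy model (`ToyLM` and `ToyWrapper` are illustrations, not transformers code):

```python
# Delegation-plus-bookkeeping, distilled with a toy inner model.
import torch.nn as nn

class ToyLM(nn.Module):
    def __init__(self, vocab_size=100, hidden_size=8):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden_size)

    def resize_token_embeddings(self, new_num_tokens):
        new = nn.Embedding(new_num_tokens, self.embed.embedding_dim)
        n = min(self.embed.num_embeddings, new_num_tokens)
        new.weight.data[:n] = self.embed.weight.data[:n]  # keep the overlapping rows
        self.embed = new
        return new

class ToyWrapper(nn.Module):
    def __init__(self):
        super().__init__()
        self.language_model = ToyLM()
        self.vocab_size = 100

    def resize_token_embeddings(self, new_num_tokens):
        # Delegate to the inner model, then keep the wrapper's bookkeeping in sync.
        model_embeds = self.language_model.resize_token_embeddings(new_num_tokens)
        self.vocab_size = model_embeds.num_embeddings
        return model_embeds

wrapper = ToyWrapper()
print(wrapper.resize_token_embeddings(128).num_embeddings, wrapper.vocab_size)  # 128 128
```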
22 changes: 21 additions & 1 deletion src/transformers/models/paligemma/configuration_paligemma.py
@@ -13,6 +13,8 @@
 # limitations under the License.
 """PaliGemma model configuration"""

+import warnings
+
 from ...configuration_utils import PretrainedConfig
 from ...utils import logging
 from ..auto import CONFIG_MAPPING
@@ -86,7 +88,7 @@ def __init__(
     ):
         self.ignore_index = ignore_index
         self.image_token_index = image_token_index
-        self.vocab_size = vocab_size
+        self._vocab_size = vocab_size
         self.projection_dim = projection_dim
         self.hidden_size = hidden_size
         self.vision_config = vision_config
@@ -124,7 +126,25 @@ def __init__(
                 num_attention_heads=8,
                 num_key_value_heads=1,
                 is_encoder_decoder=False,
+                vocab_size=vocab_size,
             )
         self.text_config.num_image_tokens = (self.vision_config.image_size // self.vision_config.patch_size) ** 2
         self.vision_config.projection_dim = projection_dim
         super().__init__(**kwargs)
+
+    @property
+    def vocab_size(self):
+        warnings.warn(
+            "The `vocab_size` attribute is deprecated and will be removed in v4.44. Please use `text_config.vocab_size` instead.",
+            FutureWarning,
+        )
+        return self._vocab_size
+
+    @vocab_size.setter
+    def vocab_size(self, value):
+        self._vocab_size = value
+
+    def to_dict(self):
+        output = super().to_dict()
+        output.pop("_vocab_size", None)
+        return output
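The `to_dict` override matters for serialization: without it, the private `_vocab_size` backing field would leak into saved config files. A small sketch of the expected round-trip behavior (default construction builds the SigLIP vision and Gemma text sub-configs locally, no downloads):

```python
from transformers import PaliGemmaConfig

config = PaliGemmaConfig()
serialized = config.to_dict()

# The private backing attribute is stripped before serialization...
assert "_vocab_size" not in serialized
# ...while the canonical value still lives on the nested text sub-config.
assert serialized["text_config"]["vocab_size"] == config.text_config.vocab_size
```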
5 changes: 3 additions & 2 deletions src/transformers/models/paligemma/modeling_paligemma.py
@@ -233,7 +233,7 @@ def __init__(self, config: PaliGemmaConfig):
         super().__init__(config)
         self.vision_tower = AutoModel.from_config(config=config.vision_config)
         self.multi_modal_projector = PaliGemmaMultiModalProjector(config)
-        self.vocab_size = config.vocab_size
+        self.vocab_size = config.text_config.vocab_size
         self._attn_implementation = config._attn_implementation

         language_model = AutoModelForCausalLM.from_config(
@@ -276,8 +276,9 @@ def tie_weights(self):
         return self.language_model.tie_weights()

     def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
+        # TODO: config.vocab_size is deprecated and will be removed in v4.44.
+        # `resize_token_embeddings` should work from `modeling_utils.py`
         model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
-        # update vocab size
         self.config.text_config.vocab_size = model_embeds.num_embeddings
         self.config.vocab_size = model_embeds.num_embeddings
         self.vocab_size = model_embeds.num_embeddings
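Stepping back, all the touched files converge on the same recipe: a property that warns on reads of the legacy attribute and a setter that stays silent, so internal write paths (like the resize bookkeeping above) keep working warning-free. A generic, standalone distillation of that pattern (not transformers code):

```python
import warnings

class ConfigLike:
    """Sketch of the deprecation recipe: warn on read, stay silent on write."""

    def __init__(self, vocab_size):
        self._vocab_size = vocab_size

    @property
    def vocab_size(self):
        warnings.warn(
            "`vocab_size` is deprecated. Please use `text_config.vocab_size` instead.",
            FutureWarning,
        )
        return self._vocab_size

    @vocab_size.setter
    def vocab_size(self, value):
        # Silent writes keep code such as resize_token_embeddings warning-free.
        self._vocab_size = value

cfg = ConfigLike(32000)
cfg.vocab_size = 32128  # no warning on assignment
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert cfg.vocab_size == 32128  # the read emits the FutureWarning
assert caught and issubclass(caught[0].category, FutureWarning)
```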
