From 74337000442d4ecb0e8379da00d4457a164b840b Mon Sep 17 00:00:00 2001
From: Sylvain Gugger
Date: Wed, 17 Nov 2021 09:25:59 -0500
Subject: [PATCH 1/6] Add a post init method to all models

---
 src/transformers/modeling_utils.py            | 24 +++++++++----------
 .../models/albert/modeling_albert.py          | 14 +++++------
 src/transformers/models/bart/modeling_bart.py | 10 ++++----
 src/transformers/models/beit/modeling_beit.py |  8 +++----
 src/transformers/models/bert/modeling_bert.py | 18 +++++++-------
 .../modeling_bert_generation.py               |  4 ++--
 .../models/big_bird/modeling_big_bird.py      | 16 ++++++-------
 .../modeling_bigbird_pegasus.py               | 10 ++++----
 .../models/blenderbot/modeling_blenderbot.py  | 10 ++++----
 .../modeling_blenderbot_small.py              | 10 ++++----
 .../models/canine/modeling_canine.py          | 10 ++++----
 src/transformers/models/clip/modeling_clip.py |  6 ++---
 .../models/convbert/modeling_convbert.py      | 12 +++++-----
 src/transformers/models/ctrl/modeling_ctrl.py |  6 ++---
 .../models/deberta/modeling_deberta.py        | 10 ++++----
 .../models/deberta_v2/modeling_deberta_v2.py  | 10 ++++----
 src/transformers/models/deit/modeling_deit.py |  6 ++---
 src/transformers/models/detr/modeling_detr.py | 10 ++++----
 .../models/distilbert/modeling_distilbert.py  | 12 +++++-----
 src/transformers/models/dpr/modeling_dpr.py   | 10 ++++----
 .../models/electra/modeling_electra.py        | 14 +++++------
 .../models/flaubert/modeling_flaubert.py      | 12 +++++-----
 src/transformers/models/fnet/modeling_fnet.py | 16 ++++++-------
 src/transformers/models/fsmt/modeling_fsmt.py |  2 +-
 .../models/funnel/modeling_funnel.py          | 16 ++++++-------
 src/transformers/models/gpt2/modeling_gpt2.py | 10 ++++----
 .../models/gpt_neo/modeling_gpt_neo.py        |  6 ++---
 src/transformers/models/gptj/modeling_gptj.py |  6 ++---
 .../models/hubert/modeling_hubert.py          |  6 ++---
 .../models/ibert/modeling_ibert.py            | 12 +++++-----
 .../models/layoutlm/modeling_layoutlm.py      |  8 +++----
 .../models/layoutlmv2/modeling_layoutlmv2.py  |  8 +++----
 src/transformers/models/led/modeling_led.py   |  8 +++----
 .../models/longformer/modeling_longformer.py  | 12 +++++-----
 src/transformers/models/luke/modeling_luke.py |  8 +++----
 .../models/lxmert/modeling_lxmert.py          |  6 ++---
 .../models/m2m_100/modeling_m2m_100.py        |  8 +++----
 .../models/marian/modeling_marian.py          | 10 ++++----
 .../models/mbart/modeling_mbart.py            | 10 ++++----
 .../megatron_bert/modeling_megatron_bert.py   | 18 +++++++-------
 .../models/mobilebert/modeling_mobilebert.py  | 16 ++++++-------
 .../models/mpnet/modeling_mpnet.py            | 12 +++++-----
 .../models/openai/modeling_openai.py          |  8 +++----
 .../models/pegasus/modeling_pegasus.py        | 10 ++++----
 .../models/prophetnet/modeling_prophetnet.py  | 10 ++++----
 .../models/reformer/modeling_reformer.py      | 10 ++++----
 .../models/rembert/modeling_rembert.py        | 14 +++++------
 .../models/retribert/modeling_retribert.py    |  2 +-
 .../models/roberta/modeling_roberta.py        | 14 +++++------
 .../models/roformer/modeling_roformer.py      | 14 +++++------
 .../models/segformer/modeling_segformer.py    |  6 ++---
 src/transformers/models/sew/modeling_sew.py   |  6 ++---
 .../models/sew_d/modeling_sew_d.py            |  6 ++---
 .../speech_to_text/modeling_speech_to_text.py |  8 +++----
 .../modeling_speech_to_text_2.py              |  4 ++--
 .../models/splinter/modeling_splinter.py      |  4 ++--
 .../squeezebert/modeling_squeezebert.py       | 12 +++++-----
 src/transformers/models/t5/modeling_t5.py     |  8 +++----
 .../models/tapas/modeling_tapas.py            |  8 +++----
 .../models/transfo_xl/modeling_transfo_xl.py  |  6 ++---
 .../models/trocr/modeling_trocr.py            |  4 ++--
 .../models/unispeech/modeling_unispeech.py    |  8 +++----
 .../unispeech_sat/modeling_unispeech_sat.py   |  8 +++----
 .../visual_bert/modeling_visual_bert.py       | 12 +++++-----
 src/transformers/models/vit/modeling_vit.py   |  4 ++--
 .../models/wav2vec2/modeling_wav2vec2.py      | 10 ++++----
 src/transformers/models/xlm/modeling_xlm.py   | 14 +++++------
 .../models/xlnet/modeling_xlnet.py            | 14 +++++------
 ...ng_{{cookiecutter.lowercase_modelname}}.py | 24 +++++++++----------
 tests/test_modeling_common.py                 |  8 -------
 70 files changed, 343 insertions(+), 353 deletions(-)

diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py
index f45c11087f6a..c0e04f7b446c 100644
--- a/src/transformers/modeling_utils.py
+++ b/src/transformers/modeling_utils.py
@@ -412,17 +412,6 @@ def floating_point_ops(
         return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)


-def gradient_checkpointing_hook(module, _):
-    # Hook to enable backward compatibility for gradient checkpointing. Will be removed once all models have a
-    # proper post_init method.
-    if getattr(module.config, "gradient_checkpointing", False):
-        module.gradient_checkpointing_enable()
-        # Remove the attribute now that is has been consumed, so it's no saved in the config.
-        delattr(module.config, "gradient_checkpointing")
-    # The hook will remove itself after the first execution
-    module._gradient_checkpointing_hook.remove()
-
-
 class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin):
     r"""
     Base class for all models.
@@ -490,8 +479,17 @@ def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
         # Save config and origin of the pretrained weights if given in model
         self.config = config
         self.name_or_path = config.name_or_path
-        if self.supports_gradient_checkpointing:
-            self._gradient_checkpointing_hook = self.register_forward_pre_hook(gradient_checkpointing_hook)
+
+    def post_init(self):
+        """
+        A method executed at the end of each Transformer model initialization, to execute code that needs the model's
+        modules properly initialized (such as weight initialization).
+        """
+        self.init_weights()
+        if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
+            self.gradient_checkpointing_enable()
+            # Remove the attribute now that it has been consumed, so it's not saved in the config.
+ delattr(self.config, "gradient_checkpointing") @classmethod def _from_config(cls, config, **kwargs): diff --git a/src/transformers/models/albert/modeling_albert.py b/src/transformers/models/albert/modeling_albert.py index 442242ad43cc..8315a7c0b98c 100755 --- a/src/transformers/models/albert/modeling_albert.py +++ b/src/transformers/models/albert/modeling_albert.py @@ -638,7 +638,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = None self.pooler_activation = None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -757,7 +757,7 @@ def __init__(self, config): self.predictions = AlbertMLMHead(config) self.sop_classifier = AlbertSOPHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.predictions.decoder @@ -903,7 +903,7 @@ def __init__(self, config): self.albert = AlbertModel(config, add_pooling_layer=False) self.predictions = AlbertMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.predictions.decoder @@ -991,7 +991,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1097,7 +1097,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1187,7 +1187,7 @@ def __init__(self, config): self.albert = AlbertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1286,7 +1286,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index f479a9069b0c..ca13d407eadf 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -699,7 +699,7 @@ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = No self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -870,7 +870,7 @@ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = No self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -1130,7 +1130,7 @@ def __init__(self, config: BartConfig): self.encoder = BartEncoder(config, 
self.shared) self.decoder = BartDecoder(config, self.shared) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.shared @@ -1248,7 +1248,7 @@ def __init__(self, config: BartConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -1666,7 +1666,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/beit/modeling_beit.py b/src/transformers/models/beit/modeling_beit.py index a5cca41b0c0d..4b9cb255939d 100755 --- a/src/transformers/models/beit/modeling_beit.py +++ b/src/transformers/models/beit/modeling_beit.py @@ -598,7 +598,7 @@ def __init__(self, config, add_pooling_layer=True): ) self.pooler = BeitPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @@ -715,7 +715,7 @@ def __init__(self, config): self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) @@ -805,7 +805,7 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) @@ -1121,7 +1121,7 @@ def __init__(self, config): self.decode_head = BeitUperHead(config) self.auxiliary_head = BeitFCNHead(config) if config.use_auxiliary_head else None - self.init_weights() + self.post_init() def compute_loss(self, logits, auxiliary_logits, labels): # upsample logits to the images' original size diff --git a/src/transformers/models/bert/modeling_bert.py b/src/transformers/models/bert/modeling_bert.py index a62e653b018e..bb708c2ae98e 100755 --- a/src/transformers/models/bert/modeling_bert.py +++ b/src/transformers/models/bert/modeling_bert.py @@ -870,7 +870,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = BertPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1037,7 +1037,7 @@ def __init__(self, config): self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1145,7 +1145,7 @@ def __init__(self, config): self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1294,7 +1294,7 @@ def __init__(self, config): self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1394,7 +1394,7 @@ 
def __init__(self, config): self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) @@ -1501,7 +1501,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1600,7 +1600,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1698,7 +1698,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1788,7 +1788,7 @@ def __init__(self, config): self.bert = BertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/bert_generation/modeling_bert_generation.py b/src/transformers/models/bert_generation/modeling_bert_generation.py index ad0d5ba8b76e..fc655ab905a6 100755 --- a/src/transformers/models/bert_generation/modeling_bert_generation.py +++ b/src/transformers/models/bert_generation/modeling_bert_generation.py @@ -282,7 +282,7 @@ def __init__(self, config): self.embeddings = BertGenerationEmbeddings(config) self.encoder = BertEncoder(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -456,7 +456,7 @@ def __init__(self, config): self.bert = BertGenerationEncoder(config) self.lm_head = BertGenerationOnlyLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head.decoder diff --git a/src/transformers/models/big_bird/modeling_big_bird.py b/src/transformers/models/big_bird/modeling_big_bird.py index ae415a7a79ab..fccef20dba51 100755 --- a/src/transformers/models/big_bird/modeling_big_bird.py +++ b/src/transformers/models/big_bird/modeling_big_bird.py @@ -1953,7 +1953,7 @@ def __init__(self, config, add_pooling_layer=True): ) self.set_attention_type("original_full") - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -2262,7 +2262,7 @@ def __init__(self, config): self.bert = BigBirdModel(config, add_pooling_layer=True) self.cls = BigBirdPreTrainingHeads(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -2370,7 +2370,7 @@ def __init__(self, config): self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -2472,7 +2472,7 @@ def __init__(self, config): 
self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -2642,7 +2642,7 @@ def __init__(self, config): self.bert = BigBirdModel(config) self.classifier = BigBirdClassificationHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -2737,7 +2737,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward( BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") @@ -2834,7 +2834,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -2942,7 +2942,7 @@ def __init__(self, config, add_pooling_layer=False): self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer) self.qa_classifier = BigBirdForQuestionAnsweringHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index e22621c3d767..e616618cdaac 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -1775,7 +1775,7 @@ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embed self.layers = nn.ModuleList([BigBirdPegasusEncoderLayer(config, seed=i) for i in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -2066,7 +2066,7 @@ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embed self.layers = nn.ModuleList([BigBirdPegasusDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -2327,7 +2327,7 @@ def __init__(self, config: BigBirdPegasusConfig): self.encoder = BigBirdPegasusEncoder(config, self.shared) self.decoder = BigBirdPegasusDecoder(config, self.shared) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.shared @@ -2447,7 +2447,7 @@ def __init__(self, config: BigBirdPegasusConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -2869,7 +2869,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py 
b/src/transformers/models/blenderbot/modeling_blenderbot.py index 1911cd9e9540..b177eb149408 100755 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -656,7 +656,7 @@ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding self.layers = nn.ModuleList([BlenderbotEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -821,7 +821,7 @@ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding self.layers = nn.ModuleList([BlenderbotDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -1083,7 +1083,7 @@ def __init__(self, config: BlenderbotConfig): self.encoder = BlenderbotEncoder(config, self.shared) self.decoder = BlenderbotDecoder(config, self.shared) - self.init_weights() + self.post_init() @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): @@ -1220,7 +1220,7 @@ def __init__(self, config: BlenderbotConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + self.post_init() @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): @@ -1404,7 +1404,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index 26dd44d9f068..621129bf2597 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -657,7 +657,7 @@ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embe self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -821,7 +821,7 @@ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embe self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -1081,7 +1081,7 @@ def __init__(self, config: BlenderbotSmallConfig): self.encoder = BlenderbotSmallEncoder(config, self.shared) self.decoder = BlenderbotSmallDecoder(config, self.shared) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.shared @@ -1208,7 +1208,7 @@ def __init__(self, config: BlenderbotSmallConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + 
self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -1379,7 +1379,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/canine/modeling_canine.py b/src/transformers/models/canine/modeling_canine.py index b461a6c0452f..ee033d1a91ba 100644 --- a/src/transformers/models/canine/modeling_canine.py +++ b/src/transformers/models/canine/modeling_canine.py @@ -1015,7 +1015,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = CaninePooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def _prune_heads(self, heads_to_prune): """ @@ -1273,7 +1273,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1369,7 +1369,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1461,7 +1461,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1548,7 +1548,7 @@ def __init__(self, config): self.canine = CanineModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index dfd8596fd19f..abdb7a3bc33f 100755 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -683,7 +683,7 @@ class CLIPTextModel(CLIPPreTrainedModel): def __init__(self, config: CLIPTextConfig): super().__init__(config) self.text_model = CLIPTextTransformer(config) - self.init_weights() + self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding @@ -792,7 +792,7 @@ class CLIPVisionModel(CLIPPreTrainedModel): def __init__(self, config: CLIPVisionConfig): super().__init__(config) self.vision_model = CLIPVisionTransformer(config) - self.init_weights() + self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @@ -866,7 +866,7 @@ def __init__(self, config: CLIPConfig): self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) def get_text_features( diff --git a/src/transformers/models/convbert/modeling_convbert.py b/src/transformers/models/convbert/modeling_convbert.py index 
2d4b0c57ca2f..3eff90b80930 100755 --- a/src/transformers/models/convbert/modeling_convbert.py +++ b/src/transformers/models/convbert/modeling_convbert.py @@ -775,7 +775,7 @@ def __init__(self, config): self.encoder = ConvBertEncoder(config) self.config = config - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -886,7 +886,7 @@ def __init__(self, config): self.generator_predictions = ConvBertGeneratorPredictions(config) self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.generator_lm_head @@ -995,7 +995,7 @@ def __init__(self, config): self.convbert = ConvBertModel(config) self.classifier = ConvBertClassificationHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1090,7 +1090,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward( CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") @@ -1187,7 +1187,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1274,7 +1274,7 @@ def __init__(self, config): self.convbert = ConvBertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/ctrl/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py index 9c06e2026919..301692340b08 100644 --- a/src/transformers/models/ctrl/modeling_ctrl.py +++ b/src/transformers/models/ctrl/modeling_ctrl.py @@ -338,7 +338,7 @@ def __init__(self, config): ) self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.w @@ -499,7 +499,7 @@ def __init__(self, config): self.transformer = CTRLModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head @@ -615,7 +615,7 @@ def __init__(self, config): self.transformer = CTRLModel(config) self.classifier = nn.Linear(config.n_embd, self.num_labels, bias=False) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py index 4a54ab634017..20fef37d1493 100644 --- a/src/transformers/models/deberta/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -888,7 +888,7 @@ def __init__(self, config): self.encoder = DebertaEncoder(config) self.z_steps = 0 self.config = config - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1001,7 +1001,7 @@ def __init__(self, config): self.deberta = DebertaModel(config) 
self.cls = DebertaOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1141,7 +1141,7 @@ def __init__(self, config): drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out self.dropout = StableDropout(drop_out) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.deberta.get_input_embeddings() @@ -1254,7 +1254,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1338,7 +1338,7 @@ def __init__(self, config): self.deberta = DebertaModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index b1ec6bd011af..091b29171879 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -996,7 +996,7 @@ def __init__(self, config): self.encoder = DebertaV2Encoder(config) self.z_steps = 0 self.config = config - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1110,7 +1110,7 @@ def __init__(self, config): self.deberta = DebertaV2Model(config) self.cls = DebertaV2OnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1251,7 +1251,7 @@ def __init__(self, config): drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out self.dropout = StableDropout(drop_out) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.deberta.get_input_embeddings() @@ -1365,7 +1365,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1450,7 +1450,7 @@ def __init__(self, config): self.deberta = DebertaV2Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/deit/modeling_deit.py b/src/transformers/models/deit/modeling_deit.py index 4a44d67fe999..b0f5593f6db0 100644 --- a/src/transformers/models/deit/modeling_deit.py +++ b/src/transformers/models/deit/modeling_deit.py @@ -458,7 +458,7 @@ def __init__(self, config, add_pooling_layer=True): self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = DeiTPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @@ -574,7 +574,7 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if 
config.num_labels > 0 else nn.Identity() - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) @@ -711,7 +711,7 @@ def __init__(self, config): nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() ) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DeiTForImageClassificationWithTeacherOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 70287626b272..94e5b74e04a2 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -894,7 +894,7 @@ def __init__(self, config: DetrConfig): # in the original DETR, no layernorm is used at the end of the encoder, as "normalize_before" is set to False by default - self.init_weights() + self.post_init() def forward( self, @@ -1001,7 +1001,7 @@ def __init__(self, config: DetrConfig): # in DETR, the decoder uses layernorm after the last decoder layer output self.layernorm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -1179,7 +1179,7 @@ def __init__(self, config: DetrConfig): self.encoder = DetrEncoder(config) self.decoder = DetrDecoder(config) - self.init_weights() + self.post_init() def get_encoder(self): return self.encoder @@ -1333,7 +1333,7 @@ def __init__(self, config: DetrConfig): input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) - self.init_weights() + self.post_init() # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py @torch.jit.unused @@ -1494,7 +1494,7 @@ def __init__(self, config: DetrConfig): hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std ) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetrSegmentationOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py index b68b7c524e82..ae45d6949649 100755 --- a/src/transformers/models/distilbert/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -441,7 +441,7 @@ def __init__(self, config): self.embeddings = Embeddings(config) # Embeddings self.transformer = Transformer(config) # Encoder - self.init_weights() + self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ @@ -571,7 +571,7 @@ def __init__(self, config): self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12) self.vocab_projector = nn.Linear(config.dim, config.vocab_size) - self.init_weights() + self.post_init() self.mlm_loss_fct = nn.CrossEntropyLoss() @@ -677,7 +677,7 @@ def __init__(self, config): self.classifier = nn.Linear(config.dim, config.num_labels) self.dropout = nn.Dropout(config.seq_classif_dropout) - self.init_weights() + self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ @@ -793,7 +793,7 @@ def __init__(self, config): assert config.num_labels == 2 self.dropout = nn.Dropout(config.qa_dropout) - self.init_weights() + self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ @@ -910,7 +910,7 @@ def __init__(self, config): self.dropout = 
nn.Dropout(config.dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ @@ -1015,7 +1015,7 @@ def __init__(self, config): self.classifier = nn.Linear(config.dim, 1) self.dropout = nn.Dropout(config.seq_classif_dropout) - self.init_weights() + self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ diff --git a/src/transformers/models/dpr/modeling_dpr.py b/src/transformers/models/dpr/modeling_dpr.py index 091479af4b3c..2312c0f72a1e 100644 --- a/src/transformers/models/dpr/modeling_dpr.py +++ b/src/transformers/models/dpr/modeling_dpr.py @@ -180,7 +180,7 @@ def __init__(self, config: DPRConfig): self.projection_dim = config.projection_dim if self.projection_dim > 0: self.encode_proj = nn.Linear(self.bert_model.config.hidden_size, config.projection_dim) - self.init_weights() + self.post_init() def forward( self, @@ -232,7 +232,7 @@ def __init__(self, config: DPRConfig): self.encoder = DPREncoder(config) self.qa_outputs = nn.Linear(self.encoder.embeddings_size, 2) self.qa_classifier = nn.Linear(self.encoder.embeddings_size, 1) - self.init_weights() + self.post_init() def forward( self, @@ -447,7 +447,7 @@ def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.ctx_encoder = DPREncoder(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC) @@ -525,7 +525,7 @@ def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.question_encoder = DPREncoder(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC) @@ -602,7 +602,7 @@ def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.span_predictor = DPRSpanPredictor(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(DPR_READER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRReaderOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/electra/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py index eb26bcfdd95b..cd46ae8c5000 100644 --- a/src/transformers/models/electra/modeling_electra.py +++ b/src/transformers/models/electra/modeling_electra.py @@ -817,7 +817,7 @@ def __init__(self, config): self.encoder = ElectraEncoder(config) self.config = config - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -939,7 +939,7 @@ def __init__(self, config): self.electra = ElectraModel(config) self.classifier = ElectraClassificationHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1033,7 +1033,7 @@ def __init__(self, config): self.electra = ElectraModel(config) self.discriminator_predictions = ElectraDiscriminatorPredictions(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=ElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) @@ -1128,7 +1128,7 @@ def __init__(self, config): 
self.generator_predictions = ElectraGeneratorPredictions(config) self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.generator_lm_head @@ -1216,7 +1216,7 @@ def __init__(self, config): ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1305,7 +1305,7 @@ def __init__(self, config): self.electra = ElectraModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1406,7 +1406,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/flaubert/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py index f0f14caa3931..2696b2a0e729 100644 --- a/src/transformers/models/flaubert/modeling_flaubert.py +++ b/src/transformers/models/flaubert/modeling_flaubert.py @@ -336,7 +336,7 @@ class FlaubertWithLMHeadModel(XLMWithLMHeadModel): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + self.post_init() @add_start_docstrings( @@ -357,7 +357,7 @@ class FlaubertForSequenceClassification(XLMForSequenceClassification): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + self.post_init() @add_start_docstrings( @@ -378,7 +378,7 @@ class FlaubertForTokenClassification(XLMForTokenClassification): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + self.post_init() @add_start_docstrings( @@ -399,7 +399,7 @@ class FlaubertForQuestionAnsweringSimple(XLMForQuestionAnsweringSimple): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + self.post_init() @add_start_docstrings( @@ -420,7 +420,7 @@ class FlaubertForQuestionAnswering(XLMForQuestionAnswering): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + self.post_init() @add_start_docstrings( @@ -441,4 +441,4 @@ class FlaubertForMultipleChoice(XLMForMultipleChoice): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + self.post_init() diff --git a/src/transformers/models/fnet/modeling_fnet.py b/src/transformers/models/fnet/modeling_fnet.py index 16ae695b342f..d933c0791f4b 100755 --- a/src/transformers/models/fnet/modeling_fnet.py +++ b/src/transformers/models/fnet/modeling_fnet.py @@ -535,7 +535,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = FNetPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -633,7 +633,7 @@ def __init__(self, config): self.fnet = FNetModel(config) self.cls = 
FNetPreTrainingHeads(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -723,7 +723,7 @@ def __init__(self, config): self.fnet = FNetModel(config) self.cls = FNetOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -791,7 +791,7 @@ def __init__(self, config): self.fnet = FNetModel(config) self.cls = FNetOnlyNSPHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) @@ -885,7 +885,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -969,7 +969,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1050,7 +1050,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1119,7 +1119,7 @@ def __init__(self, config): self.fnet = FNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index 9ddcd1453bbb..b7a7337bbfd2 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -1003,7 +1003,7 @@ def __init__(self, config: FSMTConfig): self.encoder = FSMTEncoder(config, encoder_embed_tokens) self.decoder = FSMTDecoder(config, decoder_embed_tokens) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING) @add_code_sample_docstrings( diff --git a/src/transformers/models/funnel/modeling_funnel.py b/src/transformers/models/funnel/modeling_funnel.py index 7ce2e3221c45..f10c04e8fe4c 100644 --- a/src/transformers/models/funnel/modeling_funnel.py +++ b/src/transformers/models/funnel/modeling_funnel.py @@ -900,7 +900,7 @@ def __init__(self, config): self.embeddings = FunnelEmbeddings(config) self.encoder = FunnelEncoder(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -977,7 +977,7 @@ def __init__(self, config): self.encoder = FunnelEncoder(config) self.decoder = FunnelDecoder(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1082,7 +1082,7 @@ def __init__(self, config): self.funnel = FunnelModel(config) self.discriminator_predictions = FunnelDiscriminatorPredictions(config) - self.init_weights() + self.post_init() 
@add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=FunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) @@ -1164,7 +1164,7 @@ def __init__(self, config): self.funnel = FunnelModel(config) self.lm_head = nn.Linear(config.d_model, config.vocab_size) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head @@ -1244,7 +1244,7 @@ def __init__(self, config): self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1334,7 +1334,7 @@ def __init__(self, config): self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1420,7 +1420,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1502,7 +1502,7 @@ def __init__(self, config): self.funnel = FunnelModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index d8f09d3e7282..08ea72378607 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -677,7 +677,7 @@ def __init__(self, config): self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False @@ -947,7 +947,7 @@ def __init__(self, config): self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False @@ -1117,7 +1117,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.multiple_choice_head = SequenceSummary(config) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False @@ -1330,7 +1330,7 @@ def __init__(self, config): self.transformer = GPT2Model(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False @@ -1461,7 +1461,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index 1be7de2f2cfb..ac0b03278af4 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ 
b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -486,7 +486,7 @@ def __init__(self, config): self.h = nn.ModuleList([GPTNeoBlock(config, layer_id=i) for i in range(config.num_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -675,7 +675,7 @@ def __init__(self, config): self.transformer = GPTNeoModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head @@ -823,7 +823,7 @@ def __init__(self, config): self.transformer = GPTNeoModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings( diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py index 7c01fea81d3e..d5630b6922d5 100755 --- a/src/transformers/models/gptj/modeling_gptj.py +++ b/src/transformers/models/gptj/modeling_gptj.py @@ -444,7 +444,7 @@ def __init__(self, config): self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False @@ -680,7 +680,7 @@ def __init__(self, config): super().__init__(config) self.transformer = GPTJModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False @@ -855,7 +855,7 @@ def __init__(self, config): self.transformer = GPTJModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False diff --git a/src/transformers/models/hubert/modeling_hubert.py b/src/transformers/models/hubert/modeling_hubert.py index 07e61ec18100..aca5133e5235 100755 --- a/src/transformers/models/hubert/modeling_hubert.py +++ b/src/transformers/models/hubert/modeling_hubert.py @@ -899,7 +899,7 @@ def __init__(self, config: HubertConfig): else: self.encoder = HubertEncoder(config) - self.init_weights() + self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( @@ -1039,7 +1039,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ @@ -1147,7 +1147,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ diff --git a/src/transformers/models/ibert/modeling_ibert.py b/src/transformers/models/ibert/modeling_ibert.py index 8173ce1be800..ac5b33e440dc 100644 --- a/src/transformers/models/ibert/modeling_ibert.py +++ b/src/transformers/models/ibert/modeling_ibert.py @@ -754,7 +754,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = IBertPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -865,7 +865,7 @@ def __init__(self, 
config): self.ibert = IBertModel(config, add_pooling_layer=False) self.lm_head = IBertLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head.decoder @@ -979,7 +979,7 @@ def __init__(self, config): self.ibert = IBertModel(config, add_pooling_layer=False) self.classifier = IBertClassificationHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1074,7 +1074,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1168,7 +1168,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1277,7 +1277,7 @@ def __init__(self, config): self.ibert = IBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/layoutlm/modeling_layoutlm.py b/src/transformers/models/layoutlm/modeling_layoutlm.py index 541dec879a18..32ebfb7908d7 100644 --- a/src/transformers/models/layoutlm/modeling_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_layoutlm.py @@ -714,7 +714,7 @@ def __init__(self, config): self.encoder = LayoutLMEncoder(config) self.pooler = LayoutLMPooler(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -856,7 +856,7 @@ def __init__(self, config): self.layoutlm = LayoutLMModel(config) self.cls = LayoutLMOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.layoutlm.embeddings.word_embeddings @@ -979,7 +979,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.layoutlm.embeddings.word_embeddings @@ -1109,7 +1109,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.layoutlm.embeddings.word_embeddings diff --git a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py index e80029a300bc..586d8d033028 100755 --- a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py @@ -724,7 +724,7 @@ def __init__(self, config): self.encoder = LayoutLMv2Encoder(config) self.pooler = LayoutLMv2Pooler(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -957,7 +957,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) 
self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.layoutlmv2.embeddings.word_embeddings @@ -1124,7 +1124,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.layoutlmv2.embeddings.word_embeddings @@ -1239,7 +1239,7 @@ def __init__(self, config, has_visual_segment_embedding=True): self.layoutlmv2 = LayoutLMv2Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.layoutlmv2.embeddings.word_embeddings diff --git a/src/transformers/models/led/modeling_led.py b/src/transformers/models/led/modeling_led.py index 5433d0a7c729..cc1989378839 100755 --- a/src/transformers/models/led/modeling_led.py +++ b/src/transformers/models/led/modeling_led.py @@ -1629,7 +1629,7 @@ def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding] = Non self.layers = nn.ModuleList([LEDEncoderLayer(config, i) for i in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor): @@ -1904,7 +1904,7 @@ def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding] = Non self.layers = nn.ModuleList([LEDDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -2156,7 +2156,7 @@ def __init__(self, config: LEDConfig): self.encoder = LEDEncoder(config, self.shared) self.decoder = LEDDecoder(config, self.shared) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.shared @@ -2283,7 +2283,7 @@ def __init__(self, config: LEDConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.led.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.led.shared.num_embeddings, bias=False) - self.init_weights() + self.post_init() def get_encoder(self): return self.led.get_encoder() diff --git a/src/transformers/models/longformer/modeling_longformer.py b/src/transformers/models/longformer/modeling_longformer.py index 672c0d948ae8..def3a12a9992 100755 --- a/src/transformers/models/longformer/modeling_longformer.py +++ b/src/transformers/models/longformer/modeling_longformer.py @@ -1511,7 +1511,7 @@ def __init__(self, config, add_pooling_layer=True): self.encoder = LongformerEncoder(config) self.pooler = LongformerPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1713,7 +1713,7 @@ def __init__(self, config): self.longformer = LongformerModel(config, add_pooling_layer=False) self.lm_head = LongformerLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head.decoder @@ -1818,7 +1818,7 @@ def __init__(self, config): self.longformer = LongformerModel(config, add_pooling_layer=False) self.classifier = LongformerClassificationHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, 
sequence_length")) @add_code_sample_docstrings( @@ -1943,7 +1943,7 @@ def __init__(self, config): self.longformer = LongformerModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=LongformerQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) @@ -2080,7 +2080,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -2170,7 +2170,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward( LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") diff --git a/src/transformers/models/luke/modeling_luke.py b/src/transformers/models/luke/modeling_luke.py index 97d1f1adfd9c..2537bd87e544 100644 --- a/src/transformers/models/luke/modeling_luke.py +++ b/src/transformers/models/luke/modeling_luke.py @@ -818,7 +818,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = LukePooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1029,7 +1029,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntityClassificationOutput, config_class=_CONFIG_FOR_DOC) @@ -1142,7 +1142,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntityPairClassificationOutput, config_class=_CONFIG_FOR_DOC) @@ -1257,7 +1257,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntitySpanClassificationOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/lxmert/modeling_lxmert.py b/src/transformers/models/lxmert/modeling_lxmert.py index 1135816cc22c..aa2ed978779d 100644 --- a/src/transformers/models/lxmert/modeling_lxmert.py +++ b/src/transformers/models/lxmert/modeling_lxmert.py @@ -891,7 +891,7 @@ def __init__(self, config): self.embeddings = LxmertEmbeddings(config) self.encoder = LxmertEncoder(config) self.pooler = LxmertPooler(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1048,7 +1048,7 @@ def __init__(self, config): self.answer_head = LxmertVisualAnswerHead(config, 
self.num_qa_labels) # Weight initialization - self.init_weights() + self.post_init() # Loss functions self.loss_fcts = { @@ -1303,7 +1303,7 @@ def __init__(self, config): self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels) # Weight initialization - self.init_weights() + self.post_init() # Loss function self.loss = CrossEntropyLoss() diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index 1230bf01e78e..0f534f40e61c 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -705,7 +705,7 @@ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = self.layers = nn.ModuleList([M2M100EncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -870,7 +870,7 @@ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = self.layers = nn.ModuleList([M2M100DecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -1113,7 +1113,7 @@ def __init__(self, config: M2M100Config): self.encoder = M2M100Encoder(config, self.shared) self.decoder = M2M100Decoder(config, self.shared) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.shared @@ -1232,7 +1232,7 @@ def __init__(self, config: M2M100Config): self.model = M2M100Model(config) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + self.post_init() def get_encoder(self): return self.model.get_encoder() diff --git a/src/transformers/models/marian/modeling_marian.py b/src/transformers/models/marian/modeling_marian.py index 94f0f800bd55..d0bd332edf92 100755 --- a/src/transformers/models/marian/modeling_marian.py +++ b/src/transformers/models/marian/modeling_marian.py @@ -668,7 +668,7 @@ def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = self.padding_idx, ) self.layers = nn.ModuleList([MarianEncoderLayer(config) for _ in range(config.encoder_layers)]) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -829,7 +829,7 @@ def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = self.padding_idx, ) self.layers = nn.ModuleList([MarianDecoderLayer(config) for _ in range(config.decoder_layers)]) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -1087,7 +1087,7 @@ def __init__(self, config: MarianConfig): self.encoder = MarianEncoder(config, self.shared) self.decoder = MarianDecoder(config, self.shared) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.shared @@ -1220,7 +1220,7 @@ def __init__(self, config: MarianConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -1399,7 +1399,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return 
self.model.decoder.embed_tokens diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index 564030fb49c7..fec1ce180208 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -696,7 +696,7 @@ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = N self.layernorm_embedding = nn.LayerNorm(embed_dim) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -862,7 +862,7 @@ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = N self.layernorm_embedding = nn.LayerNorm(config.d_model) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -1123,7 +1123,7 @@ def __init__(self, config: MBartConfig): self.encoder = MBartEncoder(config, self.shared) self.decoder = MBartDecoder(config, self.shared) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.shared @@ -1243,7 +1243,7 @@ def __init__(self, config: MBartConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -1664,7 +1664,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/megatron_bert/modeling_megatron_bert.py b/src/transformers/models/megatron_bert/modeling_megatron_bert.py index c482b1b639d7..bd5cf1d5950d 100755 --- a/src/transformers/models/megatron_bert/modeling_megatron_bert.py +++ b/src/transformers/models/megatron_bert/modeling_megatron_bert.py @@ -857,7 +857,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = MegatronBertPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1018,7 +1018,7 @@ def __init__(self, config, add_binary_head=True): self.bert = MegatronBertModel(config) self.cls = MegatronBertPreTrainingHeads(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1127,7 +1127,7 @@ def __init__(self, config): self.bert = MegatronBertModel(config, add_pooling_layer=False) self.cls = MegatronBertOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1274,7 +1274,7 @@ def __init__(self, config): self.bert = MegatronBertModel(config, add_pooling_layer=False) self.cls = MegatronBertOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1375,7 +1375,7 @@ def __init__(self, config): self.bert = MegatronBertModel(config) self.cls = MegatronBertOnlyNSPHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) @@ -1478,7 +1478,7 @@ def __init__(self, config): self.dropout = 
nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1574,7 +1574,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward( MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") @@ -1671,7 +1671,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1761,7 +1761,7 @@ def __init__(self, config): self.bert = MegatronBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/mobilebert/modeling_mobilebert.py b/src/transformers/models/mobilebert/modeling_mobilebert.py index 3c85af3b1bc3..7023e20079d8 100644 --- a/src/transformers/models/mobilebert/modeling_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_mobilebert.py @@ -799,7 +799,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = MobileBertPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -907,7 +907,7 @@ def __init__(self, config): self.mobilebert = MobileBertModel(config) self.cls = MobileBertPreTrainingHeads(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1015,7 +1015,7 @@ def __init__(self, config): self.cls = MobileBertOnlyMLMHead(config) self.config = config - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1111,7 +1111,7 @@ def __init__(self, config): self.mobilebert = MobileBertModel(config) self.cls = MobileBertOnlyNSPHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) @@ -1218,7 +1218,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1318,7 +1318,7 @@ def __init__(self, config): self.mobilebert = MobileBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1421,7 +1421,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 
1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward( MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") @@ -1522,7 +1522,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/mpnet/modeling_mpnet.py b/src/transformers/models/mpnet/modeling_mpnet.py index 52cf537ed25d..1ae2ea31ae51 100644 --- a/src/transformers/models/mpnet/modeling_mpnet.py +++ b/src/transformers/models/mpnet/modeling_mpnet.py @@ -493,7 +493,7 @@ def __init__(self, config, add_pooling_layer=True): self.encoder = MPNetEncoder(config) self.pooler = MPNetPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -583,7 +583,7 @@ def __init__(self, config): self.mpnet = MPNetModel(config, add_pooling_layer=False) self.lm_head = MPNetLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head.decoder @@ -691,7 +691,7 @@ def __init__(self, config): self.mpnet = MPNetModel(config, add_pooling_layer=False) self.classifier = MPNetClassificationHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -785,7 +785,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -877,7 +877,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -985,7 +985,7 @@ def __init__(self, config): self.mpnet = MPNetModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py index 58dd1b055d3e..e000253d7ef0 100644 --- a/src/transformers/models/openai/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -414,7 +414,7 @@ def __init__(self, config): self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)]) self.register_buffer("position_ids", torch.arange(config.n_positions)) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.tokens_embed @@ -540,7 +540,7 @@ def __init__(self, config): self.transformer = OpenAIGPTModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head @@ -629,7 +629,7 @@ def __init__(self, config): self.lm_head = 
nn.Linear(config.n_embd, config.vocab_size, bias=False) self.multiple_choice_head = SequenceSummary(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head @@ -750,7 +750,7 @@ def __init__(self, config): self.transformer = OpenAIGPTModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( diff --git a/src/transformers/models/pegasus/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py index 33a1ca14cb72..90b9f15fb4ab 100755 --- a/src/transformers/models/pegasus/modeling_pegasus.py +++ b/src/transformers/models/pegasus/modeling_pegasus.py @@ -658,7 +658,7 @@ def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def resize_position_embeddings(self, new_num_position_embeddings: int): @@ -853,7 +853,7 @@ def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = self.layers = nn.ModuleList([PegasusDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -1142,7 +1142,7 @@ def __init__(self, config: PegasusConfig): self.encoder = PegasusEncoder(config, self.shared) self.decoder = PegasusDecoder(config, self.shared) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.shared @@ -1293,7 +1293,7 @@ def __init__(self, config: PegasusConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -1490,7 +1490,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/prophetnet/modeling_prophetnet.py b/src/transformers/models/prophetnet/modeling_prophetnet.py index 9f72a35f0dfd..96051d8d0d37 100644 --- a/src/transformers/models/prophetnet/modeling_prophetnet.py +++ b/src/transformers/models/prophetnet/modeling_prophetnet.py @@ -1266,7 +1266,7 @@ def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = Non self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)]) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -1411,7 +1411,7 @@ def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = Non self.layers = nn.ModuleList([ProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)]) self.embeddings_layer_norm = LayerNorm(config.hidden_size) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -1765,7 +1765,7 @@ def __init__(self, config): decoder_config.is_encoder_decoder = False self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings) - self.init_weights() + self.post_init() 
def get_input_embeddings(self): return self.word_embeddings @@ -1882,7 +1882,7 @@ def __init__(self, config: ProphetNetConfig): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head @@ -2092,7 +2092,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.prophetnet.decoder.word_embeddings diff --git a/src/transformers/models/reformer/modeling_reformer.py b/src/transformers/models/reformer/modeling_reformer.py index 528875b4aa97..e1cc7541b2e9 100755 --- a/src/transformers/models/reformer/modeling_reformer.py +++ b/src/transformers/models/reformer/modeling_reformer.py @@ -1974,7 +1974,7 @@ def __init__(self, config): self.embeddings = ReformerEmbeddings(config) self.encoder = ReformerEncoder(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -2188,7 +2188,7 @@ def __init__(self, config): self.reformer = ReformerModel(config) self.lm_head = ReformerOnlyLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head.decoder @@ -2303,7 +2303,7 @@ def __init__(self, config): self.reformer = ReformerModel(config) self.lm_head = ReformerOnlyLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head.decoder @@ -2390,7 +2390,7 @@ def __init__(self, config): if config.is_decoder is True: logger.warning("You might want to disable causal masking for sequence classification") - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( @@ -2508,7 +2508,7 @@ def __init__(self, config): # 2 * config.hidden_size because we use reversible residual layers self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( diff --git a/src/transformers/models/rembert/modeling_rembert.py b/src/transformers/models/rembert/modeling_rembert.py index 3ebbde7fa7ed..dd3c39881892 100755 --- a/src/transformers/models/rembert/modeling_rembert.py +++ b/src/transformers/models/rembert/modeling_rembert.py @@ -765,7 +765,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = RemBertPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -925,7 +925,7 @@ def __init__(self, config): self.rembert = RemBertModel(config, add_pooling_layer=False) self.cls = RemBertOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1027,7 +1027,7 @@ def __init__(self, config): self.rembert = RemBertModel(config, add_pooling_layer=False) self.cls = RemBertOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1173,7 +1173,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( 
@@ -1269,7 +1269,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1361,7 +1361,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1449,7 +1449,7 @@ def __init__(self, config): self.rembert = RemBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/retribert/modeling_retribert.py b/src/transformers/models/retribert/modeling_retribert.py index 08f56e13ee0f..1f82b15b0c6c 100644 --- a/src/transformers/models/retribert/modeling_retribert.py +++ b/src/transformers/models/retribert/modeling_retribert.py @@ -99,7 +99,7 @@ def __init__(self, config): self.ce_loss = nn.CrossEntropyLoss(reduction="mean") - self.init_weights() + self.post_init() def embed_sentences_checkpointed( self, diff --git a/src/transformers/models/roberta/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py index 917bb03f80ae..22d9a98904de 100644 --- a/src/transformers/models/roberta/modeling_roberta.py +++ b/src/transformers/models/roberta/modeling_roberta.py @@ -723,7 +723,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = RobertaPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -897,7 +897,7 @@ def __init__(self, config): # The LM head weights require special treatment only when they are tied with the word embeddings self.update_keys_to_ignore(config, ["lm_head.decoder.weight"]) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head.decoder @@ -1050,7 +1050,7 @@ def __init__(self, config): # The LM head weights require special treatment only when they are tied with the word embeddings self.update_keys_to_ignore(config, ["lm_head.decoder.weight"]) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_head.decoder @@ -1169,7 +1169,7 @@ def __init__(self, config): self.roberta = RobertaModel(config, add_pooling_layer=False) self.classifier = RobertaClassificationHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1265,7 +1265,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1362,7 +1362,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() 
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1474,7 +1474,7 @@ def __init__(self, config): self.roberta = RobertaModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/roformer/modeling_roformer.py b/src/transformers/models/roformer/modeling_roformer.py index 75f690390ea0..1ee13c3a33d9 100644 --- a/src/transformers/models/roformer/modeling_roformer.py +++ b/src/transformers/models/roformer/modeling_roformer.py @@ -817,7 +817,7 @@ def __init__(self, config): self.encoder = RoFormerEncoder(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -973,7 +973,7 @@ def __init__(self, config): self.roformer = RoFormerModel(config) self.cls = RoFormerOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1073,7 +1073,7 @@ def __init__(self, config): self.roformer = RoFormerModel(config) self.cls = RoFormerOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1238,7 +1238,7 @@ def __init__(self, config): self.roformer = RoFormerModel(config) self.classifier = RoFormerClassificationHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1330,7 +1330,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward( ROFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") @@ -1422,7 +1422,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1509,7 +1509,7 @@ def __init__(self, config): self.roformer = RoFormerModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/segformer/modeling_segformer.py b/src/transformers/models/segformer/modeling_segformer.py index 2935d07a1e17..731736b7a17d 100755 --- a/src/transformers/models/segformer/modeling_segformer.py +++ b/src/transformers/models/segformer/modeling_segformer.py @@ -467,7 +467,7 @@ def __init__(self, config): # hierarchical Transformer encoder self.encoder = SegformerEncoder(config) - self.init_weights() + self.post_init() def _prune_heads(self, heads_to_prune): """ @@ -541,7 +541,7 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) 
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) @@ -696,7 +696,7 @@ def __init__(self, config): self.segformer = SegformerModel(config) self.decode_head = SegformerDecodeHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/sew/modeling_sew.py b/src/transformers/models/sew/modeling_sew.py index 55d6bf7caef3..aefbac1dafdc 100644 --- a/src/transformers/models/sew/modeling_sew.py +++ b/src/transformers/models/sew/modeling_sew.py @@ -798,7 +798,7 @@ def __init__(self, config: SEWConfig): self.encoder = SEWEncoder(config) - self.init_weights() + self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( @@ -924,7 +924,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ @@ -1032,7 +1032,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py index facb85f07b11..9e67e273f0a1 100644 --- a/src/transformers/models/sew_d/modeling_sew_d.py +++ b/src/transformers/models/sew_d/modeling_sew_d.py @@ -1329,7 +1329,7 @@ def __init__(self, config: SEWDConfig): self.encoder = SEWDEncoder(config) - self.init_weights() + self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( @@ -1455,7 +1455,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ @@ -1563,7 +1563,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ diff --git a/src/transformers/models/speech_to_text/modeling_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_speech_to_text.py index e631e75731c1..08166a9d5fae 100755 --- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py @@ -723,7 +723,7 @@ def __init__(self, config: Speech2TextConfig): self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -876,7 +876,7 @@ def __init__(self, config: Speech2TextConfig): self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -1130,7 +1130,7 @@ def __init__(self, config: Speech2TextConfig): self.encoder = Speech2TextEncoder(config) self.decoder = Speech2TextDecoder(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return 
self.decoder.embed_tokens @@ -1253,7 +1253,7 @@ def __init__(self, config: Speech2TextConfig): self.model = Speech2TextModel(config) self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_encoder(self): return self.model.get_encoder() diff --git a/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py b/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py index 306cacd48f9a..ff29ce9f561a 100755 --- a/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py +++ b/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py @@ -476,7 +476,7 @@ def __init__(self, config: Speech2Text2Config): self.layers = nn.ModuleList([Speech2Text2DecoderLayer(config) for _ in range(config.decoder_layers)]) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -751,7 +751,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/splinter/modeling_splinter.py b/src/transformers/models/splinter/modeling_splinter.py index 812a5f207034..892ed859deff 100755 --- a/src/transformers/models/splinter/modeling_splinter.py +++ b/src/transformers/models/splinter/modeling_splinter.py @@ -619,7 +619,7 @@ def __init__(self, config): self.embeddings = SplinterEmbeddings(config) self.encoder = SplinterEncoder(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -834,7 +834,7 @@ def __init__(self, config): self.splinter_qass = QuestionAwareSpanSelectionHead(config) self.question_token_id = config.question_token_id - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/squeezebert/modeling_squeezebert.py b/src/transformers/models/squeezebert/modeling_squeezebert.py index 6ec972f06c9e..96d65d999d8c 100644 --- a/src/transformers/models/squeezebert/modeling_squeezebert.py +++ b/src/transformers/models/squeezebert/modeling_squeezebert.py @@ -553,7 +553,7 @@ def __init__(self, config): self.encoder = SqueezeBertEncoder(config) self.pooler = SqueezeBertPooler(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -654,7 +654,7 @@ def __init__(self, config): self.transformer = SqueezeBertModel(config) self.cls = SqueezeBertOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -739,7 +739,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -836,7 +836,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward( SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") @@ -930,7 +930,7 @@ def __init__(self, config): self.dropout = 
nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1017,7 +1017,7 @@ def __init__(self, config): self.transformer = SqueezeBertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py index a9f69c91f5de..6789fefd2187 100644 --- a/src/transformers/models/t5/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -814,7 +814,7 @@ def __init__(self, config, embed_tokens=None): self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False self.device_map = None @@ -1267,7 +1267,7 @@ def __init__(self, config: T5Config): decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False @@ -1457,7 +1457,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False @@ -1731,7 +1731,7 @@ def __init__(self, config: T5Config): encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) - self.init_weights() + self.post_init() # Model parallel self.model_parallel = False diff --git a/src/transformers/models/tapas/modeling_tapas.py b/src/transformers/models/tapas/modeling_tapas.py index e301a2eca505..353eaf1fe258 100644 --- a/src/transformers/models/tapas/modeling_tapas.py +++ b/src/transformers/models/tapas/modeling_tapas.py @@ -877,7 +877,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = TapasPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1016,7 +1016,7 @@ def __init__(self, config): self.tapas = TapasModel(config, add_pooling_layer=False) self.cls = TapasOnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1146,7 +1146,7 @@ def __init__(self, config: TapasConfig): if config.num_aggregation_labels > 0: self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) @@ -1464,7 +1464,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl.py 
b/src/transformers/models/transfo_xl/modeling_transfo_xl.py index e18a5b50f6dd..69d331fbc154 100644 --- a/src/transformers/models/transfo_xl/modeling_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_transfo_xl.py @@ -819,7 +819,7 @@ def __init__(self, config): else: # learnable embeddings and absolute embeddings raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.word_emb @@ -1021,7 +1021,7 @@ def __init__(self, config): config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val ) - self.init_weights() + self.post_init() def tie_weights(self): """ @@ -1170,7 +1170,7 @@ def __init__(self, config): self.num_labels = config.num_labels self.transformer = TransfoXLModel(config) self.score = nn.Linear(config.d_embed, self.num_labels, bias=False) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings( diff --git a/src/transformers/models/trocr/modeling_trocr.py b/src/transformers/models/trocr/modeling_trocr.py index 87502901ea73..3888d14bfd86 100644 --- a/src/transformers/models/trocr/modeling_trocr.py +++ b/src/transformers/models/trocr/modeling_trocr.py @@ -503,7 +503,7 @@ def __init__(self, config: TrOCRConfig): self.layers = nn.ModuleList([TrOCRDecoderLayer(config) for _ in range(config.decoder_layers)]) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -784,7 +784,7 @@ def __init__(self, config): self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/unispeech/modeling_unispeech.py b/src/transformers/models/unispeech/modeling_unispeech.py index 3f700ee15390..2708b07c1f46 100755 --- a/src/transformers/models/unispeech/modeling_unispeech.py +++ b/src/transformers/models/unispeech/modeling_unispeech.py @@ -1045,7 +1045,7 @@ def __init__(self, config: UniSpeechConfig): else: self.encoder = UniSpeechEncoder(config) - self.init_weights() + self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( @@ -1165,7 +1165,7 @@ def __init__(self, config: UniSpeechConfig): self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes) self.dropout = nn.Dropout(config.final_dropout) - self.init_weights() + self.post_init() def set_gumbel_temperature(self, temperature: int): """ @@ -1337,7 +1337,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ @@ -1445,7 +1445,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ diff --git a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py index ae2849206480..e6345e263964 100755 --- a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py @@ -1046,7 +1046,7 @@ def __init__(self, 
config: UniSpeechSatConfig): else: self.encoder = UniSpeechSatEncoder(config) - self.init_weights() + self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( @@ -1171,7 +1171,7 @@ def __init__(self, config: UniSpeechSatConfig): if self.config.do_stable_layer_norm: self.layer_norm_for_extract.requires_grad = False - self.init_weights() + self.post_init() def set_gumbel_temperature(self, temperature: int): """ @@ -1328,7 +1328,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ @@ -1436,7 +1436,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ diff --git a/src/transformers/models/visual_bert/modeling_visual_bert.py b/src/transformers/models/visual_bert/modeling_visual_bert.py index 6d8d51b4ab26..22d2cc25a02c 100755 --- a/src/transformers/models/visual_bert/modeling_visual_bert.py +++ b/src/transformers/models/visual_bert/modeling_visual_bert.py @@ -701,7 +701,7 @@ def __init__(self, config, add_pooling_layer=True): if self.bypass_transformer: self.additional_layer = VisualBertLayer(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -877,7 +877,7 @@ def __init__(self, config): self.visual_bert = VisualBertModel(config) self.cls = VisualBertPreTrainingHeads(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1021,7 +1021,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward( VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") @@ -1170,7 +1170,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) @@ -1292,7 +1292,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, config.num_labels) # 2 - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) @@ -1448,7 +1448,7 @@ def __init__(self, config): self.cls = VisualBertPreTrainingHeads(config) self.attention = VisualBertRegionToPhraseAttention(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/vit/modeling_vit.py b/src/transformers/models/vit/modeling_vit.py index fda10a1ecec6..c1aced4b3786 100644 --- a/src/transformers/models/vit/modeling_vit.py +++ b/src/transformers/models/vit/modeling_vit.py 
@@ -487,7 +487,7 @@ def __init__(self, config, add_pooling_layer=True): self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = ViTPooler(config) if add_pooling_layer else None - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @@ -603,7 +603,7 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py index 0bb456620bed..8f66b932e3e2 100755 --- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py @@ -1152,7 +1152,7 @@ def __init__(self, config: Wav2Vec2Config): self.adapter = Wav2Vec2Adapter(config) if config.add_adapter else None - self.init_weights() + self.post_init() def _mask_hidden_states( self, @@ -1269,7 +1269,7 @@ def __init__(self, config: Wav2Vec2Config): self.quantizer = Wav2Vec2GumbelVectorQuantizer(config) - self.init_weights() + self.post_init() # make sure that project_hid & project_q are initialized like normal linear layers self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim) @@ -1480,7 +1480,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.final_dropout) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC) @@ -1563,7 +1563,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ @@ -1670,7 +1670,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) - self.init_weights() + self.post_init() def freeze_feature_extractor(self): """ diff --git a/src/transformers/models/xlm/modeling_xlm.py b/src/transformers/models/xlm/modeling_xlm.py index 4d4b8c0c8d7e..533b210c5988 100755 --- a/src/transformers/models/xlm/modeling_xlm.py +++ b/src/transformers/models/xlm/modeling_xlm.py @@ -469,7 +469,7 @@ def __init__(self, config): if self.attentions[int(layer)].n_heads == config.n_heads: self.prune_heads({int(layer): list(map(int, heads))}) - self.init_weights() + self.post_init() self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def get_input_embeddings(self): @@ -687,7 +687,7 @@ def __init__(self, config): self.transformer = XLMModel(config) self.pred_layer = XLMPredLayer(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.pred_layer.proj @@ -785,7 +785,7 @@ def __init__(self, config): self.transformer = XLMModel(config) self.sequence_summary = SequenceSummary(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -885,7 +885,7 @@ def __init__(self, config): self.transformer = XLMModel(config) 
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -989,7 +989,7 @@ def __init__(self, config): self.transformer = XLMModel(config) self.qa_outputs = SQuADHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLMForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) @@ -1108,7 +1108,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1201,7 +1201,7 @@ def __init__(self, config, *inputs, **kwargs): self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.num_labels, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/xlnet/modeling_xlnet.py b/src/transformers/models/xlnet/modeling_xlnet.py index 70c37ad84f36..eb3dc5e4ad10 100755 --- a/src/transformers/models/xlnet/modeling_xlnet.py +++ b/src/transformers/models/xlnet/modeling_xlnet.py @@ -955,7 +955,7 @@ def __init__(self, config): self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)]) self.dropout = nn.Dropout(config.dropout) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.word_embedding @@ -1311,7 +1311,7 @@ def __init__(self, config): self.transformer = XLNetModel(config) self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.lm_loss @@ -1493,7 +1493,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1600,7 +1600,7 @@ def __init__(self, config): self.transformer = XLNetModel(config) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1697,7 +1697,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1800,7 +1800,7 @@ def __init__(self, config): self.transformer = XLNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1913,7 +1913,7 @@ def __init__(self, config): self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) - self.init_weights() + self.post_init() 
@add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLNetForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py index 10e7bc599532..97b77c52126e 100755 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py @@ -777,7 +777,7 @@ def __init__(self, config): self.embeddings = {{cookiecutter.camelcase_modelname}}Embeddings(config) self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -943,7 +943,7 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1046,7 +1046,7 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config) - self.init_weights() + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1217,7 +1217,7 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.classifier = {{cookiecutter.camelcase_modelname}}ClassificationHead(config) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1309,7 +1309,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1399,7 +1399,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1486,7 +1486,7 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -2224,7 +2224,7 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tok self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}EncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = 
nn.LayerNorm(embed_dim) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def forward( @@ -2388,7 +2388,7 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tok self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}DecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.init_weights() + self.post_init() self.gradient_checkpointing = False def get_input_embeddings(self): @@ -2640,7 +2640,7 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config, self.shared) self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config, self.shared) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.shared @@ -2755,7 +2755,7 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -3170,7 +3170,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 49027d3f7e01..05c980c64225 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -222,14 +222,6 @@ def test_gradient_checkpointing_backward_compatibility(self): config.gradient_checkpointing = True model = model_class(config) - # Model does not have gradient checkpointing activated yet, it will be done at the first forward. - self.assertFalse(model.is_gradient_checkpointing) - - model.to(torch_device) - inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) - _ = model(**inputs) - - # Model has gradient checkpointing activated after the first forward. 
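The assertions removed here capture the behaviour change: gradient checkpointing used to be activated at the first forward pass, while with `post_init()` it is already active once `__init__` returns, which is what the trimmed test now checks. A hedged reproduction of that check, using `BertForMaskedLM` as one arbitrary model covered by the common tests and assuming a transformers release that includes this patch:

from transformers import BertConfig, BertForMaskedLM

config = BertConfig()
config.gradient_checkpointing = True  # deprecated config attribute, consumed by post_init()
model = BertForMaskedLM(config)

# Before this patch the property only became True after the first forward call.
assert model.is_gradient_checkpointing
# post_init() also drops the attribute from the config so it is not serialized again.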
self.assertTrue(model.is_gradient_checkpointing) def test_gradient_checkpointing_enable_disable(self): From 2a6ceec8b48c9c5380e91b61a71560545fafb116 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Wed, 17 Nov 2021 09:48:28 -0500 Subject: [PATCH 2/6] Fix tests --- src/transformers/models/bart/modeling_bart.py | 4 ++-- .../modeling_bigbird_pegasus.py | 4 ++-- .../models/blenderbot/modeling_blenderbot.py | 4 ++-- .../modeling_blenderbot_small.py | 2 +- src/transformers/models/detr/modeling_detr.py | 2 +- src/transformers/models/gpt2/modeling_gpt2.py | 20 +++++++++---------- .../models/gpt_neo/modeling_gpt_neo.py | 2 +- src/transformers/models/gptj/modeling_gptj.py | 10 ++++++---- src/transformers/models/led/modeling_led.py | 4 ++-- .../models/m2m_100/modeling_m2m_100.py | 4 ++-- .../models/marian/modeling_marian.py | 6 ++++-- .../models/mbart/modeling_mbart.py | 4 ++-- .../models/pegasus/modeling_pegasus.py | 4 ++-- .../models/prophetnet/modeling_prophetnet.py | 4 ++-- .../speech_to_text/modeling_speech_to_text.py | 4 ++-- .../modeling_speech_to_text_2.py | 2 +- .../models/trocr/modeling_trocr.py | 2 +- 17 files changed, 43 insertions(+), 39 deletions(-) diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index ca13d407eadf..53c4118711c6 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -699,8 +699,8 @@ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = No self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens @@ -870,8 +870,8 @@ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = No self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index e616618cdaac..c50613cb8ac4 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -1775,8 +1775,8 @@ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embed self.layers = nn.ModuleList([BigBirdPegasusEncoderLayer(config, seed=i) for i in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.post_init() self.gradient_checkpointing = False + self.post_init() def forward( self, @@ -2066,8 +2066,8 @@ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embed self.layers = nn.ModuleList([BigBirdPegasusDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py index b177eb149408..33d69ae8b416 100755 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ 
b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -656,8 +656,8 @@ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding self.layers = nn.ModuleList([BlenderbotEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def forward( self, @@ -821,8 +821,8 @@ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding self.layers = nn.ModuleList([BlenderbotDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index 621129bf2597..52aea945daef 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -657,8 +657,8 @@ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embe self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.post_init() self.gradient_checkpointing = False + self.post_init() def forward( self, diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 94e5b74e04a2..6ee6c6e17f22 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -1001,8 +1001,8 @@ def __init__(self, config: DetrConfig): # in DETR, the decoder uses layernorm after the last decoder layer output self.layernorm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def forward( self, diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index 08ea72378607..9c8bfcbf2cd1 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -677,13 +677,13 @@ def __init__(self, config): self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.post_init() - # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False + self.post_init() + @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): # Check validity of device_map @@ -947,12 +947,12 @@ def __init__(self, config): self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) - self.post_init() - # Model parallel self.model_parallel = False self.device_map = None + self.post_init() + @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( @@ -1117,12 +1117,12 @@ def __init__(self, config): self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.multiple_choice_head = SequenceSummary(config) - self.post_init() - # Model parallel self.model_parallel = False self.device_map = None + self.post_init() + @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( @@ -1330,12 +1330,12 @@ def __init__(self, config): 
self.transformer = GPT2Model(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) - self.post_init() - # Model parallel self.model_parallel = False self.device_map = None + self.post_init() + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, @@ -1461,12 +1461,12 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.post_init() - # Model parallel self.model_parallel = False self.device_map = None + self.post_init() + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index ac0b03278af4..cfaccbde5031 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -486,8 +486,8 @@ def __init__(self, config): self.h = nn.ModuleList([GPTNeoBlock(config, layer_id=i) for i in range(config.num_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.wte diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py index d5630b6922d5..6013401ebfcc 100755 --- a/src/transformers/models/gptj/modeling_gptj.py +++ b/src/transformers/models/gptj/modeling_gptj.py @@ -444,13 +444,14 @@ def __init__(self, config): self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.post_init() # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False + self.post_init() + @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): # Check validity of device_map @@ -680,12 +681,13 @@ def __init__(self, config): super().__init__(config) self.transformer = GPTJModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size) - self.post_init() # Model parallel self.model_parallel = False self.device_map = None + self.post_init() + @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( @@ -855,12 +857,12 @@ def __init__(self, config): self.transformer = GPTJModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) - self.post_init() - # Model parallel self.model_parallel = False self.device_map = None + self.post_init() + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, diff --git a/src/transformers/models/led/modeling_led.py b/src/transformers/models/led/modeling_led.py index cc1989378839..069760c5a9e5 100755 --- a/src/transformers/models/led/modeling_led.py +++ b/src/transformers/models/led/modeling_led.py @@ -1629,8 +1629,8 @@ def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding] = Non self.layers = nn.ModuleList([LEDEncoderLayer(config, i) for i in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.post_init() self.gradient_checkpointing = False + self.post_init() def _merge_to_attention_mask(self, attention_mask: torch.Tensor, 
global_attention_mask: torch.Tensor): # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn) @@ -1904,8 +1904,8 @@ def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding] = Non self.layers = nn.ModuleList([LEDDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def forward( self, diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index 0f534f40e61c..e1120d4a2a13 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -705,8 +705,8 @@ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = self.layers = nn.ModuleList([M2M100EncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def forward( self, @@ -870,8 +870,8 @@ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = self.layers = nn.ModuleList([M2M100DecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def forward( self, diff --git a/src/transformers/models/marian/modeling_marian.py b/src/transformers/models/marian/modeling_marian.py index d0bd332edf92..6413f2e17776 100755 --- a/src/transformers/models/marian/modeling_marian.py +++ b/src/transformers/models/marian/modeling_marian.py @@ -668,8 +668,9 @@ def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = self.padding_idx, ) self.layers = nn.ModuleList([MarianEncoderLayer(config) for _ in range(config.encoder_layers)]) - self.post_init() + self.gradient_checkpointing = False + self.post_init() def forward( self, @@ -829,8 +830,9 @@ def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = self.padding_idx, ) self.layers = nn.ModuleList([MarianDecoderLayer(config) for _ in range(config.decoder_layers)]) - self.post_init() + self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index fec1ce180208..fd730d389bfa 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -696,8 +696,8 @@ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = N self.layernorm_embedding = nn.LayerNorm(embed_dim) self.layer_norm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def forward( self, @@ -862,8 +862,8 @@ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = N self.layernorm_embedding = nn.LayerNorm(config.d_model) self.layer_norm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens diff --git a/src/transformers/models/pegasus/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py index 90b9f15fb4ab..c53bdb22be78 100755 --- a/src/transformers/models/pegasus/modeling_pegasus.py +++ b/src/transformers/models/pegasus/modeling_pegasus.py @@ -658,8 +658,8 @@ def 
__init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def resize_position_embeddings(self, new_num_position_embeddings: int): """ @@ -853,8 +853,8 @@ def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = self.layers = nn.ModuleList([PegasusDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens diff --git a/src/transformers/models/prophetnet/modeling_prophetnet.py b/src/transformers/models/prophetnet/modeling_prophetnet.py index 96051d8d0d37..39c270c433b1 100644 --- a/src/transformers/models/prophetnet/modeling_prophetnet.py +++ b/src/transformers/models/prophetnet/modeling_prophetnet.py @@ -1266,8 +1266,8 @@ def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = Non self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)]) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.word_embeddings @@ -1411,8 +1411,8 @@ def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = Non self.layers = nn.ModuleList([ProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)]) self.embeddings_layer_norm = LayerNorm(config.hidden_size) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.word_embeddings diff --git a/src/transformers/models/speech_to_text/modeling_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_speech_to_text.py index 08166a9d5fae..d3ac9c7b9494 100755 --- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py @@ -723,8 +723,8 @@ def __init__(self, config: Speech2TextConfig): self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def forward( self, @@ -876,8 +876,8 @@ def __init__(self, config: Speech2TextConfig): self.layer_norm = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens diff --git a/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py b/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py index ff29ce9f561a..10c59ebf292d 100755 --- a/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py +++ b/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py @@ -476,8 +476,8 @@ def __init__(self, config: Speech2Text2Config): self.layers = nn.ModuleList([Speech2Text2DecoderLayer(config) for _ in range(config.decoder_layers)]) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens diff --git a/src/transformers/models/trocr/modeling_trocr.py b/src/transformers/models/trocr/modeling_trocr.py index 3888d14bfd86..3a3f0f29fa98 100644 --- a/src/transformers/models/trocr/modeling_trocr.py +++ 
b/src/transformers/models/trocr/modeling_trocr.py @@ -503,8 +503,8 @@ def __init__(self, config: TrOCRConfig): self.layers = nn.ModuleList([TrOCRDecoderLayer(config) for _ in range(config.decoder_layers)]) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens From bc7578e65828382353aaf592c54d4178236b6f67 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Wed, 17 Nov 2021 10:14:28 -0500 Subject: [PATCH 3/6] Fix last tests --- src/transformers/modeling_utils.py | 3 +++ .../models/blenderbot_small/modeling_blenderbot_small.py | 2 +- src/transformers/models/mbart/modeling_mbart.py | 5 +++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index c0e04f7b446c..9ec64ebb73c7 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -486,6 +486,9 @@ def post_init(self): modules properly initialized (such as weight initialization). """ self.init_weights() + self._backward_compatibility_gradient_checkpointing() + + def _backward_compatibility_gradient_checkpointing(self): if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False): self.gradient_checkpointing_enable() # Remove the attribute now that is has been consumed, so it's no saved in the config. diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index 52aea945daef..96e3d8503012 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -821,8 +821,8 @@ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embe self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index fd730d389bfa..10f850d912c7 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -699,6 +699,11 @@ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = N self.gradient_checkpointing = False self.post_init() + def _backward_compatibility_gradient_checkpointing(self): + # Override to not delete the attribute from the config + if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False): + self.gradient_checkpointing_enable() + def forward( self, input_ids=None, From cda07c259fe17a5749bc3ec6dcc37614521f81b8 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Wed, 17 Nov 2021 10:16:07 -0500 Subject: [PATCH 4/6] Fix templates --- .../modeling_{{cookiecutter.lowercase_modelname}}.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py index 97b77c52126e..eb3c63d8e0d7 100755 --- 
a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py @@ -2224,8 +2224,8 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tok self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}EncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.post_init() self.gradient_checkpointing = False + self.post_init() def forward( self, @@ -2388,8 +2388,8 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tok self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}DecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.post_init() self.gradient_checkpointing = False + self.post_init() def get_input_embeddings(self): return self.embed_tokens From 54803ee9ef0066969de4622cb52d7b97ecefe614 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Thu, 18 Nov 2021 08:00:21 -0500 Subject: [PATCH 5/6] Add comment --- src/transformers/models/albert/modeling_albert.py | 7 +++++++ src/transformers/models/bart/modeling_bart.py | 5 +++++ src/transformers/models/beit/modeling_beit.py | 4 ++++ src/transformers/models/bert/modeling_bert.py | 9 +++++++++ .../bert_generation/modeling_bert_generation.py | 2 ++ .../models/big_bird/modeling_big_bird.py | 8 ++++++++ .../bigbird_pegasus/modeling_bigbird_pegasus.py | 5 +++++ .../models/blenderbot/modeling_blenderbot.py | 5 +++++ .../blenderbot_small/modeling_blenderbot_small.py | 5 +++++ src/transformers/models/canine/modeling_canine.py | 5 +++++ src/transformers/models/clip/modeling_clip.py | 3 +++ .../models/convbert/modeling_convbert.py | 6 ++++++ src/transformers/models/ctrl/modeling_ctrl.py | 3 +++ src/transformers/models/deberta/modeling_deberta.py | 5 +++++ .../models/deberta_v2/modeling_deberta_v2.py | 5 +++++ src/transformers/models/deit/modeling_deit.py | 3 +++ src/transformers/models/detr/modeling_detr.py | 5 +++++ .../models/distilbert/modeling_distilbert.py | 6 ++++++ src/transformers/models/dpr/modeling_dpr.py | 5 +++++ src/transformers/models/electra/modeling_electra.py | 7 +++++++ .../models/flaubert/modeling_flaubert.py | 6 ++++++ src/transformers/models/fnet/modeling_fnet.py | 8 ++++++++ src/transformers/models/fsmt/modeling_fsmt.py | 1 + src/transformers/models/funnel/modeling_funnel.py | 8 ++++++++ src/transformers/models/gpt2/modeling_gpt2.py | 5 +++++ src/transformers/models/gpt_neo/modeling_gpt_neo.py | 3 +++ src/transformers/models/gptj/modeling_gptj.py | 3 +++ src/transformers/models/hubert/modeling_hubert.py | 3 +++ src/transformers/models/ibert/modeling_ibert.py | 6 ++++++ .../models/layoutlm/modeling_layoutlm.py | 4 ++++ .../models/layoutlmv2/modeling_layoutlmv2.py | 4 ++++ src/transformers/models/led/modeling_led.py | 4 ++++ .../models/longformer/modeling_longformer.py | 6 ++++++ src/transformers/models/luke/modeling_luke.py | 4 ++++ src/transformers/models/lxmert/modeling_lxmert.py | 3 +++ src/transformers/models/m2m_100/modeling_m2m_100.py | 4 ++++ src/transformers/models/marian/modeling_marian.py | 5 +++++ src/transformers/models/mbart/modeling_mbart.py | 5 +++++ .../models/megatron_bert/modeling_megatron_bert.py | 9 +++++++++ .../models/mobilebert/modeling_mobilebert.py | 8 ++++++++ src/transformers/models/mpnet/modeling_mpnet.py | 6 ++++++ 
src/transformers/models/openai/modeling_openai.py | 4 ++++ src/transformers/models/pegasus/modeling_pegasus.py | 5 +++++ .../models/prophetnet/modeling_prophetnet.py | 5 +++++ .../models/reformer/modeling_reformer.py | 5 +++++ src/transformers/models/rembert/modeling_rembert.py | 7 +++++++ .../models/retribert/modeling_retribert.py | 1 + src/transformers/models/roberta/modeling_roberta.py | 7 +++++++ .../models/roformer/modeling_roformer.py | 8 ++++++++ .../models/segformer/modeling_segformer.py | 3 +++ src/transformers/models/sew/modeling_sew.py | 3 +++ src/transformers/models/sew_d/modeling_sew_d.py | 3 +++ .../models/speech_to_text/modeling_speech_to_text.py | 4 ++++ .../speech_to_text_2/modeling_speech_to_text_2.py | 2 ++ .../models/splinter/modeling_splinter.py | 2 ++ .../models/squeezebert/modeling_squeezebert.py | 6 ++++++ src/transformers/models/t5/modeling_t5.py | 4 ++++ src/transformers/models/tapas/modeling_tapas.py | 4 ++++ .../models/transfo_xl/modeling_transfo_xl.py | 3 +++ src/transformers/models/trocr/modeling_trocr.py | 2 ++ .../models/unispeech/modeling_unispeech.py | 4 ++++ .../models/unispeech_sat/modeling_unispeech_sat.py | 4 ++++ .../models/visual_bert/modeling_visual_bert.py | 6 ++++++ src/transformers/models/vit/modeling_vit.py | 2 ++ .../models/wav2vec2/modeling_wav2vec2.py | 5 +++++ src/transformers/models/xlm/modeling_xlm.py | 7 +++++++ src/transformers/models/xlnet/modeling_xlnet.py | 7 +++++++ .../modeling_{{cookiecutter.lowercase_modelname}}.py | 12 ++++++++++++ 68 files changed, 333 insertions(+) diff --git a/src/transformers/models/albert/modeling_albert.py b/src/transformers/models/albert/modeling_albert.py index 8315a7c0b98c..2e80eaf6173f 100755 --- a/src/transformers/models/albert/modeling_albert.py +++ b/src/transformers/models/albert/modeling_albert.py @@ -638,6 +638,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = None self.pooler_activation = None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -757,6 +758,7 @@ def __init__(self, config): self.predictions = AlbertMLMHead(config) self.sop_classifier = AlbertSOPHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -903,6 +905,7 @@ def __init__(self, config): self.albert = AlbertModel(config, add_pooling_layer=False) self.predictions = AlbertMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -991,6 +994,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1097,6 +1101,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1187,6 +1192,7 @@ def __init__(self, config): self.albert = AlbertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) 
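PATCH 3 above moves the backward-compatibility handling out of `post_init()` into a dedicated `_backward_compatibility_gradient_checkpointing()` method, which is what lets MBart override it and keep the deprecated attribute on its config. A hypothetical sketch of that override pattern, reusing the `ToyConfig`/`ToyModel` stand-ins from the earlier sketch:

class ToyModelKeepFlag(ToyModel):
    def _backward_compatibility_gradient_checkpointing(self):
        # Same as the default implementation, minus the delattr on self.config,
        # mirroring the MBart override above.
        if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
            self.gradient_checkpointing_enable()


config = ToyConfig()
config.gradient_checkpointing = True
model = ToyModelKeepFlag(config)
assert model.is_gradient_checkpointing
assert config.gradient_checkpointing  # still on the config, unlike the default behaviour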
@@ -1286,6 +1292,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index 53c4118711c6..743b27887f33 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -700,6 +700,7 @@ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = No self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -871,6 +872,7 @@ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = No self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1130,6 +1132,7 @@ def __init__(self, config: BartConfig): self.encoder = BartEncoder(config, self.shared) self.decoder = BartDecoder(config, self.shared) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1248,6 +1251,7 @@ def __init__(self, config: BartConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + # Initialize weights and apply final processing self.post_init() def get_encoder(self): @@ -1666,6 +1670,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/beit/modeling_beit.py b/src/transformers/models/beit/modeling_beit.py index 4b9cb255939d..3b11cc1bdbc7 100755 --- a/src/transformers/models/beit/modeling_beit.py +++ b/src/transformers/models/beit/modeling_beit.py @@ -598,6 +598,7 @@ def __init__(self, config, add_pooling_layer=True): ) self.pooler = BeitPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -715,6 +716,7 @@ def __init__(self, config): self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @@ -805,6 +807,7 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @@ -1121,6 +1124,7 @@ def __init__(self, config): self.decode_head = BeitUperHead(config) self.auxiliary_head = BeitFCNHead(config) if config.use_auxiliary_head else None + # Initialize weights and apply final processing self.post_init() def compute_loss(self, logits, auxiliary_logits, labels): diff --git a/src/transformers/models/bert/modeling_bert.py b/src/transformers/models/bert/modeling_bert.py index bb708c2ae98e..d48209898ec7 100755 --- 
a/src/transformers/models/bert/modeling_bert.py +++ b/src/transformers/models/bert/modeling_bert.py @@ -870,6 +870,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = BertPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1037,6 +1038,7 @@ def __init__(self, config): self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1145,6 +1147,7 @@ def __init__(self, config): self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1294,6 +1297,7 @@ def __init__(self, config): self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1394,6 +1398,7 @@ def __init__(self, config): self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1501,6 +1506,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1600,6 +1606,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @@ -1698,6 +1705,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1788,6 +1796,7 @@ def __init__(self, config): self.bert = BertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/bert_generation/modeling_bert_generation.py b/src/transformers/models/bert_generation/modeling_bert_generation.py index fc655ab905a6..653e585e6837 100755 --- a/src/transformers/models/bert_generation/modeling_bert_generation.py +++ b/src/transformers/models/bert_generation/modeling_bert_generation.py @@ -282,6 +282,7 @@ def __init__(self, config): self.embeddings = BertGenerationEmbeddings(config) self.encoder = BertEncoder(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -456,6 +457,7 @@ def __init__(self, config): self.bert = BertGenerationEncoder(config) self.lm_head = BertGenerationOnlyLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): diff --git a/src/transformers/models/big_bird/modeling_big_bird.py 
b/src/transformers/models/big_bird/modeling_big_bird.py index fccef20dba51..17724e744fd3 100755 --- a/src/transformers/models/big_bird/modeling_big_bird.py +++ b/src/transformers/models/big_bird/modeling_big_bird.py @@ -1953,6 +1953,7 @@ def __init__(self, config, add_pooling_layer=True): ) self.set_attention_type("original_full") + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -2262,6 +2263,7 @@ def __init__(self, config): self.bert = BigBirdModel(config, add_pooling_layer=True) self.cls = BigBirdPreTrainingHeads(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -2370,6 +2372,7 @@ def __init__(self, config): self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -2472,6 +2475,7 @@ def __init__(self, config): self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -2642,6 +2646,7 @@ def __init__(self, config): self.bert = BigBirdModel(config) self.classifier = BigBirdClassificationHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -2737,6 +2742,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( @@ -2834,6 +2840,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -2942,6 +2949,7 @@ def __init__(self, config, add_pooling_layer=False): self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer) self.qa_classifier = BigBirdForQuestionAnsweringHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index c50613cb8ac4..6bd0ae93ddec 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -1776,6 +1776,7 @@ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embed self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def forward( @@ -2067,6 +2068,7 @@ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embed self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -2327,6 +2329,7 @@ def __init__(self, config: BigBirdPegasusConfig): self.encoder = BigBirdPegasusEncoder(config, self.shared) self.decoder = BigBirdPegasusDecoder(config, self.shared) + # Initialize weights and apply final processing 
self.post_init() def get_input_embeddings(self): @@ -2447,6 +2450,7 @@ def __init__(self, config: BigBirdPegasusConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + # Initialize weights and apply final processing self.post_init() def get_encoder(self): @@ -2869,6 +2873,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py index 33d69ae8b416..850739b0365d 100755 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -657,6 +657,7 @@ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def forward( @@ -822,6 +823,7 @@ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1083,6 +1085,7 @@ def __init__(self, config: BlenderbotConfig): self.encoder = BlenderbotEncoder(config, self.shared) self.decoder = BlenderbotDecoder(config, self.shared) + # Initialize weights and apply final processing self.post_init() @classmethod @@ -1220,6 +1223,7 @@ def __init__(self, config: BlenderbotConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + # Initialize weights and apply final processing self.post_init() @classmethod @@ -1404,6 +1408,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index 96e3d8503012..51472a7e3b78 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -658,6 +658,7 @@ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embe self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def forward( @@ -822,6 +823,7 @@ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embe self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1081,6 +1083,7 @@ def __init__(self, config: BlenderbotSmallConfig): self.encoder = BlenderbotSmallEncoder(config, self.shared) self.decoder = BlenderbotSmallDecoder(config, self.shared) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1208,6 +1211,7 @@ def __init__(self, config: BlenderbotSmallConfig): 
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + # Initialize weights and apply final processing self.post_init() def get_encoder(self): @@ -1379,6 +1383,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/canine/modeling_canine.py b/src/transformers/models/canine/modeling_canine.py index ee033d1a91ba..f2ba24ec0d64 100644 --- a/src/transformers/models/canine/modeling_canine.py +++ b/src/transformers/models/canine/modeling_canine.py @@ -1015,6 +1015,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = CaninePooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): @@ -1273,6 +1274,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1369,6 +1371,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @@ -1461,6 +1464,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1548,6 +1552,7 @@ def __init__(self, config): self.canine = CanineModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index abdb7a3bc33f..08a9d0cb9a0e 100755 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -683,6 +683,7 @@ class CLIPTextModel(CLIPPreTrainedModel): def __init__(self, config: CLIPTextConfig): super().__init__(config) self.text_model = CLIPTextTransformer(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: @@ -792,6 +793,7 @@ class CLIPVisionModel(CLIPPreTrainedModel): def __init__(self, config: CLIPVisionConfig): super().__init__(config) self.vision_model = CLIPVisionTransformer(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: @@ -866,6 +868,7 @@ def __init__(self, config: CLIPConfig): self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) diff 
--git a/src/transformers/models/convbert/modeling_convbert.py b/src/transformers/models/convbert/modeling_convbert.py index 3eff90b80930..bee2b2ae62bc 100755 --- a/src/transformers/models/convbert/modeling_convbert.py +++ b/src/transformers/models/convbert/modeling_convbert.py @@ -775,6 +775,7 @@ def __init__(self, config): self.encoder = ConvBertEncoder(config) self.config = config + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -886,6 +887,7 @@ def __init__(self, config): self.generator_predictions = ConvBertGeneratorPredictions(config) self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -995,6 +997,7 @@ def __init__(self, config): self.convbert = ConvBertModel(config) self.classifier = ConvBertClassificationHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1090,6 +1093,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( @@ -1187,6 +1191,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1274,6 +1279,7 @@ def __init__(self, config): self.convbert = ConvBertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/ctrl/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py index 301692340b08..58e147b4b529 100644 --- a/src/transformers/models/ctrl/modeling_ctrl.py +++ b/src/transformers/models/ctrl/modeling_ctrl.py @@ -338,6 +338,7 @@ def __init__(self, config): ) self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -499,6 +500,7 @@ def __init__(self, config): self.transformer = CTRLModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -615,6 +617,7 @@ def __init__(self, config): self.transformer = CTRLModel(config) self.classifier = nn.Linear(config.n_embd, self.num_labels, bias=False) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py index 20fef37d1493..2d2edd8f7dc9 100644 --- a/src/transformers/models/deberta/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -888,6 +888,7 @@ def __init__(self, config): self.encoder = DebertaEncoder(config) self.z_steps = 0 self.config = config + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1001,6 +1002,7 @@ def 
__init__(self, config): self.deberta = DebertaModel(config) self.cls = DebertaOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1141,6 +1143,7 @@ def __init__(self, config): drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out self.dropout = StableDropout(drop_out) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1254,6 +1257,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1338,6 +1342,7 @@ def __init__(self, config): self.deberta = DebertaModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index 091b29171879..e0c78395e6e9 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -996,6 +996,7 @@ def __init__(self, config): self.encoder = DebertaV2Encoder(config) self.z_steps = 0 self.config = config + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1110,6 +1111,7 @@ def __init__(self, config): self.deberta = DebertaV2Model(config) self.cls = DebertaV2OnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1251,6 +1253,7 @@ def __init__(self, config): drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out self.dropout = StableDropout(drop_out) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1365,6 +1368,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1450,6 +1454,7 @@ def __init__(self, config): self.deberta = DebertaV2Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/deit/modeling_deit.py b/src/transformers/models/deit/modeling_deit.py index b0f5593f6db0..e47e88b849f0 100644 --- a/src/transformers/models/deit/modeling_deit.py +++ b/src/transformers/models/deit/modeling_deit.py @@ -458,6 +458,7 @@ def __init__(self, config, add_pooling_layer=True): self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = DeiTPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -574,6 +575,7 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() + # 
Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING) @@ -711,6 +713,7 @@ def __init__(self, config): nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() ) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING) diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 6ee6c6e17f22..e7771a4adb8d 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -894,6 +894,7 @@ def __init__(self, config: DetrConfig): # in the original DETR, no layernorm is used at the end of the encoder, as "normalize_before" is set to False by default + # Initialize weights and apply final processing self.post_init() def forward( @@ -1002,6 +1003,7 @@ def __init__(self, config: DetrConfig): self.layernorm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def forward( @@ -1179,6 +1181,7 @@ def __init__(self, config: DetrConfig): self.encoder = DetrEncoder(config) self.decoder = DetrDecoder(config) + # Initialize weights and apply final processing self.post_init() def get_encoder(self): @@ -1333,6 +1336,7 @@ def __init__(self, config: DetrConfig): input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) + # Initialize weights and apply final processing self.post_init() # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py @@ -1494,6 +1498,7 @@ def __init__(self, config: DetrConfig): hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std ) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DETR_INPUTS_DOCSTRING) diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py index ae45d6949649..a79b4523946c 100755 --- a/src/transformers/models/distilbert/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -441,6 +441,7 @@ def __init__(self, config): self.embeddings = Embeddings(config) # Embeddings self.transformer = Transformer(config) # Encoder + # Initialize weights and apply final processing self.post_init() def get_position_embeddings(self) -> nn.Embedding: @@ -571,6 +572,7 @@ def __init__(self, config): self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12) self.vocab_projector = nn.Linear(config.dim, config.vocab_size) + # Initialize weights and apply final processing self.post_init() self.mlm_loss_fct = nn.CrossEntropyLoss() @@ -677,6 +679,7 @@ def __init__(self, config): self.classifier = nn.Linear(config.dim, config.num_labels) self.dropout = nn.Dropout(config.seq_classif_dropout) + # Initialize weights and apply final processing self.post_init() def get_position_embeddings(self) -> nn.Embedding: @@ -793,6 +796,7 @@ def __init__(self, config): assert config.num_labels == 2 self.dropout = nn.Dropout(config.qa_dropout) + # Initialize weights and apply final processing self.post_init() def get_position_embeddings(self) -> nn.Embedding: @@ -910,6 +914,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def get_position_embeddings(self) -> 
nn.Embedding: @@ -1015,6 +1020,7 @@ def __init__(self, config): self.classifier = nn.Linear(config.dim, 1) self.dropout = nn.Dropout(config.seq_classif_dropout) + # Initialize weights and apply final processing self.post_init() def get_position_embeddings(self) -> nn.Embedding: diff --git a/src/transformers/models/dpr/modeling_dpr.py b/src/transformers/models/dpr/modeling_dpr.py index 2312c0f72a1e..6cde47678c88 100644 --- a/src/transformers/models/dpr/modeling_dpr.py +++ b/src/transformers/models/dpr/modeling_dpr.py @@ -180,6 +180,7 @@ def __init__(self, config: DPRConfig): self.projection_dim = config.projection_dim if self.projection_dim > 0: self.encode_proj = nn.Linear(self.bert_model.config.hidden_size, config.projection_dim) + # Initialize weights and apply final processing self.post_init() def forward( @@ -232,6 +233,7 @@ def __init__(self, config: DPRConfig): self.encoder = DPREncoder(config) self.qa_outputs = nn.Linear(self.encoder.embeddings_size, 2) self.qa_classifier = nn.Linear(self.encoder.embeddings_size, 1) + # Initialize weights and apply final processing self.post_init() def forward( @@ -447,6 +449,7 @@ def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.ctx_encoder = DPREncoder(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING) @@ -525,6 +528,7 @@ def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.question_encoder = DPREncoder(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING) @@ -602,6 +606,7 @@ def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.span_predictor = DPRSpanPredictor(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DPR_READER_INPUTS_DOCSTRING) diff --git a/src/transformers/models/electra/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py index cd46ae8c5000..c94a5c408b71 100644 --- a/src/transformers/models/electra/modeling_electra.py +++ b/src/transformers/models/electra/modeling_electra.py @@ -817,6 +817,7 @@ def __init__(self, config): self.encoder = ElectraEncoder(config) self.config = config + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -939,6 +940,7 @@ def __init__(self, config): self.electra = ElectraModel(config) self.classifier = ElectraClassificationHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1033,6 +1035,7 @@ def __init__(self, config): self.electra = ElectraModel(config) self.discriminator_predictions = ElectraDiscriminatorPredictions(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1128,6 +1131,7 @@ def __init__(self, config): self.generator_predictions = ElectraGeneratorPredictions(config) self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1216,6 +1220,7 @@ def __init__(self, config): ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # 
Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1305,6 +1310,7 @@ def __init__(self, config): self.electra = ElectraModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1406,6 +1412,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) diff --git a/src/transformers/models/flaubert/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py index 2696b2a0e729..9887b639c016 100644 --- a/src/transformers/models/flaubert/modeling_flaubert.py +++ b/src/transformers/models/flaubert/modeling_flaubert.py @@ -336,6 +336,7 @@ class FlaubertWithLMHeadModel(XLMWithLMHeadModel): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) + # Initialize weights and apply final processing self.post_init() @@ -357,6 +358,7 @@ class FlaubertForSequenceClassification(XLMForSequenceClassification): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) + # Initialize weights and apply final processing self.post_init() @@ -378,6 +380,7 @@ class FlaubertForTokenClassification(XLMForTokenClassification): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) + # Initialize weights and apply final processing self.post_init() @@ -399,6 +402,7 @@ class FlaubertForQuestionAnsweringSimple(XLMForQuestionAnsweringSimple): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) + # Initialize weights and apply final processing self.post_init() @@ -420,6 +424,7 @@ class FlaubertForQuestionAnswering(XLMForQuestionAnswering): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) + # Initialize weights and apply final processing self.post_init() @@ -441,4 +446,5 @@ class FlaubertForMultipleChoice(XLMForMultipleChoice): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) + # Initialize weights and apply final processing self.post_init() diff --git a/src/transformers/models/fnet/modeling_fnet.py b/src/transformers/models/fnet/modeling_fnet.py index d933c0791f4b..76aa1aa50432 100755 --- a/src/transformers/models/fnet/modeling_fnet.py +++ b/src/transformers/models/fnet/modeling_fnet.py @@ -535,6 +535,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = FNetPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -633,6 +634,7 @@ def __init__(self, config): self.fnet = FNetModel(config) self.cls = FNetPreTrainingHeads(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -723,6 +725,7 @@ def __init__(self, config): self.fnet = FNetModel(config) self.cls = FNetOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -791,6 +794,7 @@ def __init__(self, config): self.fnet = 
FNetModel(config) self.cls = FNetOnlyNSPHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -885,6 +889,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -969,6 +974,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @@ -1050,6 +1056,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1119,6 +1126,7 @@ def __init__(self, config): self.fnet = FNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index b7a7337bbfd2..a1c4c1ed8ce9 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -1003,6 +1003,7 @@ def __init__(self, config: FSMTConfig): self.encoder = FSMTEncoder(config, encoder_embed_tokens) self.decoder = FSMTDecoder(config, decoder_embed_tokens) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING) diff --git a/src/transformers/models/funnel/modeling_funnel.py b/src/transformers/models/funnel/modeling_funnel.py index f10c04e8fe4c..fffed242fd96 100644 --- a/src/transformers/models/funnel/modeling_funnel.py +++ b/src/transformers/models/funnel/modeling_funnel.py @@ -900,6 +900,7 @@ def __init__(self, config): self.embeddings = FunnelEmbeddings(config) self.encoder = FunnelEncoder(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -977,6 +978,7 @@ def __init__(self, config): self.encoder = FunnelEncoder(config) self.decoder = FunnelDecoder(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1082,6 +1084,7 @@ def __init__(self, config): self.funnel = FunnelModel(config) self.discriminator_predictions = FunnelDiscriminatorPredictions(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1164,6 +1167,7 @@ def __init__(self, config): self.funnel = FunnelModel(config) self.lm_head = nn.Linear(config.d_model, config.vocab_size) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1244,6 +1248,7 @@ def __init__(self, config): self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, config.num_labels) + # Initialize 
weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1334,6 +1339,7 @@ def __init__(self, config): self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @@ -1420,6 +1426,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1502,6 +1509,7 @@ def __init__(self, config): self.funnel = FunnelModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index 9c8bfcbf2cd1..77ef0386ea93 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -682,6 +682,7 @@ def __init__(self, config): self.device_map = None self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) @@ -951,6 +952,7 @@ def __init__(self, config): self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) @@ -1121,6 +1123,7 @@ def __init__(self, config): self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) @@ -1334,6 +1337,7 @@ def __init__(self, config): self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @@ -1465,6 +1469,7 @@ def __init__(self, config): self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index cfaccbde5031..9785178ce864 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -487,6 +487,7 @@ def __init__(self, config): self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -675,6 +676,7 @@ def __init__(self, config): self.transformer = GPTNeoModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -823,6 +825,7 @@ def __init__(self, config): self.transformer = GPTNeoModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + # Initialize weights and apply final processing self.post_init() 
@add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py index 6013401ebfcc..603619cc5aef 100755 --- a/src/transformers/models/gptj/modeling_gptj.py +++ b/src/transformers/models/gptj/modeling_gptj.py @@ -450,6 +450,7 @@ def __init__(self, config): self.device_map = None self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) @@ -686,6 +687,7 @@ def __init__(self, config): self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) @@ -861,6 +863,7 @@ def __init__(self, config): self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/hubert/modeling_hubert.py b/src/transformers/models/hubert/modeling_hubert.py index aca5133e5235..2f8c59257c5e 100755 --- a/src/transformers/models/hubert/modeling_hubert.py +++ b/src/transformers/models/hubert/modeling_hubert.py @@ -899,6 +899,7 @@ def __init__(self, config: HubertConfig): else: self.encoder = HubertEncoder(config) + # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states @@ -1039,6 +1040,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): @@ -1147,6 +1149,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): diff --git a/src/transformers/models/ibert/modeling_ibert.py b/src/transformers/models/ibert/modeling_ibert.py index ac5b33e440dc..6666258e7035 100644 --- a/src/transformers/models/ibert/modeling_ibert.py +++ b/src/transformers/models/ibert/modeling_ibert.py @@ -754,6 +754,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = IBertPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -865,6 +866,7 @@ def __init__(self, config): self.ibert = IBertModel(config, add_pooling_layer=False) self.lm_head = IBertLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -979,6 +981,7 @@ def __init__(self, config): self.ibert = IBertModel(config, add_pooling_layer=False) self.classifier = IBertClassificationHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1074,6 +1077,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @@ -1168,6 +1172,7 @@ def __init__(self, config): self.dropout = 
nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1277,6 +1282,7 @@ def __init__(self, config): self.ibert = IBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/layoutlm/modeling_layoutlm.py b/src/transformers/models/layoutlm/modeling_layoutlm.py index 32ebfb7908d7..186146e12090 100644 --- a/src/transformers/models/layoutlm/modeling_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_layoutlm.py @@ -714,6 +714,7 @@ def __init__(self, config): self.encoder = LayoutLMEncoder(config) self.pooler = LayoutLMPooler(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -856,6 +857,7 @@ def __init__(self, config): self.layoutlm = LayoutLMModel(config) self.cls = LayoutLMOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -979,6 +981,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1109,6 +1112,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py index 586d8d033028..93706000e62a 100755 --- a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py @@ -724,6 +724,7 @@ def __init__(self, config): self.encoder = LayoutLMv2Encoder(config) self.pooler = LayoutLMv2Pooler(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -957,6 +958,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1124,6 +1126,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1239,6 +1242,7 @@ def __init__(self, config, has_visual_segment_embedding=True): self.layoutlmv2 = LayoutLMv2Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/led/modeling_led.py b/src/transformers/models/led/modeling_led.py index 069760c5a9e5..2f15448522af 100755 --- a/src/transformers/models/led/modeling_led.py +++ b/src/transformers/models/led/modeling_led.py @@ -1630,6 +1630,7 @@ def __init__(self, config: LEDConfig, 
embed_tokens: Optional[nn.Embedding] = Non self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor): @@ -1905,6 +1906,7 @@ def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding] = Non self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def forward( @@ -2156,6 +2158,7 @@ def __init__(self, config: LEDConfig): self.encoder = LEDEncoder(config, self.shared) self.decoder = LEDDecoder(config, self.shared) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -2283,6 +2286,7 @@ def __init__(self, config: LEDConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.led.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.led.shared.num_embeddings, bias=False) + # Initialize weights and apply final processing self.post_init() def get_encoder(self): diff --git a/src/transformers/models/longformer/modeling_longformer.py b/src/transformers/models/longformer/modeling_longformer.py index def3a12a9992..93bfd90dfbce 100755 --- a/src/transformers/models/longformer/modeling_longformer.py +++ b/src/transformers/models/longformer/modeling_longformer.py @@ -1511,6 +1511,7 @@ def __init__(self, config, add_pooling_layer=True): self.encoder = LongformerEncoder(config) self.pooler = LongformerPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1713,6 +1714,7 @@ def __init__(self, config): self.longformer = LongformerModel(config, add_pooling_layer=False) self.lm_head = LongformerLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1818,6 +1820,7 @@ def __init__(self, config): self.longformer = LongformerModel(config, add_pooling_layer=False) self.classifier = LongformerClassificationHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1943,6 +1946,7 @@ def __init__(self, config): self.longformer = LongformerModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -2080,6 +2084,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -2170,6 +2175,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( diff --git a/src/transformers/models/luke/modeling_luke.py b/src/transformers/models/luke/modeling_luke.py index 2537bd87e544..6edd84a3ce25 100644 --- a/src/transformers/models/luke/modeling_luke.py +++ 
b/src/transformers/models/luke/modeling_luke.py @@ -818,6 +818,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = LukePooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1029,6 +1030,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1142,6 +1144,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1257,6 +1260,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/lxmert/modeling_lxmert.py b/src/transformers/models/lxmert/modeling_lxmert.py index aa2ed978779d..c78e36fddb75 100644 --- a/src/transformers/models/lxmert/modeling_lxmert.py +++ b/src/transformers/models/lxmert/modeling_lxmert.py @@ -891,6 +891,7 @@ def __init__(self, config): self.embeddings = LxmertEmbeddings(config) self.encoder = LxmertEncoder(config) self.pooler = LxmertPooler(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1048,6 +1049,7 @@ def __init__(self, config): self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels) # Weight initialization + # Initialize weights and apply final processing self.post_init() # Loss functions @@ -1303,6 +1305,7 @@ def __init__(self, config): self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels) # Weight initialization + # Initialize weights and apply final processing self.post_init() # Loss function diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index e1120d4a2a13..4c9caadd8cb4 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -706,6 +706,7 @@ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def forward( @@ -871,6 +872,7 @@ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def forward( @@ -1113,6 +1115,7 @@ def __init__(self, config: M2M100Config): self.encoder = M2M100Encoder(config, self.shared) self.decoder = M2M100Decoder(config, self.shared) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1232,6 +1235,7 @@ def __init__(self, config: M2M100Config): self.model = M2M100Model(config) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + # 
Initialize weights and apply final processing self.post_init() def get_encoder(self): diff --git a/src/transformers/models/marian/modeling_marian.py b/src/transformers/models/marian/modeling_marian.py index 6413f2e17776..ef4369dcb8c8 100755 --- a/src/transformers/models/marian/modeling_marian.py +++ b/src/transformers/models/marian/modeling_marian.py @@ -670,6 +670,7 @@ def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = self.layers = nn.ModuleList([MarianEncoderLayer(config) for _ in range(config.encoder_layers)]) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def forward( @@ -832,6 +833,7 @@ def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = self.layers = nn.ModuleList([MarianDecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1089,6 +1091,7 @@ def __init__(self, config: MarianConfig): self.encoder = MarianEncoder(config, self.shared) self.decoder = MarianDecoder(config, self.shared) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1222,6 +1225,7 @@ def __init__(self, config: MarianConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + # Initialize weights and apply final processing self.post_init() def get_encoder(self): @@ -1401,6 +1405,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index 10f850d912c7..82452251bf35 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -697,6 +697,7 @@ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = N self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def _backward_compatibility_gradient_checkpointing(self): @@ -868,6 +869,7 @@ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = N self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1128,6 +1130,7 @@ def __init__(self, config: MBartConfig): self.encoder = MBartEncoder(config, self.shared) self.decoder = MBartDecoder(config, self.shared) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1248,6 +1251,7 @@ def __init__(self, config: MBartConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + # Initialize weights and apply final processing self.post_init() def get_encoder(self): @@ -1669,6 +1673,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git 
a/src/transformers/models/megatron_bert/modeling_megatron_bert.py b/src/transformers/models/megatron_bert/modeling_megatron_bert.py index bd5cf1d5950d..12f026f63c31 100755 --- a/src/transformers/models/megatron_bert/modeling_megatron_bert.py +++ b/src/transformers/models/megatron_bert/modeling_megatron_bert.py @@ -857,6 +857,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = MegatronBertPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1018,6 +1019,7 @@ def __init__(self, config, add_binary_head=True): self.bert = MegatronBertModel(config) self.cls = MegatronBertPreTrainingHeads(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1127,6 +1129,7 @@ def __init__(self, config): self.bert = MegatronBertModel(config, add_pooling_layer=False) self.cls = MegatronBertOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1274,6 +1277,7 @@ def __init__(self, config): self.bert = MegatronBertModel(config, add_pooling_layer=False) self.cls = MegatronBertOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1375,6 +1379,7 @@ def __init__(self, config): self.bert = MegatronBertModel(config) self.cls = MegatronBertOnlyNSPHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1478,6 +1483,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1574,6 +1580,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( @@ -1671,6 +1678,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1761,6 +1769,7 @@ def __init__(self, config): self.bert = MegatronBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/mobilebert/modeling_mobilebert.py b/src/transformers/models/mobilebert/modeling_mobilebert.py index 7023e20079d8..28c01d55211a 100644 --- a/src/transformers/models/mobilebert/modeling_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_mobilebert.py @@ -799,6 +799,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = MobileBertPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -907,6 +908,7 @@ def __init__(self, config): self.mobilebert = 
MobileBertModel(config) self.cls = MobileBertPreTrainingHeads(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1015,6 +1017,7 @@ def __init__(self, config): self.cls = MobileBertOnlyMLMHead(config) self.config = config + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1111,6 +1114,7 @@ def __init__(self, config): self.mobilebert = MobileBertModel(config) self.cls = MobileBertOnlyNSPHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1218,6 +1222,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1318,6 +1323,7 @@ def __init__(self, config): self.mobilebert = MobileBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1421,6 +1427,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( @@ -1522,6 +1529,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/mpnet/modeling_mpnet.py b/src/transformers/models/mpnet/modeling_mpnet.py index 1ae2ea31ae51..70e2d09a93f7 100644 --- a/src/transformers/models/mpnet/modeling_mpnet.py +++ b/src/transformers/models/mpnet/modeling_mpnet.py @@ -493,6 +493,7 @@ def __init__(self, config, add_pooling_layer=True): self.encoder = MPNetEncoder(config) self.pooler = MPNetPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -583,6 +584,7 @@ def __init__(self, config): self.mpnet = MPNetModel(config, add_pooling_layer=False) self.lm_head = MPNetLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -691,6 +693,7 @@ def __init__(self, config): self.mpnet = MPNetModel(config, add_pooling_layer=False) self.classifier = MPNetClassificationHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -785,6 +788,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @@ -877,6 +881,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 
config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -985,6 +990,7 @@ def __init__(self, config): self.mpnet = MPNetModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py index e000253d7ef0..782812b7e70f 100644 --- a/src/transformers/models/openai/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -414,6 +414,7 @@ def __init__(self, config): self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)]) self.register_buffer("position_ids", torch.arange(config.n_positions)) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -540,6 +541,7 @@ def __init__(self, config): self.transformer = OpenAIGPTModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -629,6 +631,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.multiple_choice_head = SequenceSummary(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -750,6 +753,7 @@ def __init__(self, config): self.transformer = OpenAIGPTModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) diff --git a/src/transformers/models/pegasus/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py index c53bdb22be78..4929c1014106 100755 --- a/src/transformers/models/pegasus/modeling_pegasus.py +++ b/src/transformers/models/pegasus/modeling_pegasus.py @@ -659,6 +659,7 @@ def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def resize_position_embeddings(self, new_num_position_embeddings: int): @@ -854,6 +855,7 @@ def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1142,6 +1144,7 @@ def __init__(self, config: PegasusConfig): self.encoder = PegasusEncoder(config, self.shared) self.decoder = PegasusDecoder(config, self.shared) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1293,6 +1296,7 @@ def __init__(self, config: PegasusConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + # Initialize weights and apply final processing self.post_init() def get_encoder(self): @@ -1490,6 +1494,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # 
Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/prophetnet/modeling_prophetnet.py b/src/transformers/models/prophetnet/modeling_prophetnet.py index 39c270c433b1..a3e89aa69f43 100644 --- a/src/transformers/models/prophetnet/modeling_prophetnet.py +++ b/src/transformers/models/prophetnet/modeling_prophetnet.py @@ -1267,6 +1267,7 @@ def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = Non self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)]) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1412,6 +1413,7 @@ def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = Non self.embeddings_layer_norm = LayerNorm(config.hidden_size) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1765,6 +1767,7 @@ def __init__(self, config): decoder_config.is_encoder_decoder = False self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1882,6 +1885,7 @@ def __init__(self, config: ProphetNetConfig): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -2092,6 +2096,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/reformer/modeling_reformer.py b/src/transformers/models/reformer/modeling_reformer.py index e1cc7541b2e9..eae0b3009224 100755 --- a/src/transformers/models/reformer/modeling_reformer.py +++ b/src/transformers/models/reformer/modeling_reformer.py @@ -1974,6 +1974,7 @@ def __init__(self, config): self.embeddings = ReformerEmbeddings(config) self.encoder = ReformerEncoder(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -2188,6 +2189,7 @@ def __init__(self, config): self.reformer = ReformerModel(config) self.lm_head = ReformerOnlyLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -2303,6 +2305,7 @@ def __init__(self, config): self.reformer = ReformerModel(config) self.lm_head = ReformerOnlyLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -2390,6 +2393,7 @@ def __init__(self, config): if config.is_decoder is True: logger.warning("You might want to disable causal masking for sequence classification") + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @@ -2508,6 +2512,7 @@ def __init__(self, config): # 2 * config.hidden_size because we use reversible residual layers self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) diff --git a/src/transformers/models/rembert/modeling_rembert.py b/src/transformers/models/rembert/modeling_rembert.py index dd3c39881892..bc5569e553b5 100755 --- 
a/src/transformers/models/rembert/modeling_rembert.py +++ b/src/transformers/models/rembert/modeling_rembert.py @@ -765,6 +765,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = RemBertPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -925,6 +926,7 @@ def __init__(self, config): self.rembert = RemBertModel(config, add_pooling_layer=False) self.cls = RemBertOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1027,6 +1029,7 @@ def __init__(self, config): self.rembert = RemBertModel(config, add_pooling_layer=False) self.cls = RemBertOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1173,6 +1176,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1269,6 +1273,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @@ -1361,6 +1366,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1449,6 +1455,7 @@ def __init__(self, config): self.rembert = RemBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/retribert/modeling_retribert.py b/src/transformers/models/retribert/modeling_retribert.py index 1f82b15b0c6c..2456545a2218 100644 --- a/src/transformers/models/retribert/modeling_retribert.py +++ b/src/transformers/models/retribert/modeling_retribert.py @@ -99,6 +99,7 @@ def __init__(self, config): self.ce_loss = nn.CrossEntropyLoss(reduction="mean") + # Initialize weights and apply final processing self.post_init() def embed_sentences_checkpointed( diff --git a/src/transformers/models/roberta/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py index 22d9a98904de..5f85a81fe805 100644 --- a/src/transformers/models/roberta/modeling_roberta.py +++ b/src/transformers/models/roberta/modeling_roberta.py @@ -723,6 +723,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = RobertaPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -897,6 +898,7 @@ def __init__(self, config): # The LM head weights require special treatment only when they are tied with the word embeddings self.update_keys_to_ignore(config, ["lm_head.decoder.weight"]) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1050,6 
+1052,7 @@ def __init__(self, config): # The LM head weights require special treatment only when they are tied with the word embeddings self.update_keys_to_ignore(config, ["lm_head.decoder.weight"]) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1169,6 +1172,7 @@ def __init__(self, config): self.roberta = RobertaModel(config, add_pooling_layer=False) self.classifier = RobertaClassificationHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1265,6 +1269,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @@ -1362,6 +1367,7 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1474,6 +1480,7 @@ def __init__(self, config): self.roberta = RobertaModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/roformer/modeling_roformer.py b/src/transformers/models/roformer/modeling_roformer.py index 1ee13c3a33d9..54d1035648c1 100644 --- a/src/transformers/models/roformer/modeling_roformer.py +++ b/src/transformers/models/roformer/modeling_roformer.py @@ -817,6 +817,7 @@ def __init__(self, config): self.encoder = RoFormerEncoder(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -973,6 +974,7 @@ def __init__(self, config): self.roformer = RoFormerModel(config) self.cls = RoFormerOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1073,6 +1075,7 @@ def __init__(self, config): self.roformer = RoFormerModel(config) self.cls = RoFormerOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1238,6 +1241,7 @@ def __init__(self, config): self.roformer = RoFormerModel(config) self.classifier = RoFormerClassificationHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1330,6 +1334,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( @@ -1422,6 +1427,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1509,6 +1515,7 @@
def __init__(self, config): self.roformer = RoFormerModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/segformer/modeling_segformer.py b/src/transformers/models/segformer/modeling_segformer.py index 731736b7a17d..5b1593af2208 100755 --- a/src/transformers/models/segformer/modeling_segformer.py +++ b/src/transformers/models/segformer/modeling_segformer.py @@ -467,6 +467,7 @@ def __init__(self, config): # hierarchical Transformer encoder self.encoder = SegformerEncoder(config) + # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): @@ -541,6 +542,7 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -696,6 +698,7 @@ def __init__(self, config): self.segformer = SegformerModel(config) self.decode_head = SegformerDecodeHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/sew/modeling_sew.py b/src/transformers/models/sew/modeling_sew.py index aefbac1dafdc..fd4cf4bf4d28 100644 --- a/src/transformers/models/sew/modeling_sew.py +++ b/src/transformers/models/sew/modeling_sew.py @@ -798,6 +798,7 @@ def __init__(self, config: SEWConfig): self.encoder = SEWEncoder(config) + # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states @@ -924,6 +925,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): @@ -1032,6 +1034,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py index 9e67e273f0a1..53f9862b4721 100644 --- a/src/transformers/models/sew_d/modeling_sew_d.py +++ b/src/transformers/models/sew_d/modeling_sew_d.py @@ -1329,6 +1329,7 @@ def __init__(self, config: SEWDConfig): self.encoder = SEWDEncoder(config) + # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states @@ -1455,6 +1456,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): @@ -1563,6 +1565,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): diff --git 
a/src/transformers/models/speech_to_text/modeling_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_speech_to_text.py index d3ac9c7b9494..aead484a5933 100755 --- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py @@ -724,6 +724,7 @@ def __init__(self, config: Speech2TextConfig): self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def forward( @@ -877,6 +878,7 @@ def __init__(self, config: Speech2TextConfig): self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1130,6 +1132,7 @@ def __init__(self, config: Speech2TextConfig): self.encoder = Speech2TextEncoder(config) self.decoder = Speech2TextDecoder(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1253,6 +1256,7 @@ def __init__(self, config: Speech2TextConfig): self.model = Speech2TextModel(config) self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_encoder(self): diff --git a/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py b/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py index 10c59ebf292d..a6b4e5b54247 100755 --- a/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py +++ b/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py @@ -477,6 +477,7 @@ def __init__(self, config: Speech2Text2Config): self.layers = nn.ModuleList([Speech2Text2DecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -751,6 +752,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/splinter/modeling_splinter.py b/src/transformers/models/splinter/modeling_splinter.py index 892ed859deff..19dab0457d7d 100755 --- a/src/transformers/models/splinter/modeling_splinter.py +++ b/src/transformers/models/splinter/modeling_splinter.py @@ -619,6 +619,7 @@ def __init__(self, config): self.embeddings = SplinterEmbeddings(config) self.encoder = SplinterEncoder(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -834,6 +835,7 @@ def __init__(self, config): self.splinter_qass = QuestionAwareSpanSelectionHead(config) self.question_token_id = config.question_token_id + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/squeezebert/modeling_squeezebert.py b/src/transformers/models/squeezebert/modeling_squeezebert.py index 96d65d999d8c..ba12c2341dde 100644 --- a/src/transformers/models/squeezebert/modeling_squeezebert.py +++ b/src/transformers/models/squeezebert/modeling_squeezebert.py @@ -553,6 +553,7 @@ def __init__(self, config): self.encoder = SqueezeBertEncoder(config) self.pooler = SqueezeBertPooler(config) + # Initialize weights and apply final processing self.post_init() def 
get_input_embeddings(self): @@ -654,6 +655,7 @@ def __init__(self, config): self.transformer = SqueezeBertModel(config) self.cls = SqueezeBertOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -739,6 +741,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -836,6 +839,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( @@ -930,6 +934,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1017,6 +1022,7 @@ def __init__(self, config): self.transformer = SqueezeBertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py index 6789fefd2187..78ccd072364a 100644 --- a/src/transformers/models/t5/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -814,6 +814,7 @@ def __init__(self, config, embed_tokens=None): self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) + # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False @@ -1267,6 +1268,7 @@ def __init__(self, config: T5Config): decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) + # Initialize weights and apply final processing self.post_init() # Model parallel @@ -1457,6 +1459,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() # Model parallel @@ -1731,6 +1734,7 @@ def __init__(self, config: T5Config): encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) + # Initialize weights and apply final processing self.post_init() # Model parallel diff --git a/src/transformers/models/tapas/modeling_tapas.py b/src/transformers/models/tapas/modeling_tapas.py index 353eaf1fe258..7ff9081fab2a 100644 --- a/src/transformers/models/tapas/modeling_tapas.py +++ b/src/transformers/models/tapas/modeling_tapas.py @@ -877,6 +877,7 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = TapasPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1016,6 +1017,7 @@ def __init__(self, config): self.tapas = TapasModel(config, add_pooling_layer=False) self.cls = TapasOnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1146,6 +1148,7 @@ def 
__init__(self, config: TapasConfig): if config.num_aggregation_labels > 0: self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1464,6 +1467,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_transfo_xl.py index 69d331fbc154..fda73520c9c2 100644 --- a/src/transformers/models/transfo_xl/modeling_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_transfo_xl.py @@ -819,6 +819,7 @@ def __init__(self, config): else: # learnable embeddings and absolute embeddings raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1021,6 +1022,7 @@ def __init__(self, config): config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val ) + # Initialize weights and apply final processing self.post_init() def tie_weights(self): @@ -1170,6 +1172,7 @@ def __init__(self, config): self.num_labels = config.num_labels self.transformer = TransfoXLModel(config) self.score = nn.Linear(config.d_embed, self.num_labels, bias=False) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) diff --git a/src/transformers/models/trocr/modeling_trocr.py b/src/transformers/models/trocr/modeling_trocr.py index 3a3f0f29fa98..5b8943a26a93 100644 --- a/src/transformers/models/trocr/modeling_trocr.py +++ b/src/transformers/models/trocr/modeling_trocr.py @@ -504,6 +504,7 @@ def __init__(self, config: TrOCRConfig): self.layers = nn.ModuleList([TrOCRDecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -784,6 +785,7 @@ def __init__(self, config): self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): diff --git a/src/transformers/models/unispeech/modeling_unispeech.py b/src/transformers/models/unispeech/modeling_unispeech.py index 2708b07c1f46..cd4ff0108116 100755 --- a/src/transformers/models/unispeech/modeling_unispeech.py +++ b/src/transformers/models/unispeech/modeling_unispeech.py @@ -1045,6 +1045,7 @@ def __init__(self, config: UniSpeechConfig): else: self.encoder = UniSpeechEncoder(config) + # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states @@ -1165,6 +1166,7 @@ def __init__(self, config: UniSpeechConfig): self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes) self.dropout = nn.Dropout(config.final_dropout) + # Initialize weights and apply final processing self.post_init() def set_gumbel_temperature(self, temperature: int): @@ -1337,6 +1339,7 @@ def __init__(self, 
config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): @@ -1445,6 +1448,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): diff --git a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py index e6345e263964..c69faafc4397 100755 --- a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py @@ -1046,6 +1046,7 @@ def __init__(self, config: UniSpeechSatConfig): else: self.encoder = UniSpeechSatEncoder(config) + # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states @@ -1171,6 +1172,7 @@ def __init__(self, config: UniSpeechSatConfig): if self.config.do_stable_layer_norm: self.layer_norm_for_extract.requires_grad = False + # Initialize weights and apply final processing self.post_init() def set_gumbel_temperature(self, temperature: int): @@ -1328,6 +1330,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): @@ -1436,6 +1439,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): diff --git a/src/transformers/models/visual_bert/modeling_visual_bert.py b/src/transformers/models/visual_bert/modeling_visual_bert.py index 22d2cc25a02c..eabca9ad4c46 100755 --- a/src/transformers/models/visual_bert/modeling_visual_bert.py +++ b/src/transformers/models/visual_bert/modeling_visual_bert.py @@ -701,6 +701,7 @@ def __init__(self, config, add_pooling_layer=True): if self.bypass_transformer: self.additional_layer = VisualBertLayer(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -877,6 +878,7 @@ def __init__(self, config): self.visual_bert = VisualBertModel(config) self.cls = VisualBertPreTrainingHeads(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1021,6 +1023,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( @@ -1170,6 +1173,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1292,6 +1296,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, config.num_labels) # 2 + # Initialize weights and apply final processing self.post_init() 
@add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1448,6 +1453,7 @@ def __init__(self, config): self.cls = VisualBertPreTrainingHeads(config) self.attention = VisualBertRegionToPhraseAttention(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/src/transformers/models/vit/modeling_vit.py b/src/transformers/models/vit/modeling_vit.py index c1aced4b3786..b1bc30312486 100644 --- a/src/transformers/models/vit/modeling_vit.py +++ b/src/transformers/models/vit/modeling_vit.py @@ -487,6 +487,7 @@ def __init__(self, config, add_pooling_layer=True): self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = ViTPooler(config) if add_pooling_layer else None + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -603,6 +604,7 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py index 8f66b932e3e2..00eec6933be9 100755 --- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py @@ -1152,6 +1152,7 @@ def __init__(self, config: Wav2Vec2Config): self.adapter = Wav2Vec2Adapter(config) if config.add_adapter else None + # Initialize weights and apply final processing self.post_init() def _mask_hidden_states( @@ -1269,6 +1270,7 @@ def __init__(self, config: Wav2Vec2Config): self.quantizer = Wav2Vec2GumbelVectorQuantizer(config) + # Initialize weights and apply final processing self.post_init() # make sure that project_hid & project_q are initialized like normal linear layers @@ -1480,6 +1482,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.final_dropout) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) @@ -1563,6 +1566,7 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): @@ -1670,6 +1674,7 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): diff --git a/src/transformers/models/xlm/modeling_xlm.py b/src/transformers/models/xlm/modeling_xlm.py index 533b210c5988..c3219952c27f 100755 --- a/src/transformers/models/xlm/modeling_xlm.py +++ b/src/transformers/models/xlm/modeling_xlm.py @@ -469,6 +469,7 @@ def __init__(self, config): if self.attentions[int(layer)].n_heads == config.n_heads: self.prune_heads({int(layer): list(map(int, heads))}) + # Initialize weights and apply final processing self.post_init() self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) @@ -687,6 +688,7 @@ def __init__(self, config): self.transformer = XLMModel(config) 
self.pred_layer = XLMPredLayer(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -785,6 +787,7 @@ def __init__(self, config): self.transformer = XLMModel(config) self.sequence_summary = SequenceSummary(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -885,6 +888,7 @@ def __init__(self, config): self.transformer = XLMModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -989,6 +993,7 @@ def __init__(self, config): self.transformer = XLMModel(config) self.qa_outputs = SQuADHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1108,6 +1113,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1201,6 +1207,7 @@ def __init__(self, config, *inputs, **kwargs): self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.num_labels, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) diff --git a/src/transformers/models/xlnet/modeling_xlnet.py b/src/transformers/models/xlnet/modeling_xlnet.py index eb3dc5e4ad10..10aadbdfbaac 100755 --- a/src/transformers/models/xlnet/modeling_xlnet.py +++ b/src/transformers/models/xlnet/modeling_xlnet.py @@ -955,6 +955,7 @@ def __init__(self, config): self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)]) self.dropout = nn.Dropout(config.dropout) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -1311,6 +1312,7 @@ def __init__(self, config): self.transformer = XLNetModel(config) self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1493,6 +1495,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1600,6 +1603,7 @@ def __init__(self, config): self.transformer = XLNetModel(config) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1697,6 +1701,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @@ -1800,6 +1805,7 @@ def __init__(self, config): self.transformer = 
XLNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1913,6 +1919,7 @@ def __init__(self, config): self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py index eb3c63d8e0d7..7d0afd2d9c65 100755 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py @@ -777,6 +777,7 @@ def __init__(self, config): self.embeddings = {{cookiecutter.camelcase_modelname}}Embeddings(config) self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -943,6 +944,7 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1046,6 +1048,7 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config) + # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): @@ -1217,6 +1220,7 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.classifier = {{cookiecutter.camelcase_modelname}}ClassificationHead(config) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1309,6 +1313,7 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @@ -1399,6 +1404,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -1486,6 +1492,7 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing self.post_init() 
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @@ -2225,6 +2232,7 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tok self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def forward( @@ -2389,6 +2397,7 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tok self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -2640,6 +2649,7 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config, self.shared) self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config, self.shared) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): @@ -2755,6 +2765,7 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + # Initialize weights and apply final processing self.post_init() def get_encoder(self): @@ -3170,6 +3181,7 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): From 279925be0ea0db2342bfd14e0a6d23f8dd6e5b55 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Thu, 18 Nov 2021 08:03:04 -0500 Subject: [PATCH 6/6] Forgot to save --- src/transformers/models/roformer/modeling_roformer.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/transformers/models/roformer/modeling_roformer.py b/src/transformers/models/roformer/modeling_roformer.py index 54d1035648c1..14e74a24f836 100644 --- a/src/transformers/models/roformer/modeling_roformer.py +++ b/src/transformers/models/roformer/modeling_roformer.py @@ -1334,7 +1334,6 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) - # Initialize weights and apply final processing # Initialize weights and apply final processing self.post_init()
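The hunks above all apply the same mechanical change: every model `__init__` now ends with a commented call to `self.post_init()`, which in this patch gathers weight initialization and the legacy gradient-checkpointing flag handling in one place. Below is a minimal, self-contained sketch of the pattern using simplified stand-in classes (TinyConfig, TinyPreTrainedModel, TinyModel are illustrative names only, not the actual transformers classes); it is plain Python and only assumes PyTorch is installed.

    # Illustrative sketch only -- simplified stand-ins, not the real transformers code.
    # It shows the pattern the patch applies: every __init__ defines its submodules
    # first and then finishes with a single call to post_init().
    import torch.nn as nn


    class TinyConfig:
        def __init__(self, hidden_size=8, gradient_checkpointing=False):
            self.hidden_size = hidden_size
            self.gradient_checkpointing = gradient_checkpointing


    class TinyPreTrainedModel(nn.Module):
        supports_gradient_checkpointing = True

        def __init__(self, config):
            super().__init__()
            self.config = config
            self.gradient_checkpointing = False

        def init_weights(self):
            # stand-in for the real weight-initialization logic
            for module in self.modules():
                if isinstance(module, nn.Linear):
                    nn.init.normal_(module.weight, std=0.02)
                    if module.bias is not None:
                        nn.init.zeros_(module.bias)

        def gradient_checkpointing_enable(self):
            self.gradient_checkpointing = True

        def post_init(self):
            # initialize weights, then honor a legacy gradient_checkpointing flag
            # carried on the config (sketch of the behavior, not the exact code)
            self.init_weights()
            if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
                self.gradient_checkpointing_enable()


    class TinyModel(TinyPreTrainedModel):
        def __init__(self, config):
            super().__init__(config)
            self.classifier = nn.Linear(config.hidden_size, 2)

            # Initialize weights and apply final processing
            self.post_init()


    model = TinyModel(TinyConfig(gradient_checkpointing=True))
    print(model.gradient_checkpointing)  # True

Because post_init() runs after all submodules are defined, it can only be the last statement of each __init__, which is why every hunk in this series adds the comment and call at the very end of the constructor rather than next to the layer definitions.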