diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index f45c11087f6a..9ec64ebb73c7 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -412,17 +412,6 @@ def floating_point_ops( return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings) -def gradient_checkpointing_hook(module, _): - # Hook to enable backward compatibility for gradient checkpointing. Will be removed once all models have a - # proper post_init method. - if getattr(module.config, "gradient_checkpointing", False): - module.gradient_checkpointing_enable() - # Remove the attribute now that is has been consumed, so it's no saved in the config. - delattr(module.config, "gradient_checkpointing") - # The hook will remove itself after the first execution - module._gradient_checkpointing_hook.remove() - - class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin): r""" Base class for all models. @@ -490,8 +479,20 @@ def __init__(self, config: PretrainedConfig, *inputs, **kwargs): # Save config and origin of the pretrained weights if given in model self.config = config self.name_or_path = config.name_or_path - if self.supports_gradient_checkpointing: - self._gradient_checkpointing_hook = self.register_forward_pre_hook(gradient_checkpointing_hook) + + def post_init(self): + """ + A method executed at the end of each Transformer model initialization, to execute code that needs the model's + modules properly initialized (such as weight initialization). + """ + self.init_weights() + self._backward_compatibility_gradient_checkpointing() + + def _backward_compatibility_gradient_checkpointing(self): + if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False): + self.gradient_checkpointing_enable() + # Remove the attribute now that it has been consumed, so it's not saved in the config.
+ delattr(self.config, "gradient_checkpointing") @classmethod def _from_config(cls, config, **kwargs): diff --git a/src/transformers/models/albert/modeling_albert.py b/src/transformers/models/albert/modeling_albert.py index 442242ad43cc..2e80eaf6173f 100755 --- a/src/transformers/models/albert/modeling_albert.py +++ b/src/transformers/models/albert/modeling_albert.py @@ -638,7 +638,8 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = None self.pooler_activation = None - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -757,7 +758,8 @@ def __init__(self, config): self.predictions = AlbertMLMHead(config) self.sop_classifier = AlbertSOPHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.predictions.decoder @@ -903,7 +905,8 @@ def __init__(self, config): self.albert = AlbertModel(config, add_pooling_layer=False) self.predictions = AlbertMLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.predictions.decoder @@ -991,7 +994,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1097,7 +1101,8 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1187,7 +1192,8 @@ def __init__(self, config): self.albert = AlbertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1286,7 +1292,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index f479a9069b0c..743b27887f33 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -699,8 +699,9 @@ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = No self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embed_tokens @@ -870,8 +871,9 @@ def __init__(self, 
config: BartConfig, embed_tokens: Optional[nn.Embedding] = No self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embed_tokens @@ -1130,7 +1132,8 @@ def __init__(self, config: BartConfig): self.encoder = BartEncoder(config, self.shared) self.decoder = BartDecoder(config, self.shared) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.shared @@ -1248,7 +1251,8 @@ def __init__(self, config: BartConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -1666,7 +1670,8 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/beit/modeling_beit.py b/src/transformers/models/beit/modeling_beit.py index a5cca41b0c0d..3b11cc1bdbc7 100755 --- a/src/transformers/models/beit/modeling_beit.py +++ b/src/transformers/models/beit/modeling_beit.py @@ -598,7 +598,8 @@ def __init__(self, config, add_pooling_layer=True): ) self.pooler = BeitPooler(config) if add_pooling_layer else None - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @@ -715,7 +716,8 @@ def __init__(self, config): self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) @@ -805,7 +807,8 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) @@ -1121,7 +1124,8 @@ def __init__(self, config): self.decode_head = BeitUperHead(config) self.auxiliary_head = BeitFCNHead(config) if config.use_auxiliary_head else None - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def compute_loss(self, logits, auxiliary_logits, labels): # upsample logits to the images' original size diff --git a/src/transformers/models/bert/modeling_bert.py b/src/transformers/models/bert/modeling_bert.py index a62e653b018e..d48209898ec7 100755 --- a/src/transformers/models/bert/modeling_bert.py +++ b/src/transformers/models/bert/modeling_bert.py @@ -870,7 +870,8 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = BertPooler(config) if add_pooling_layer else None - self.init_weights() + 
# Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1037,7 +1038,8 @@ def __init__(self, config): self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1145,7 +1147,8 @@ def __init__(self, config): self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1294,7 +1297,8 @@ def __init__(self, config): self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1394,7 +1398,8 @@ def __init__(self, config): self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) @@ -1501,7 +1506,8 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1600,7 +1606,8 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1698,7 +1705,8 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1788,7 +1796,8 @@ def __init__(self, config): self.bert = BertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/bert_generation/modeling_bert_generation.py b/src/transformers/models/bert_generation/modeling_bert_generation.py index ad0d5ba8b76e..653e585e6837 100755 --- a/src/transformers/models/bert_generation/modeling_bert_generation.py +++ b/src/transformers/models/bert_generation/modeling_bert_generation.py @@ -282,7 +282,8 @@ def __init__(self, config): self.embeddings = BertGenerationEmbeddings(config) self.encoder = BertEncoder(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def 
get_input_embeddings(self): return self.embeddings.word_embeddings @@ -456,7 +457,8 @@ def __init__(self, config): self.bert = BertGenerationEncoder(config) self.lm_head = BertGenerationOnlyLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.lm_head.decoder diff --git a/src/transformers/models/big_bird/modeling_big_bird.py b/src/transformers/models/big_bird/modeling_big_bird.py index ae415a7a79ab..17724e744fd3 100755 --- a/src/transformers/models/big_bird/modeling_big_bird.py +++ b/src/transformers/models/big_bird/modeling_big_bird.py @@ -1953,7 +1953,8 @@ def __init__(self, config, add_pooling_layer=True): ) self.set_attention_type("original_full") - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -2262,7 +2263,8 @@ def __init__(self, config): self.bert = BigBirdModel(config, add_pooling_layer=True) self.cls = BigBirdPreTrainingHeads(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -2370,7 +2372,8 @@ def __init__(self, config): self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -2472,7 +2475,8 @@ def __init__(self, config): self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -2642,7 +2646,8 @@ def __init__(self, config): self.bert = BigBirdModel(config) self.classifier = BigBirdClassificationHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -2737,7 +2742,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward( BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") @@ -2834,7 +2840,8 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -2942,7 +2949,8 @@ def __init__(self, config, add_pooling_layer=False): self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer) self.qa_classifier = BigBirdForQuestionAnsweringHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index e22621c3d767..6bd0ae93ddec 100755 --- 
a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -1775,8 +1775,9 @@ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embed self.layers = nn.ModuleList([BigBirdPegasusEncoderLayer(config, seed=i) for i in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def forward( self, @@ -2066,8 +2067,9 @@ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embed self.layers = nn.ModuleList([BigBirdPegasusDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embed_tokens @@ -2327,7 +2329,8 @@ def __init__(self, config: BigBirdPegasusConfig): self.encoder = BigBirdPegasusEncoder(config, self.shared) self.decoder = BigBirdPegasusDecoder(config, self.shared) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.shared @@ -2447,7 +2450,8 @@ def __init__(self, config: BigBirdPegasusConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -2869,7 +2873,8 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py index 1911cd9e9540..850739b0365d 100755 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -656,8 +656,9 @@ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding self.layers = nn.ModuleList([BlenderbotEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def forward( self, @@ -821,8 +822,9 @@ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding self.layers = nn.ModuleList([BlenderbotDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embed_tokens @@ -1083,7 +1085,8 @@ def __init__(self, config: BlenderbotConfig): self.encoder = BlenderbotEncoder(config, self.shared) self.decoder = BlenderbotDecoder(config, self.shared) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): @@ -1220,7 
+1223,8 @@ def __init__(self, config: BlenderbotConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): @@ -1404,7 +1408,8 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index 26dd44d9f068..51472a7e3b78 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -657,8 +657,9 @@ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embe self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def forward( self, @@ -821,8 +822,9 @@ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embe self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embed_tokens @@ -1081,7 +1083,8 @@ def __init__(self, config: BlenderbotSmallConfig): self.encoder = BlenderbotSmallEncoder(config, self.shared) self.decoder = BlenderbotSmallDecoder(config, self.shared) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.shared @@ -1208,7 +1211,8 @@ def __init__(self, config: BlenderbotSmallConfig): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -1379,7 +1383,8 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/canine/modeling_canine.py b/src/transformers/models/canine/modeling_canine.py index b461a6c0452f..f2ba24ec0d64 100644 --- a/src/transformers/models/canine/modeling_canine.py +++ b/src/transformers/models/canine/modeling_canine.py @@ -1015,7 +1015,8 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = CaninePooler(config) if add_pooling_layer else None - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def _prune_heads(self, heads_to_prune): """ @@ -1273,7 +1274,8 @@ def __init__(self, config): 
self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1369,7 +1371,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1461,7 +1464,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1548,7 +1552,8 @@ def __init__(self, config): self.canine = CanineModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index dfd8596fd19f..08a9d0cb9a0e 100755 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -683,7 +683,8 @@ class CLIPTextModel(CLIPPreTrainedModel): def __init__(self, config: CLIPTextConfig): super().__init__(config) self.text_model = CLIPTextTransformer(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding @@ -792,7 +793,8 @@ class CLIPVisionModel(CLIPPreTrainedModel): def __init__(self, config: CLIPVisionConfig): super().__init__(config) self.vision_model = CLIPVisionTransformer(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @@ -866,7 +868,8 @@ def __init__(self, config: CLIPConfig): self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) def get_text_features( diff --git a/src/transformers/models/convbert/modeling_convbert.py b/src/transformers/models/convbert/modeling_convbert.py index 2d4b0c57ca2f..bee2b2ae62bc 100755 --- a/src/transformers/models/convbert/modeling_convbert.py +++ b/src/transformers/models/convbert/modeling_convbert.py @@ -775,7 +775,8 @@ def __init__(self, config): self.encoder = ConvBertEncoder(config) self.config = config - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -886,7 +887,8 @@ def __init__(self, config): self.generator_predictions = 
ConvBertGeneratorPredictions(config) self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.generator_lm_head @@ -995,7 +997,8 @@ def __init__(self, config): self.convbert = ConvBertModel(config) self.classifier = ConvBertClassificationHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1090,7 +1093,8 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward( CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") @@ -1187,7 +1191,8 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1274,7 +1279,8 @@ def __init__(self, config): self.convbert = ConvBertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/ctrl/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py index 9c06e2026919..58e147b4b529 100644 --- a/src/transformers/models/ctrl/modeling_ctrl.py +++ b/src/transformers/models/ctrl/modeling_ctrl.py @@ -338,7 +338,8 @@ def __init__(self, config): ) self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.w @@ -499,7 +500,8 @@ def __init__(self, config): self.transformer = CTRLModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.lm_head @@ -615,7 +617,8 @@ def __init__(self, config): self.transformer = CTRLModel(config) self.classifier = nn.Linear(config.n_embd, self.num_labels, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py index 4a54ab634017..2d2edd8f7dc9 100644 --- a/src/transformers/models/deberta/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -888,7 +888,8 @@ def __init__(self, config): self.encoder = DebertaEncoder(config) self.z_steps = 0 self.config = config - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1001,7 +1002,8 @@ def __init__(self, config): self.deberta = DebertaModel(config) 
self.cls = DebertaOnlyMLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1141,7 +1143,8 @@ def __init__(self, config): drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out self.dropout = StableDropout(drop_out) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.deberta.get_input_embeddings() @@ -1254,7 +1257,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1338,7 +1342,8 @@ def __init__(self, config): self.deberta = DebertaModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index b1ec6bd011af..e0c78395e6e9 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -996,7 +996,8 @@ def __init__(self, config): self.encoder = DebertaV2Encoder(config) self.z_steps = 0 self.config = config - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1110,7 +1111,8 @@ def __init__(self, config): self.deberta = DebertaV2Model(config) self.cls = DebertaV2OnlyMLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1251,7 +1253,8 @@ def __init__(self, config): drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out self.dropout = StableDropout(drop_out) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.deberta.get_input_embeddings() @@ -1365,7 +1368,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1450,7 +1454,8 @@ def __init__(self, config): self.deberta = DebertaV2Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/deit/modeling_deit.py b/src/transformers/models/deit/modeling_deit.py index 4a44d67fe999..e47e88b849f0 100644 --- a/src/transformers/models/deit/modeling_deit.py +++ b/src/transformers/models/deit/modeling_deit.py @@ -458,7 +458,8 @@ def 
__init__(self, config, add_pooling_layer=True): self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = DeiTPooler(config) if add_pooling_layer else None - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @@ -574,7 +575,8 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) @@ -711,7 +713,8 @@ def __init__(self, config): nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() ) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DeiTForImageClassificationWithTeacherOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 70287626b272..e7771a4adb8d 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -894,7 +894,8 @@ def __init__(self, config: DetrConfig): # in the original DETR, no layernorm is used at the end of the encoder, as "normalize_before" is set to False by default - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def forward( self, @@ -1001,8 +1002,9 @@ def __init__(self, config: DetrConfig): # in DETR, the decoder uses layernorm after the last decoder layer output self.layernorm = nn.LayerNorm(config.d_model) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def forward( self, @@ -1179,7 +1181,8 @@ def __init__(self, config: DetrConfig): self.encoder = DetrEncoder(config) self.decoder = DetrDecoder(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_encoder(self): return self.encoder @@ -1333,7 +1336,8 @@ def __init__(self, config: DetrConfig): input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py @torch.jit.unused @@ -1494,7 +1498,8 @@ def __init__(self, config: DetrConfig): hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std ) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetrSegmentationOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py index b68b7c524e82..a79b4523946c 100755 --- a/src/transformers/models/distilbert/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -441,7 +441,8 @@ def __init__(self, config): self.embeddings = Embeddings(config) # Embeddings self.transformer = Transformer(config) # Encoder - self.init_weights() + # Initialize weights and apply 
final processing + self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ @@ -571,7 +572,8 @@ def __init__(self, config): self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12) self.vocab_projector = nn.Linear(config.dim, config.vocab_size) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() self.mlm_loss_fct = nn.CrossEntropyLoss() @@ -677,7 +679,8 @@ def __init__(self, config): self.classifier = nn.Linear(config.dim, config.num_labels) self.dropout = nn.Dropout(config.seq_classif_dropout) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ @@ -793,7 +796,8 @@ def __init__(self, config): assert config.num_labels == 2 self.dropout = nn.Dropout(config.qa_dropout) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ @@ -910,7 +914,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ @@ -1015,7 +1020,8 @@ def __init__(self, config): self.classifier = nn.Linear(config.dim, 1) self.dropout = nn.Dropout(config.seq_classif_dropout) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ diff --git a/src/transformers/models/dpr/modeling_dpr.py b/src/transformers/models/dpr/modeling_dpr.py index 091479af4b3c..6cde47678c88 100644 --- a/src/transformers/models/dpr/modeling_dpr.py +++ b/src/transformers/models/dpr/modeling_dpr.py @@ -180,7 +180,8 @@ def __init__(self, config: DPRConfig): self.projection_dim = config.projection_dim if self.projection_dim > 0: self.encode_proj = nn.Linear(self.bert_model.config.hidden_size, config.projection_dim) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def forward( self, @@ -232,7 +233,8 @@ def __init__(self, config: DPRConfig): self.encoder = DPREncoder(config) self.qa_outputs = nn.Linear(self.encoder.embeddings_size, 2) self.qa_classifier = nn.Linear(self.encoder.embeddings_size, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def forward( self, @@ -447,7 +449,8 @@ def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.ctx_encoder = DPREncoder(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC) @@ -525,7 +528,8 @@ def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.question_encoder = DPREncoder(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC) @@ -602,7 +606,8 @@ def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.span_predictor = DPRSpanPredictor(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() 
@add_start_docstrings_to_model_forward(DPR_READER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRReaderOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/electra/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py index eb26bcfdd95b..c94a5c408b71 100644 --- a/src/transformers/models/electra/modeling_electra.py +++ b/src/transformers/models/electra/modeling_electra.py @@ -817,7 +817,8 @@ def __init__(self, config): self.encoder = ElectraEncoder(config) self.config = config - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -939,7 +940,8 @@ def __init__(self, config): self.electra = ElectraModel(config) self.classifier = ElectraClassificationHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1033,7 +1035,8 @@ def __init__(self, config): self.electra = ElectraModel(config) self.discriminator_predictions = ElectraDiscriminatorPredictions(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=ElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) @@ -1128,7 +1131,8 @@ def __init__(self, config): self.generator_predictions = ElectraGeneratorPredictions(config) self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.generator_lm_head @@ -1216,7 +1220,8 @@ def __init__(self, config): ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1305,7 +1310,8 @@ def __init__(self, config): self.electra = ElectraModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1406,7 +1412,8 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/flaubert/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py index f0f14caa3931..9887b639c016 100644 --- a/src/transformers/models/flaubert/modeling_flaubert.py +++ b/src/transformers/models/flaubert/modeling_flaubert.py @@ -336,7 +336,8 @@ class FlaubertWithLMHeadModel(XLMWithLMHeadModel): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() 
@add_start_docstrings( @@ -357,7 +358,8 @@ class FlaubertForSequenceClassification(XLMForSequenceClassification): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings( @@ -378,7 +380,8 @@ class FlaubertForTokenClassification(XLMForTokenClassification): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings( @@ -399,7 +402,8 @@ class FlaubertForQuestionAnsweringSimple(XLMForQuestionAnsweringSimple): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings( @@ -420,7 +424,8 @@ class FlaubertForQuestionAnswering(XLMForQuestionAnswering): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings( @@ -441,4 +446,5 @@ class FlaubertForMultipleChoice(XLMForMultipleChoice): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() diff --git a/src/transformers/models/fnet/modeling_fnet.py b/src/transformers/models/fnet/modeling_fnet.py index 16ae695b342f..76aa1aa50432 100755 --- a/src/transformers/models/fnet/modeling_fnet.py +++ b/src/transformers/models/fnet/modeling_fnet.py @@ -535,7 +535,8 @@ def __init__(self, config, add_pooling_layer=True): self.pooler = FNetPooler(config) if add_pooling_layer else None - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -633,7 +634,8 @@ def __init__(self, config): self.fnet = FNetModel(config) self.cls = FNetPreTrainingHeads(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -723,7 +725,8 @@ def __init__(self, config): self.fnet = FNetModel(config) self.cls = FNetOnlyMLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -791,7 +794,8 @@ def __init__(self, config): self.fnet = FNetModel(config) self.cls = FNetOnlyNSPHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) @@ -885,7 +889,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -969,7 +974,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + # Initialize weights 
and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1050,7 +1056,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1119,7 +1126,8 @@ def __init__(self, config): self.fnet = FNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index 9ddcd1453bbb..a1c4c1ed8ce9 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -1003,7 +1003,8 @@ def __init__(self, config: FSMTConfig): self.encoder = FSMTEncoder(config, encoder_embed_tokens) self.decoder = FSMTDecoder(config, decoder_embed_tokens) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING) @add_code_sample_docstrings( diff --git a/src/transformers/models/funnel/modeling_funnel.py b/src/transformers/models/funnel/modeling_funnel.py index 7ce2e3221c45..fffed242fd96 100644 --- a/src/transformers/models/funnel/modeling_funnel.py +++ b/src/transformers/models/funnel/modeling_funnel.py @@ -900,7 +900,8 @@ def __init__(self, config): self.embeddings = FunnelEmbeddings(config) self.encoder = FunnelEncoder(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -977,7 +978,8 @@ def __init__(self, config): self.encoder = FunnelEncoder(config) self.decoder = FunnelDecoder(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -1082,7 +1084,8 @@ def __init__(self, config): self.funnel = FunnelModel(config) self.discriminator_predictions = FunnelDiscriminatorPredictions(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=FunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) @@ -1164,7 +1167,8 @@ def __init__(self, config): self.funnel = FunnelModel(config) self.lm_head = nn.Linear(config.d_model, config.vocab_size) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.lm_head @@ -1244,7 +1248,8 @@ def __init__(self, config): self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1334,7 +1339,8 @@ def __init__(self, 
config): self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1420,7 +1426,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1502,7 +1509,8 @@ def __init__(self, config): self.funnel = FunnelModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index d8f09d3e7282..77ef0386ea93 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -677,13 +677,14 @@ def __init__(self, config): self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.init_weights() - # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): # Check validity of device_map @@ -947,12 +948,13 @@ def __init__(self, config): self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) - self.init_weights() - # Model parallel self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing + self.post_init() + @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( @@ -1117,12 +1119,13 @@ def __init__(self, config): self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.multiple_choice_head = SequenceSummary(config) - self.init_weights() - # Model parallel self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing + self.post_init() + @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( @@ -1330,12 +1333,13 @@ def __init__(self, config): self.transformer = GPT2Model(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) - self.init_weights() - # Model parallel self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing + self.post_init() + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, @@ -1461,12 +1465,13 @@ def __init__(self, config): self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() - # Model parallel self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing + self.post_init() + 
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index 1be7de2f2cfb..9785178ce864 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -486,8 +486,9 @@ def __init__(self, config): self.h = nn.ModuleList([GPTNeoBlock(config, layer_id=i) for i in range(config.num_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.wte @@ -675,7 +676,8 @@ def __init__(self, config): self.transformer = GPTNeoModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.lm_head @@ -823,7 +825,8 @@ def __init__(self, config): self.transformer = GPTNeoModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings( diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py index 7c01fea81d3e..603619cc5aef 100755 --- a/src/transformers/models/gptj/modeling_gptj.py +++ b/src/transformers/models/gptj/modeling_gptj.py @@ -444,13 +444,15 @@ def __init__(self, config): self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.init_weights() # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): # Check validity of device_map @@ -680,12 +682,14 @@ def __init__(self, config): super().__init__(config) self.transformer = GPTJModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size) - self.init_weights() # Model parallel self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing + self.post_init() + @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( @@ -855,12 +859,13 @@ def __init__(self, config): self.transformer = GPTJModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) - self.init_weights() - # Model parallel self.model_parallel = False self.device_map = None + # Initialize weights and apply final processing + self.post_init() + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, diff --git a/src/transformers/models/hubert/modeling_hubert.py b/src/transformers/models/hubert/modeling_hubert.py index 07e61ec18100..2f8c59257c5e 100755 --- a/src/transformers/models/hubert/modeling_hubert.py +++ b/src/transformers/models/hubert/modeling_hubert.py @@ -899,7 +899,8 @@ def __init__(self, config: HubertConfig): else: self.encoder = HubertEncoder(config) - 
        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
    def _mask_hidden_states(
@@ -1039,7 +1040,8 @@ def __init__(self, config):
        )
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def freeze_feature_extractor(self):
        """
@@ -1147,7 +1149,8 @@ def __init__(self, config):
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def freeze_feature_extractor(self):
        """
diff --git a/src/transformers/models/ibert/modeling_ibert.py b/src/transformers/models/ibert/modeling_ibert.py
index 8173ce1be800..6666258e7035 100644
--- a/src/transformers/models/ibert/modeling_ibert.py
+++ b/src/transformers/models/ibert/modeling_ibert.py
@@ -754,7 +754,8 @@ def __init__(self, config, add_pooling_layer=True):
        self.pooler = IBertPooler(config) if add_pooling_layer else None
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -865,7 +866,8 @@ def __init__(self, config):
        self.ibert = IBertModel(config, add_pooling_layer=False)
        self.lm_head = IBertLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head.decoder
@@ -979,7 +981,8 @@ def __init__(self, config):
        self.ibert = IBertModel(config, add_pooling_layer=False)
        self.classifier = IBertClassificationHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1074,7 +1077,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
@@ -1168,7 +1172,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1277,7 +1282,8 @@ def __init__(self, config):
        self.ibert = IBertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/layoutlm/modeling_layoutlm.py b/src/transformers/models/layoutlm/modeling_layoutlm.py
index 541dec879a18..186146e12090 100644
--- a/src/transformers/models/layoutlm/modeling_layoutlm.py
+++ b/src/transformers/models/layoutlm/modeling_layoutlm.py
@@ -714,7 +714,8 @@ def __init__(self, config):
        self.encoder = LayoutLMEncoder(config)
        self.pooler = LayoutLMPooler(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -856,7 +857,8 @@ def __init__(self, config):
        self.layoutlm = LayoutLMModel(config)
        self.cls = LayoutLMOnlyMLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.layoutlm.embeddings.word_embeddings
@@ -979,7 +981,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.layoutlm.embeddings.word_embeddings
@@ -1109,7 +1112,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.layoutlm.embeddings.word_embeddings
diff --git a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py
index e80029a300bc..93706000e62a 100755
--- a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py
+++ b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py
@@ -724,7 +724,8 @@ def __init__(self, config):
        self.encoder = LayoutLMv2Encoder(config)
        self.pooler = LayoutLMv2Pooler(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -957,7 +958,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.layoutlmv2.embeddings.word_embeddings
@@ -1124,7 +1126,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.layoutlmv2.embeddings.word_embeddings
@@ -1239,7 +1242,8 @@ def __init__(self, config, has_visual_segment_embedding=True):
        self.layoutlmv2 = LayoutLMv2Model(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.layoutlmv2.embeddings.word_embeddings
diff --git a/src/transformers/models/led/modeling_led.py b/src/transformers/models/led/modeling_led.py
index 5433d0a7c729..2f15448522af 100755
--- a/src/transformers/models/led/modeling_led.py
+++ b/src/transformers/models/led/modeling_led.py
@@ -1629,8 +1629,9 @@ def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding] = Non
        self.layers = nn.ModuleList([LEDEncoderLayer(config, i) for i in range(config.encoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(embed_dim)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor):
        # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
@@ -1904,8 +1905,9 @@ def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding] = Non
        self.layers = nn.ModuleList([LEDDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(config.d_model)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def forward(
        self,
@@ -2156,7 +2158,8 @@ def __init__(self, config: LEDConfig):
        self.encoder = LEDEncoder(config, self.shared)
        self.decoder = LEDDecoder(config, self.shared)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.shared
@@ -2283,7 +2286,8 @@ def __init__(self, config: LEDConfig):
        self.register_buffer("final_logits_bias", torch.zeros((1, self.led.shared.num_embeddings)))
        self.lm_head = nn.Linear(config.d_model, self.led.shared.num_embeddings, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_encoder(self):
        return self.led.get_encoder()
diff --git a/src/transformers/models/longformer/modeling_longformer.py b/src/transformers/models/longformer/modeling_longformer.py
index 672c0d948ae8..93bfd90dfbce 100755
--- a/src/transformers/models/longformer/modeling_longformer.py
+++ b/src/transformers/models/longformer/modeling_longformer.py
@@ -1511,7 +1511,8 @@ def __init__(self, config, add_pooling_layer=True):
        self.encoder = LongformerEncoder(config)
        self.pooler = LongformerPooler(config) if add_pooling_layer else None
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -1713,7 +1714,8 @@ def __init__(self, config):
        self.longformer = LongformerModel(config, add_pooling_layer=False)
        self.lm_head = LongformerLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head.decoder
@@ -1818,7 +1820,8 @@ def __init__(self, config):
        self.longformer = LongformerModel(config, add_pooling_layer=False)
        self.classifier = LongformerClassificationHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1943,7 +1946,8 @@ def __init__(self, config):
        self.longformer = LongformerModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=LongformerQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
@@ -2080,7 +2084,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -2170,7 +2175,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(
        LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
diff --git a/src/transformers/models/luke/modeling_luke.py b/src/transformers/models/luke/modeling_luke.py
index 97d1f1adfd9c..6edd84a3ce25 100644
--- a/src/transformers/models/luke/modeling_luke.py
+++ b/src/transformers/models/luke/modeling_luke.py
@@ -818,7 +818,8 @@ def __init__(self, config, add_pooling_layer=True):
        self.pooler = LukePooler(config) if add_pooling_layer else None
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -1029,7 +1030,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=EntityClassificationOutput, config_class=_CONFIG_FOR_DOC)
@@ -1142,7 +1144,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=EntityPairClassificationOutput, config_class=_CONFIG_FOR_DOC)
@@ -1257,7 +1260,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=EntitySpanClassificationOutput, config_class=_CONFIG_FOR_DOC)
diff --git a/src/transformers/models/lxmert/modeling_lxmert.py b/src/transformers/models/lxmert/modeling_lxmert.py
index 1135816cc22c..c78e36fddb75 100644
--- a/src/transformers/models/lxmert/modeling_lxmert.py
+++ b/src/transformers/models/lxmert/modeling_lxmert.py
@@ -891,7 +891,8 @@ def __init__(self, config):
        self.embeddings = LxmertEmbeddings(config)
        self.encoder = LxmertEncoder(config)
        self.pooler = LxmertPooler(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -1048,7 +1049,8 @@ def __init__(self, config):
        self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
        # Weight initialization
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
        # Loss functions
        self.loss_fcts = {
@@ -1303,7 +1305,8 @@ def __init__(self, config):
        self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
        # Weight initialization
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
        # Loss function
        self.loss = CrossEntropyLoss()
diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py
index 1230bf01e78e..4c9caadd8cb4 100755
--- a/src/transformers/models/m2m_100/modeling_m2m_100.py
+++ b/src/transformers/models/m2m_100/modeling_m2m_100.py
@@ -705,8 +705,9 @@ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] =
        self.layers = nn.ModuleList([M2M100EncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def forward(
        self,
@@ -870,8 +871,9 @@ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] =
        self.layers = nn.ModuleList([M2M100DecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def forward(
        self,
@@ -1113,7 +1115,8 @@ def __init__(self, config: M2M100Config):
        self.encoder = M2M100Encoder(config, self.shared)
        self.decoder = M2M100Decoder(config, self.shared)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.shared
@@ -1232,7 +1235,8 @@ def __init__(self, config: M2M100Config):
        self.model = M2M100Model(config)
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_encoder(self):
        return self.model.get_encoder()
diff --git a/src/transformers/models/marian/modeling_marian.py b/src/transformers/models/marian/modeling_marian.py
index 94f0f800bd55..ef4369dcb8c8 100755
--- a/src/transformers/models/marian/modeling_marian.py
+++ b/src/transformers/models/marian/modeling_marian.py
@@ -668,8 +668,10 @@ def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] =
            self.padding_idx,
        )
        self.layers = nn.ModuleList([MarianEncoderLayer(config) for _ in range(config.encoder_layers)])
-        self.init_weights()
+
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def forward(
        self,
@@ -829,8 +831,10 @@ def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] =
            self.padding_idx,
        )
        self.layers = nn.ModuleList([MarianDecoderLayer(config) for _ in range(config.decoder_layers)])
-        self.init_weights()
+
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embed_tokens
@@ -1087,7 +1091,8 @@ def __init__(self, config: MarianConfig):
        self.encoder = MarianEncoder(config, self.shared)
        self.decoder = MarianDecoder(config, self.shared)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.shared
@@ -1220,7 +1225,8 @@ def __init__(self, config: MarianConfig):
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_encoder(self):
        return self.model.get_encoder()
@@ -1399,7 +1405,8 @@ def __init__(self, config):
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens
diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py
index 564030fb49c7..82452251bf35 100755
--- a/src/transformers/models/mbart/modeling_mbart.py
+++ b/src/transformers/models/mbart/modeling_mbart.py
@@ -696,8 +696,14 @@ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = N
        self.layernorm_embedding = nn.LayerNorm(embed_dim)
        self.layer_norm = nn.LayerNorm(config.d_model)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def _backward_compatibility_gradient_checkpointing(self):
+        # Override to not delete the attribute from the config
+        if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
+            self.gradient_checkpointing_enable()
    def forward(
        self,
@@ -862,8 +868,9 @@ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = N
        self.layernorm_embedding = nn.LayerNorm(config.d_model)
        self.layer_norm = nn.LayerNorm(config.d_model)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embed_tokens
@@ -1123,7 +1130,8 @@ def __init__(self, config: MBartConfig):
        self.encoder = MBartEncoder(config, self.shared)
        self.decoder = MBartDecoder(config, self.shared)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.shared
@@ -1243,7 +1251,8 @@ def __init__(self, config: MBartConfig):
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_encoder(self):
        return self.model.get_encoder()
@@ -1664,7 +1673,8 @@ def __init__(self, config):
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens
diff --git a/src/transformers/models/megatron_bert/modeling_megatron_bert.py b/src/transformers/models/megatron_bert/modeling_megatron_bert.py
index c482b1b639d7..12f026f63c31 100755
--- a/src/transformers/models/megatron_bert/modeling_megatron_bert.py
+++ b/src/transformers/models/megatron_bert/modeling_megatron_bert.py
@@ -857,7 +857,8 @@ def __init__(self, config, add_pooling_layer=True):
        self.pooler = MegatronBertPooler(config) if add_pooling_layer else None
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -1018,7 +1019,8 @@ def __init__(self, config, add_binary_head=True):
        self.bert = MegatronBertModel(config)
        self.cls = MegatronBertPreTrainingHeads(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -1127,7 +1129,8 @@ def __init__(self, config):
        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        self.cls = MegatronBertOnlyMLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -1274,7 +1277,8 @@ def __init__(self, config):
        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        self.cls = MegatronBertOnlyMLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -1375,7 +1379,8 @@ def __init__(self, config):
        self.bert = MegatronBertModel(config)
        self.cls = MegatronBertOnlyNSPHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
@@ -1478,7 +1483,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1574,7 +1580,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(
        MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
@@ -1671,7 +1678,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1761,7 +1769,8 @@ def __init__(self, config):
        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/mobilebert/modeling_mobilebert.py b/src/transformers/models/mobilebert/modeling_mobilebert.py
index 3c85af3b1bc3..28c01d55211a 100644
--- a/src/transformers/models/mobilebert/modeling_mobilebert.py
+++ b/src/transformers/models/mobilebert/modeling_mobilebert.py
@@ -799,7 +799,8 @@ def __init__(self, config, add_pooling_layer=True):
        self.pooler = MobileBertPooler(config) if add_pooling_layer else None
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -907,7 +908,8 @@ def __init__(self, config):
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertPreTrainingHeads(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -1015,7 +1017,8 @@ def __init__(self, config):
        self.cls = MobileBertOnlyMLMHead(config)
        self.config = config
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -1111,7 +1114,8 @@ def __init__(self, config):
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertOnlyNSPHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
@@ -1218,7 +1222,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1318,7 +1323,8 @@ def __init__(self, config):
        self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1421,7 +1427,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, 1)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(
        MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
@@ -1522,7 +1529,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/mpnet/modeling_mpnet.py b/src/transformers/models/mpnet/modeling_mpnet.py
index 52cf537ed25d..70e2d09a93f7 100644
--- a/src/transformers/models/mpnet/modeling_mpnet.py
+++ b/src/transformers/models/mpnet/modeling_mpnet.py
@@ -493,7 +493,8 @@ def __init__(self, config, add_pooling_layer=True):
        self.encoder = MPNetEncoder(config)
        self.pooler = MPNetPooler(config) if add_pooling_layer else None
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -583,7 +584,8 @@ def __init__(self, config):
        self.mpnet = MPNetModel(config, add_pooling_layer=False)
        self.lm_head = MPNetLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head.decoder
@@ -691,7 +693,8 @@ def __init__(self, config):
        self.mpnet = MPNetModel(config, add_pooling_layer=False)
        self.classifier = MPNetClassificationHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -785,7 +788,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
@@ -877,7 +881,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -985,7 +990,8 @@ def __init__(self, config):
        self.mpnet = MPNetModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py
index 58dd1b055d3e..782812b7e70f 100644
--- a/src/transformers/models/openai/modeling_openai.py
+++ b/src/transformers/models/openai/modeling_openai.py
@@ -414,7 +414,8 @@ def __init__(self, config):
        self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)])
        self.register_buffer("position_ids", torch.arange(config.n_positions))
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.tokens_embed
@@ -540,7 +541,8 @@ def __init__(self, config):
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head
@@ -629,7 +631,8 @@ def __init__(self, config):
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head
@@ -750,7 +753,8 @@ def __init__(self, config):
        self.transformer = OpenAIGPTModel(config)
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/pegasus/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py
index 33a1ca14cb72..4929c1014106 100755
--- a/src/transformers/models/pegasus/modeling_pegasus.py
+++ b/src/transformers/models/pegasus/modeling_pegasus.py
@@ -658,8 +658,9 @@ def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] =
        self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
@@ -853,8 +854,9 @@ def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] =
        self.layers = nn.ModuleList([PegasusDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embed_tokens
@@ -1142,7 +1144,8 @@ def __init__(self, config: PegasusConfig):
        self.encoder = PegasusEncoder(config, self.shared)
        self.decoder = PegasusDecoder(config, self.shared)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.shared
@@ -1293,7 +1296,8 @@ def __init__(self, config: PegasusConfig):
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_encoder(self):
        return self.model.get_encoder()
@@ -1490,7 +1494,8 @@ def __init__(self, config):
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens
diff --git a/src/transformers/models/prophetnet/modeling_prophetnet.py b/src/transformers/models/prophetnet/modeling_prophetnet.py
index 9f72a35f0dfd..a3e89aa69f43 100644
--- a/src/transformers/models/prophetnet/modeling_prophetnet.py
+++ b/src/transformers/models/prophetnet/modeling_prophetnet.py
@@ -1266,8 +1266,9 @@ def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = Non
        self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)])
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.word_embeddings
@@ -1411,8 +1412,9 @@ def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = Non
        self.layers = nn.ModuleList([ProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)])
        self.embeddings_layer_norm = LayerNorm(config.hidden_size)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.word_embeddings
@@ -1765,7 +1767,8 @@ def __init__(self, config):
        decoder_config.is_encoder_decoder = False
        self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.word_embeddings
@@ -1882,7 +1885,8 @@ def __init__(self, config: ProphetNetConfig):
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head
@@ -2092,7 +2096,8 @@ def __init__(self, config):
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.prophetnet.decoder.word_embeddings
diff --git a/src/transformers/models/reformer/modeling_reformer.py b/src/transformers/models/reformer/modeling_reformer.py
index 528875b4aa97..eae0b3009224 100755
--- a/src/transformers/models/reformer/modeling_reformer.py
+++ b/src/transformers/models/reformer/modeling_reformer.py
@@ -1974,7 +1974,8 @@ def __init__(self, config):
        self.embeddings = ReformerEmbeddings(config)
        self.encoder = ReformerEncoder(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -2188,7 +2189,8 @@ def __init__(self, config):
        self.reformer = ReformerModel(config)
        self.lm_head = ReformerOnlyLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head.decoder
@@ -2303,7 +2305,8 @@ def __init__(self, config):
        self.reformer = ReformerModel(config)
        self.lm_head = ReformerOnlyLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head.decoder
@@ -2390,7 +2393,8 @@ def __init__(self, config):
        if config.is_decoder is True:
            logger.warning("You might want to disable causal masking for sequence classification")
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
@@ -2508,7 +2512,8 @@ def __init__(self, config):
        # 2 * config.hidden_size because we use reversible residual layers
        self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/rembert/modeling_rembert.py b/src/transformers/models/rembert/modeling_rembert.py
index 3ebbde7fa7ed..bc5569e553b5 100755
--- a/src/transformers/models/rembert/modeling_rembert.py
+++ b/src/transformers/models/rembert/modeling_rembert.py
@@ -765,7 +765,8 @@ def __init__(self, config, add_pooling_layer=True):
        self.pooler = RemBertPooler(config) if add_pooling_layer else None
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -925,7 +926,8 @@ def __init__(self, config):
        self.rembert = RemBertModel(config, add_pooling_layer=False)
        self.cls = RemBertOnlyMLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -1027,7 +1029,8 @@ def __init__(self, config):
        self.rembert = RemBertModel(config, add_pooling_layer=False)
        self.cls = RemBertOnlyMLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -1173,7 +1176,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1269,7 +1273,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
@@ -1361,7 +1366,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1449,7 +1455,8 @@ def __init__(self, config):
        self.rembert = RemBertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/retribert/modeling_retribert.py b/src/transformers/models/retribert/modeling_retribert.py
index 08f56e13ee0f..2456545a2218 100644
--- a/src/transformers/models/retribert/modeling_retribert.py
+++ b/src/transformers/models/retribert/modeling_retribert.py
@@ -99,7 +99,8 @@ def __init__(self, config):
        self.ce_loss = nn.CrossEntropyLoss(reduction="mean")
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def embed_sentences_checkpointed(
        self,
diff --git a/src/transformers/models/roberta/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py
index 917bb03f80ae..5f85a81fe805 100644
--- a/src/transformers/models/roberta/modeling_roberta.py
+++ b/src/transformers/models/roberta/modeling_roberta.py
@@ -723,7 +723,8 @@ def __init__(self, config, add_pooling_layer=True):
        self.pooler = RobertaPooler(config) if add_pooling_layer else None
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -897,7 +898,8 @@ def __init__(self, config):
        # The LM head weights require special treatment only when they are tied with the word embeddings
        self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head.decoder
@@ -1050,7 +1052,8 @@ def __init__(self, config):
        # The LM head weights require special treatment only when they are tied with the word embeddings
        self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head.decoder
@@ -1169,7 +1172,8 @@ def __init__(self, config):
        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.classifier = RobertaClassificationHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1265,7 +1269,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
@@ -1362,7 +1367,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1474,7 +1480,8 @@ def __init__(self, config):
        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/roformer/modeling_roformer.py b/src/transformers/models/roformer/modeling_roformer.py
index 75f690390ea0..14e74a24f836 100644
--- a/src/transformers/models/roformer/modeling_roformer.py
+++ b/src/transformers/models/roformer/modeling_roformer.py
@@ -817,7 +817,8 @@ def __init__(self, config):
        self.encoder = RoFormerEncoder(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -973,7 +974,8 @@ def __init__(self, config):
        self.roformer = RoFormerModel(config)
        self.cls = RoFormerOnlyMLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -1073,7 +1075,8 @@ def __init__(self, config):
        self.roformer = RoFormerModel(config)
        self.cls = RoFormerOnlyMLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -1238,7 +1241,8 @@ def __init__(self, config):
        self.roformer = RoFormerModel(config)
        self.classifier = RoFormerClassificationHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1330,7 +1334,8 @@ def __init__(self, config):
        self.sequence_summary = SequenceSummary(config)
        self.classifier = nn.Linear(config.hidden_size, 1)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(
        ROFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
@@ -1422,7 +1427,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1509,7 +1515,8 @@ def __init__(self, config):
        self.roformer = RoFormerModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/segformer/modeling_segformer.py b/src/transformers/models/segformer/modeling_segformer.py
index 2935d07a1e17..5b1593af2208 100755
--- a/src/transformers/models/segformer/modeling_segformer.py
+++ b/src/transformers/models/segformer/modeling_segformer.py
@@ -467,7 +467,8 @@ def __init__(self, config):
        # hierarchical Transformer encoder
        self.encoder = SegformerEncoder(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def _prune_heads(self, heads_to_prune):
        """
@@ -541,7 +542,8 @@ def __init__(self, config):
        # Classifier head
        self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
@@ -696,7 +698,8 @@ def __init__(self, config):
        self.segformer = SegformerModel(config)
        self.decode_head = SegformerDecodeHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
diff --git a/src/transformers/models/sew/modeling_sew.py b/src/transformers/models/sew/modeling_sew.py
index 55d6bf7caef3..fd4cf4bf4d28 100644
--- a/src/transformers/models/sew/modeling_sew.py
+++ b/src/transformers/models/sew/modeling_sew.py
@@ -798,7 +798,8 @@ def __init__(self, config: SEWConfig):
        self.encoder = SEWEncoder(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
    def _mask_hidden_states(
@@ -924,7 +925,8 @@ def __init__(self, config):
        )
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def freeze_feature_extractor(self):
        """
@@ -1032,7 +1034,8 @@ def __init__(self, config):
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def freeze_feature_extractor(self):
        """
diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py
index facb85f07b11..53f9862b4721 100644
--- a/src/transformers/models/sew_d/modeling_sew_d.py
+++ b/src/transformers/models/sew_d/modeling_sew_d.py
@@ -1329,7 +1329,8 @@ def __init__(self, config: SEWDConfig):
        self.encoder = SEWDEncoder(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
    def _mask_hidden_states(
@@ -1455,7 +1456,8 @@ def __init__(self, config):
        )
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def freeze_feature_extractor(self):
        """
@@ -1563,7 +1565,8 @@ def __init__(self, config):
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def freeze_feature_extractor(self):
        """
diff --git a/src/transformers/models/speech_to_text/modeling_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_speech_to_text.py
index e631e75731c1..aead484a5933 100755
--- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py
+++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py
@@ -723,8 +723,9 @@ def __init__(self, config: Speech2TextConfig):
        self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def forward(
        self,
@@ -876,8 +877,9 @@ def __init__(self, config: Speech2TextConfig):
        self.layer_norm = nn.LayerNorm(config.d_model)
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embed_tokens
@@ -1130,7 +1132,8 @@ def __init__(self, config: Speech2TextConfig):
        self.encoder = Speech2TextEncoder(config)
        self.decoder = Speech2TextDecoder(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.decoder.embed_tokens
@@ -1253,7 +1256,8 @@ def __init__(self, config: Speech2TextConfig):
        self.model = Speech2TextModel(config)
        self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_encoder(self):
        return self.model.get_encoder()
diff --git a/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py b/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py
index 306cacd48f9a..a6b4e5b54247 100755
--- a/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py
+++ b/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py
@@ -476,8 +476,9 @@ def __init__(self, config: Speech2Text2Config):
        self.layers = nn.ModuleList([Speech2Text2DecoderLayer(config) for _ in range(config.decoder_layers)])
-        self.init_weights()
        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embed_tokens
@@ -751,7 +752,8 @@ def __init__(self, config):
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens
diff --git a/src/transformers/models/splinter/modeling_splinter.py b/src/transformers/models/splinter/modeling_splinter.py
index 812a5f207034..19dab0457d7d 100755
--- a/src/transformers/models/splinter/modeling_splinter.py
+++ b/src/transformers/models/splinter/modeling_splinter.py
@@ -619,7 +619,8 @@ def __init__(self, config):
        self.embeddings = SplinterEmbeddings(config)
        self.encoder = SplinterEncoder(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -834,7 +835,8 @@ def __init__(self, config):
        self.splinter_qass = QuestionAwareSpanSelectionHead(config)
        self.question_token_id = config.question_token_id
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/squeezebert/modeling_squeezebert.py b/src/transformers/models/squeezebert/modeling_squeezebert.py
index 6ec972f06c9e..ba12c2341dde 100644
--- a/src/transformers/models/squeezebert/modeling_squeezebert.py
+++ b/src/transformers/models/squeezebert/modeling_squeezebert.py
@@ -553,7 +553,8 @@ def __init__(self, config):
        self.encoder = SqueezeBertEncoder(config)
        self.pooler = SqueezeBertPooler(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -654,7 +655,8 @@ def __init__(self, config):
        self.transformer = SqueezeBertModel(config)
        self.cls = SqueezeBertOnlyMLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -739,7 +741,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -836,7 +839,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(
        SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
@@ -930,7 +934,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
@@ -1017,7 +1022,8 @@ def __init__(self, config):
        self.transformer = SqueezeBertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py
index a9f69c91f5de..78ccd072364a 100644
--- a/src/transformers/models/t5/modeling_t5.py
+++ b/src/transformers/models/t5/modeling_t5.py
@@ -814,7 +814,8 @@ def __init__(self, config, embed_tokens=None):
        self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
        # Model parallel
        self.model_parallel = False
        self.device_map = None
@@ -1267,7 +1268,8 @@ def __init__(self, config: T5Config):
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = T5Stack(decoder_config, self.shared)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
        # Model parallel
        self.model_parallel = False
@@ -1457,7 +1459,8 @@ def __init__(self, config):
        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
        # Model parallel
        self.model_parallel = False
@@ -1731,7 +1734,8 @@ def __init__(self, config: T5Config):
        encoder_config.is_encoder_decoder = False
        self.encoder = T5Stack(encoder_config, self.shared)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
        # Model parallel
        self.model_parallel = False
diff --git a/src/transformers/models/tapas/modeling_tapas.py b/src/transformers/models/tapas/modeling_tapas.py
index e301a2eca505..7ff9081fab2a 100644
--- a/src/transformers/models/tapas/modeling_tapas.py
+++ b/src/transformers/models/tapas/modeling_tapas.py
@@ -877,7 +877,8 @@ def __init__(self, config, add_pooling_layer=True):
        self.pooler = TapasPooler(config) if add_pooling_layer else None
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
@@ -1016,7 +1017,8 @@ def __init__(self, config):
        self.tapas = TapasModel(config, add_pooling_layer=False)
        self.cls = TapasOnlyMLMHead(config)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
@@ -1146,7 +1148,8 @@ def __init__(self, config: TapasConfig):
        if config.num_aggregation_labels > 0:
            self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
@@ -1464,7 +1467,8 @@ def __init__(self, config):
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_transfo_xl.py
index e18a5b50f6dd..fda73520c9c2 100644
--- a/src/transformers/models/transfo_xl/modeling_transfo_xl.py
+++ b/src/transformers/models/transfo_xl/modeling_transfo_xl.py
@@ -819,7 +819,8 @@ def __init__(self, config):
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def get_input_embeddings(self):
        return self.word_emb
@@ -1021,7 +1022,8 @@ def __init__(self, config):
            config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
        )
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    def tie_weights(self):
        """
@@ -1170,7 +1172,8 @@ def __init__(self, config):
        self.num_labels = config.num_labels
        self.transformer = TransfoXLModel(config)
        self.score = nn.Linear(config.d_embed, self.num_labels, bias=False)
-        self.init_weights()
+        # Initialize weights and apply final processing
+        self.post_init()
    @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
diff --git a/src/transformers/models/trocr/modeling_trocr.py b/src/transformers/models/trocr/modeling_trocr.py
index 87502901ea73..5b8943a26a93 100644
--- a/src/transformers/models/trocr/modeling_trocr.py
+++ b/src/transformers/models/trocr/modeling_trocr.py
@@ -503,8 +503,9 @@ def __init__(self, config: TrOCRConfig):
        self.layers = nn.ModuleList([TrOCRDecoderLayer(config) for _ in range(config.decoder_layers)])
-        self.init_weights()
self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embed_tokens @@ -784,7 +785,8 @@ def __init__(self, config): self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/src/transformers/models/unispeech/modeling_unispeech.py b/src/transformers/models/unispeech/modeling_unispeech.py index 3f700ee15390..cd4ff0108116 100755 --- a/src/transformers/models/unispeech/modeling_unispeech.py +++ b/src/transformers/models/unispeech/modeling_unispeech.py @@ -1045,7 +1045,8 @@ def __init__(self, config: UniSpeechConfig): else: self.encoder = UniSpeechEncoder(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( @@ -1165,7 +1166,8 @@ def __init__(self, config: UniSpeechConfig): self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes) self.dropout = nn.Dropout(config.final_dropout) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def set_gumbel_temperature(self, temperature: int): """ @@ -1337,7 +1339,8 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def freeze_feature_extractor(self): """ @@ -1445,7 +1448,8 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def freeze_feature_extractor(self): """ diff --git a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py index ae2849206480..c69faafc4397 100755 --- a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py @@ -1046,7 +1046,8 @@ def __init__(self, config: UniSpeechSatConfig): else: self.encoder = UniSpeechSatEncoder(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( @@ -1171,7 +1172,8 @@ def __init__(self, config: UniSpeechSatConfig): if self.config.do_stable_layer_norm: self.layer_norm_for_extract.requires_grad = False - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def set_gumbel_temperature(self, temperature: int): """ @@ -1328,7 +1330,8 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def freeze_feature_extractor(self): """ @@ -1436,7 +1439,8 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def freeze_feature_extractor(self): """ diff --git 
a/src/transformers/models/visual_bert/modeling_visual_bert.py b/src/transformers/models/visual_bert/modeling_visual_bert.py index 6d8d51b4ab26..eabca9ad4c46 100755 --- a/src/transformers/models/visual_bert/modeling_visual_bert.py +++ b/src/transformers/models/visual_bert/modeling_visual_bert.py @@ -701,7 +701,8 @@ def __init__(self, config, add_pooling_layer=True): if self.bypass_transformer: self.additional_layer = VisualBertLayer(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -877,7 +878,8 @@ def __init__(self, config): self.visual_bert = VisualBertModel(config) self.cls = VisualBertPreTrainingHeads(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1021,7 +1023,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward( VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") @@ -1170,7 +1173,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) @@ -1292,7 +1296,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, config.num_labels) # 2 - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) @@ -1448,7 +1453,8 @@ def __init__(self, config): self.cls = VisualBertPreTrainingHeads(config) self.attention = VisualBertRegionToPhraseAttention(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/vit/modeling_vit.py b/src/transformers/models/vit/modeling_vit.py index fda10a1ecec6..b1bc30312486 100644 --- a/src/transformers/models/vit/modeling_vit.py +++ b/src/transformers/models/vit/modeling_vit.py @@ -487,7 +487,8 @@ def __init__(self, config, add_pooling_layer=True): self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = ViTPooler(config) if add_pooling_layer else None - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @@ -603,7 +604,8 @@ def __init__(self, config): # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() - self.init_weights() + # Initialize weights and apply final processing + self.post_init() 
@add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py index 0bb456620bed..00eec6933be9 100755 --- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py @@ -1152,7 +1152,8 @@ def __init__(self, config: Wav2Vec2Config): self.adapter = Wav2Vec2Adapter(config) if config.add_adapter else None - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def _mask_hidden_states( self, @@ -1269,7 +1270,8 @@ def __init__(self, config: Wav2Vec2Config): self.quantizer = Wav2Vec2GumbelVectorQuantizer(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() # make sure that project_hid & project_q are initialized like normal linear layers self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim) @@ -1480,7 +1482,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.final_dropout) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC) @@ -1563,7 +1566,8 @@ def __init__(self, config): ) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def freeze_feature_extractor(self): """ @@ -1670,7 +1674,8 @@ def __init__(self, config): self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def freeze_feature_extractor(self): """ diff --git a/src/transformers/models/xlm/modeling_xlm.py b/src/transformers/models/xlm/modeling_xlm.py index 4d4b8c0c8d7e..c3219952c27f 100755 --- a/src/transformers/models/xlm/modeling_xlm.py +++ b/src/transformers/models/xlm/modeling_xlm.py @@ -469,7 +469,8 @@ def __init__(self, config): if self.attentions[int(layer)].n_heads == config.n_heads: self.prune_heads({int(layer): list(map(int, heads))}) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def get_input_embeddings(self): @@ -687,7 +688,8 @@ def __init__(self, config): self.transformer = XLMModel(config) self.pred_layer = XLMPredLayer(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.pred_layer.proj @@ -785,7 +787,8 @@ def __init__(self, config): self.transformer = XLMModel(config) self.sequence_summary = SequenceSummary(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -885,7 +888,8 @@ def __init__(self, config): self.transformer = XLMModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + 
self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -989,7 +993,8 @@ def __init__(self, config): self.transformer = XLMModel(config) self.qa_outputs = SQuADHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLMForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) @@ -1108,7 +1113,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1201,7 +1207,8 @@ def __init__(self, config, *inputs, **kwargs): self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.num_labels, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( diff --git a/src/transformers/models/xlnet/modeling_xlnet.py b/src/transformers/models/xlnet/modeling_xlnet.py index 70c37ad84f36..10aadbdfbaac 100755 --- a/src/transformers/models/xlnet/modeling_xlnet.py +++ b/src/transformers/models/xlnet/modeling_xlnet.py @@ -955,7 +955,8 @@ def __init__(self, config): self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)]) self.dropout = nn.Dropout(config.dropout) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.word_embedding @@ -1311,7 +1312,8 @@ def __init__(self, config): self.transformer = XLNetModel(config) self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.lm_loss @@ -1493,7 +1495,8 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1600,7 +1603,8 @@ def __init__(self, config): self.transformer = XLNetModel(config) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1697,7 +1701,8 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1800,7 +1805,8 @@ def __init__(self, config): self.transformer = XLNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights 
and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1913,7 +1919,8 @@ def __init__(self, config): self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLNetForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py index 10e7bc599532..7d0afd2d9c65 100755 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py @@ -777,7 +777,8 @@ def __init__(self, config): self.embeddings = {{cookiecutter.camelcase_modelname}}Embeddings(config) self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings @@ -943,7 +944,8 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1046,7 +1048,8 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder @@ -1217,7 +1220,8 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.classifier = {{cookiecutter.camelcase_modelname}}ClassificationHead(config) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1309,7 +1313,8 @@ def __init__(self, config): self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( @@ -1399,7 +1404,8 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, 
sequence_length")) @add_code_sample_docstrings( @@ -1486,7 +1492,8 @@ def __init__(self, config): self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -2224,8 +2231,9 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tok self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}EncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def forward( self, @@ -2388,8 +2396,9 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tok self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}DecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) - self.init_weights() self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.embed_tokens @@ -2640,7 +2649,8 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config, self.shared) self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config, self.shared) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.shared @@ -2755,7 +2765,8 @@ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_encoder(self): return self.model.get_encoder() @@ -3170,7 +3181,8 @@ def __init__(self, config): self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.init_weights() + # Initialize weights and apply final processing + self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 49027d3f7e01..05c980c64225 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -222,14 +222,6 @@ def test_gradient_checkpointing_backward_compatibility(self): config.gradient_checkpointing = True model = model_class(config) - # Model does not have gradient checkpointing activated yet, it will be done at the first forward. - self.assertFalse(model.is_gradient_checkpointing) - - model.to(torch_device) - inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) - _ = model(**inputs) - - # Model has gradient checkpointing activated after the first forward. self.assertTrue(model.is_gradient_checkpointing) def test_gradient_checkpointing_enable_disable(self):