From fb07b7fbda450799007076e5d196b517df221a46 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Tue, 11 Aug 2020 14:39:44 +0200
Subject: [PATCH 1/5] add encoder-decoder for roberta

---
 src/transformers/__init__.py           |   1 +
 src/transformers/modeling_auto.py      |   2 +
 src/transformers/modeling_bert.py      |  10 +-
 src/transformers/modeling_roberta.py   | 136 +++++++++++++-
 src/transformers/modeling_tf_bert.py   |   4 +-
 tests/test_modeling_bert.py            |  52 +++---
 tests/test_modeling_encoder_decoder.py | 248 +++++++++++++++----------
 tests/test_modeling_roberta.py         | 140 ++++++++++++--
 8 files changed, 454 insertions(+), 139 deletions(-)

diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py
index 5df76248fe9f5..5c34c9fc31572 100755
--- a/src/transformers/__init__.py
+++ b/src/transformers/__init__.py
@@ -299,6 +299,7 @@ from .tokenization_marian import MarianTokenizer
     from .modeling_roberta import (
         RobertaForMaskedLM,
+        RobertaForCausalLM,
         RobertaModel,
         RobertaForSequenceClassification,
         RobertaForMultipleChoice,
diff --git a/src/transformers/modeling_auto.py b/src/transformers/modeling_auto.py
index 5f6ad671edbea..d8881523ea49a 100644
--- a/src/transformers/modeling_auto.py
+++ b/src/transformers/modeling_auto.py
@@ -133,6 +133,7 @@
 )
 from .modeling_retribert import RetriBertModel
 from .modeling_roberta import (
+    RobertaForCausalLM,
     RobertaForMaskedLM,
     RobertaForMultipleChoice,
     RobertaForQuestionAnswering,
@@ -248,6 +249,7 @@

 MODEL_FOR_CAUSAL_LM_MAPPING = OrderedDict(
     [
+        (RobertaConfig, RobertaForCausalLM),
         (BertConfig, BertLMHeadModel),
         (OpenAIGPTConfig, OpenAIGPTLMHeadModel),
         (GPT2Config, GPT2LMHeadModel),
diff --git a/src/transformers/modeling_bert.py b/src/transformers/modeling_bert.py
index a40d54f6118df..e650f21a199c6 100755
--- a/src/transformers/modeling_bert.py
+++ b/src/transformers/modeling_bert.py
@@ -956,7 +956,7 @@ def __init__(self, config):
         super().__init__(config)

         if not config.is_decoder:
-            logger.info("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
+            logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")

         self.bert = BertModel(config)
         self.cls = BertOnlyMLMHead(config)
@@ -976,9 +976,9 @@ def forward(
         position_ids=None,
         head_mask=None,
         inputs_embeds=None,
-        labels=None,
         encoder_hidden_states=None,
         encoder_attention_mask=None,
+        labels=None,
         output_attentions=None,
         output_hidden_states=None,
         return_dict=None,
@@ -1061,8 +1061,8 @@ def __init__(self, config):
         super().__init__(config)

         if config.is_decoder:
-            logger.info(
-                "If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for "
+            logger.warning(
+                "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
                 "bi-directional self-attention."
             )

@@ -1089,9 +1089,9 @@ def forward(
         position_ids=None,
         head_mask=None,
         inputs_embeds=None,
-        labels=None,
         encoder_hidden_states=None,
         encoder_attention_mask=None,
+        labels=None,
         output_attentions=None,
         output_hidden_states=None,
         return_dict=None,
diff --git a/src/transformers/modeling_roberta.py b/src/transformers/modeling_roberta.py
index 7779e81eceef8..202638a0be16d 100644
--- a/src/transformers/modeling_roberta.py
+++ b/src/transformers/modeling_roberta.py
@@ -24,9 +24,15 @@
 from torch.nn import CrossEntropyLoss, MSELoss

 from .configuration_roberta import RobertaConfig
-from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable
+from .file_utils import (
+    add_code_sample_docstrings,
+    add_start_docstrings,
+    add_start_docstrings_to_callable,
+    replace_return_docstrings,
+)
 from .modeling_bert import BertEmbeddings, BertLayerNorm, BertModel, BertPreTrainedModel, gelu
 from .modeling_outputs import (
+    CausalLMOutput,
     MaskedLMOutput,
     MultipleChoiceModelOutput,
     QuestionAnsweringModelOutput,
@@ -139,6 +145,14 @@ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
             Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
             This is useful if you want more control over how to convert `input_ids` indices into associated vectors
             than the model's internal embedding lookup matrix.
+        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+            if the model is configured as a decoder.
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask
+            is used in the cross-attention if the model is configured as a decoder.
+            Mask values selected in ``[0, 1]``:
+            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
         output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
             If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
         output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):
@@ -175,6 +189,116 @@ def set_input_embeddings(self, value):
         self.embeddings.word_embeddings = value


+@add_start_docstrings(
+    """RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. """, ROBERTA_START_DOCSTRING
""", ROBERTA_START_DOCSTRING +) +class RobertaForCausalLM(BertPreTrainedModel): + config_class = RobertaConfig + base_model_prefix = "roberta" + + def __init__(self, config): + super().__init__(config) + + if not config.is_decoder: + logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`") + + self.roberta = RobertaModel(config) + self.lm_head = RobertaLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.lm_head.decoder + + @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) + @replace_return_docstrings(output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **kwargs + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): + Labels for computing the left-to-right language modeling loss (next word prediction). + Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) + Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels + in ``[0, ..., config.vocab_size]`` + kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): + Used to hide legacy arguments that have been deprecated. + + Returns: + + Example:: + + >>> from transformers import RobertaTokenizer, RobertaLMHeadModel, RobertaConfig + >>> import torch + + >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base') + >>> config = RobertaConfig.from_pretrained("roberta-base") + >>> config.is_decoder = True + >>> model = RobertaLMHeadModel.from_pretrained('roberta-base', config=config, return_dict=True) + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> prediction_logits = outputs.logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.lm_head(sequence_output) + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutput( + loss=lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + + # if model is used as a decoder in encoder-decoder model, the decoder 
+        if attention_mask is None:
+            attention_mask = input_ids.new_ones(input_shape)
+
+        return {"input_ids": input_ids, "attention_mask": attention_mask}
+
+
 @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
 class RobertaForMaskedLM(BertPreTrainedModel):
     config_class = RobertaConfig
@@ -183,6 +307,12 @@ class RobertaForMaskedLM(BertPreTrainedModel):
     def __init__(self, config):
         super().__init__(config)

+        if config.is_decoder:
+            logger.warning(
+                "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
+                "bi-directional self-attention."
+            )
+
         self.roberta = RobertaModel(config)
         self.lm_head = RobertaLMHead(config)
@@ -206,6 +336,8 @@ def forward(
         position_ids=None,
         head_mask=None,
         inputs_embeds=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
         labels=None,
         output_attentions=None,
         output_hidden_states=None,
@@ -237,6 +369,8 @@ def forward(
             position_ids=position_ids,
             head_mask=head_mask,
             inputs_embeds=inputs_embeds,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
             output_attentions=output_attentions,
             output_hidden_states=output_hidden_states,
             return_dict=return_dict,
diff --git a/src/transformers/modeling_tf_bert.py b/src/transformers/modeling_tf_bert.py
index 532997e0975d0..a66724a371967 100644
--- a/src/transformers/modeling_tf_bert.py
+++ b/src/transformers/modeling_tf_bert.py
@@ -862,7 +862,7 @@ def __init__(self, config, *inputs, **kwargs):
         super().__init__(config, *inputs, **kwargs)

         if config.is_decoder:
-            logger.info(
+            logger.warning(
                 "If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for "
                 "bi-directional self-attention."
             )
@@ -941,7 +941,7 @@ def __init__(self, config, *inputs, **kwargs):
         super().__init__(config, *inputs, **kwargs)

         if not config.is_decoder:
-            logger.info("If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`")
+            logger.warning("If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`")

         self.bert = TFBertMainLayer(config, name="bert")
         self.mlm = TFBertMLMHead(config, self.bert.embeddings, name="mlm___cls")
diff --git a/tests/test_modeling_bert.py b/tests/test_modeling_bert.py
index 87382337d54cd..200b5676668e1 100644
--- a/tests/test_modeling_bert.py
+++ b/tests/test_modeling_bert.py
@@ -152,7 +152,7 @@ def prepare_config_and_inputs_for_decoder(self):
             encoder_attention_mask,
         )

-    def create_and_check_bert_model(
+    def create_and_check_model(
         self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
     ):
         model = BertModel(config=config)
@@ -164,7 +164,7 @@ def create_and_check_bert_model(
         self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
         self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

-    def create_and_check_bert_model_as_decoder(
+    def create_and_check_model_as_decoder(
         self,
         config,
         input_ids,
@@ -197,7 +197,7 @@ def create_and_check_bert_model_as_decoder(
         self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
         self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

-    def create_and_check_bert_for_causal_lm(
+    def create_and_check_for_causal_lm(
         self,
         config,
         input_ids,
@@ -215,7 +215,7 @@ def create_and_check_bert_for_causal_lm(
         result = model(input_ids, attention_mask=input_mask,
token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) - def create_and_check_bert_for_masked_lm( + def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForMaskedLM(config=config) @@ -224,7 +224,7 @@ def create_and_check_bert_for_masked_lm( result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) - def create_and_check_bert_model_for_causal_lm_as_decoder( + def create_and_check_model_for_causal_lm_as_decoder( self, config, input_ids, @@ -257,7 +257,7 @@ def create_and_check_bert_model_for_causal_lm_as_decoder( ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) - def create_and_check_bert_for_next_sequence_prediction( + def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForNextSentencePrediction(config=config) @@ -268,7 +268,7 @@ def create_and_check_bert_for_next_sequence_prediction( ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) - def create_and_check_bert_for_pretraining( + def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForPreTraining(config=config) @@ -284,7 +284,7 @@ def create_and_check_bert_for_pretraining( self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) - def create_and_check_bert_for_question_answering( + def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForQuestionAnswering(config=config) @@ -300,7 +300,7 @@ def create_and_check_bert_for_question_answering( self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) - def create_and_check_bert_for_sequence_classification( + def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels @@ -310,7 +310,7 @@ def create_and_check_bert_for_sequence_classification( result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) - def create_and_check_bert_for_token_classification( + def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels @@ -320,7 +320,7 @@ def create_and_check_bert_for_token_classification( result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) - def create_and_check_bert_for_multiple_choice( + def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels 
): config.num_choices = self.num_choices @@ -379,15 +379,15 @@ def setUp(self): def test_config(self): self.config_tester.run_common_tests() - def test_bert_model(self): + def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_model(*config_and_inputs) + self.model_tester.create_and_check_model(*config_and_inputs) - def test_bert_model_as_decoder(self): + def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() - self.model_tester.create_and_check_bert_model_as_decoder(*config_and_inputs) + self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) - def test_bert_model_as_decoder_with_default_input_mask(self): + def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, @@ -403,7 +403,7 @@ def test_bert_model_as_decoder_with_default_input_mask(self): input_mask = None - self.model_tester.create_and_check_bert_model_as_decoder( + self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, @@ -417,39 +417,39 @@ def test_bert_model_as_decoder_with_default_input_mask(self): def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() - self.model_tester.create_and_check_bert_for_causal_lm(*config_and_inputs) + self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_masked_lm(*config_and_inputs) + self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() - self.model_tester.create_and_check_bert_model_for_causal_lm_as_decoder(*config_and_inputs) + self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_multiple_choice(*config_and_inputs) + self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_next_sequence_prediction(*config_and_inputs) + self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_pretraining(*config_and_inputs) + self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_question_answering(*config_and_inputs) + self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bert_for_sequence_classification(*config_and_inputs) + self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - 
self.model_tester.create_and_check_bert_for_token_classification(*config_and_inputs) + self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): diff --git a/tests/test_modeling_encoder_decoder.py b/tests/test_modeling_encoder_decoder.py index e62d2fb563243..5bc21ca0c40db 100644 --- a/tests/test_modeling_encoder_decoder.py +++ b/tests/test_modeling_encoder_decoder.py @@ -24,60 +24,34 @@ # for now only run module with pytest tests/test_modeling_encoder_decoder.py::EncoderDecoderModelTest from .test_modeling_bert import BertModelTester from .test_modeling_common import ids_tensor +from .test_modeling_roberta import RobertaModelTester if is_torch_available(): - from transformers import BertModel, EncoderDecoderModel, EncoderDecoderConfig - from transformers.modeling_bert import BertLMHeadModel + from transformers import ( + BertModel, + BertLMHeadModel, + RobertaModel, + RobertaForCausalLM, + EncoderDecoderModel, + EncoderDecoderConfig, + ) import numpy as np import torch @require_torch -class EncoderDecoderModelTest(unittest.TestCase): - def prepare_config_and_inputs_bert(self): - bert_model_tester = BertModelTester(self) - encoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs() - decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder() - ( - config, - input_ids, - token_type_ids, - input_mask, - sequence_labels, - token_labels, - choice_labels, - ) = encoder_config_and_inputs - ( - decoder_config, - decoder_input_ids, - decoder_token_type_ids, - decoder_input_mask, - decoder_sequence_labels, - decoder_token_labels, - decoder_choice_labels, - encoder_hidden_states, - encoder_attention_mask, - ) = decoder_config_and_inputs +class EncoderDecoderMixin: + def get_encoder_decoder_model(self, config, decoder_config): + pass - # make sure that cross attention layers are added - decoder_config.add_cross_attention = True - return { - "config": config, - "input_ids": input_ids, - "attention_mask": input_mask, - "decoder_config": decoder_config, - "decoder_input_ids": decoder_input_ids, - "decoder_token_type_ids": decoder_token_type_ids, - "decoder_attention_mask": decoder_input_mask, - "decoder_sequence_labels": decoder_sequence_labels, - "decoder_token_labels": decoder_token_labels, - "decoder_choice_labels": decoder_choice_labels, - "encoder_hidden_states": encoder_hidden_states, - "labels": decoder_token_labels, - } + def prepare_config_and_inputs(self): + pass + + def get_pretrained_model(self): + pass - def create_and_check_bert_encoder_decoder_model_from_pretrained_configs( + def check_encoder_decoder_model_from_pretrained_configs( self, config, input_ids, @@ -107,7 +81,7 @@ def create_and_check_bert_encoder_decoder_model_from_pretrained_configs( self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))) self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,))) - def create_and_check_bert_encoder_decoder_model( + def check_encoder_decoder_model( self, config, input_ids, @@ -118,8 +92,7 @@ def create_and_check_bert_encoder_decoder_model( decoder_attention_mask, **kwargs ): - encoder_model = BertModel(config) - decoder_model = BertLMHeadModel(decoder_config) + encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) 
self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) @@ -145,7 +118,7 @@ def create_and_check_bert_encoder_decoder_model( self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))) self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,))) - def create_and_check_bert_encoder_decoder_model_from_pretrained( + def check_encoder_decoder_model_from_pretrained( self, config, input_ids, @@ -156,8 +129,7 @@ def create_and_check_bert_encoder_decoder_model_from_pretrained( decoder_attention_mask, **kwargs ): - encoder_model = BertModel(config) - decoder_model = BertLMHeadModel(decoder_config) + encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = EncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) enc_dec_model.to(torch_device) @@ -171,7 +143,7 @@ def create_and_check_bert_encoder_decoder_model_from_pretrained( self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))) self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,))) - def create_and_check_save_and_load( + def check_save_and_load( self, config, input_ids, @@ -182,8 +154,7 @@ def create_and_check_save_and_load( decoder_attention_mask, **kwargs ): - encoder_model = BertModel(config) - decoder_model = BertLMHeadModel(decoder_config) + encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() @@ -212,7 +183,7 @@ def create_and_check_save_and_load( max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) - def create_and_check_save_and_load_encoder_decoder_model( + def check_save_and_load_encoder_decoder_model( self, config, input_ids, @@ -223,8 +194,7 @@ def create_and_check_save_and_load_encoder_decoder_model( decoder_attention_mask, **kwargs ): - encoder_model = BertModel(config) - decoder_model = BertLMHeadModel(decoder_config) + encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() @@ -257,7 +227,7 @@ def create_and_check_save_and_load_encoder_decoder_model( max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) - def create_and_check_bert_encoder_decoder_model_labels( + def check_encoder_decoder_model_labels( self, config, input_ids, @@ -269,8 +239,7 @@ def create_and_check_bert_encoder_decoder_model_labels( labels, **kwargs ): - encoder_model = BertModel(config) - decoder_model = BertLMHeadModel(decoder_config) + encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( @@ -288,9 +257,8 @@ def create_and_check_bert_encoder_decoder_model_labels( self.assertEqual(outputs_encoder_decoder[1].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))) self.assertEqual(outputs_encoder_decoder[2].shape, (input_ids.shape + (config.hidden_size,))) - def create_and_check_bert_encoder_decoder_model_generate(self, input_ids, config, decoder_config, **kwargs): - encoder_model = 
BertModel(config) - decoder_model = BertLMHeadModel(decoder_config) + def check_encoder_decoder_model_generate(self, input_ids, config, decoder_config, **kwargs): + encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) @@ -300,47 +268,37 @@ def create_and_check_bert_encoder_decoder_model_generate(self, input_ids, config ) self.assertEqual(generated_output.shape, (input_ids.shape[0],) + (decoder_config.max_length,)) - def test_bert_encoder_decoder_model(self): - input_ids_dict = self.prepare_config_and_inputs_bert() - self.create_and_check_bert_encoder_decoder_model(**input_ids_dict) + def test_encoder_decoder_model(self): + input_ids_dict = self.prepare_config_and_inputs() + self.check_encoder_decoder_model(**input_ids_dict) - def test_bert_encoder_decoder_model_from_pretrained_configs(self): - input_ids_dict = self.prepare_config_and_inputs_bert() - self.create_and_check_bert_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) + def test_encoder_decoder_model_from_pretrained_configs(self): + input_ids_dict = self.prepare_config_and_inputs() + self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) - def test_bert_encoder_decoder_model_from_pretrained(self): - input_ids_dict = self.prepare_config_and_inputs_bert() - self.create_and_check_bert_encoder_decoder_model_from_pretrained(**input_ids_dict) + def test_encoder_decoder_model_from_pretrained(self): + input_ids_dict = self.prepare_config_and_inputs() + self.check_encoder_decoder_model_from_pretrained(**input_ids_dict) def test_save_and_load_from_pretrained(self): - input_ids_dict = self.prepare_config_and_inputs_bert() - self.create_and_check_save_and_load(**input_ids_dict) + input_ids_dict = self.prepare_config_and_inputs() + self.check_save_and_load(**input_ids_dict) def test_save_and_load_from_encoder_decoder_pretrained(self): - input_ids_dict = self.prepare_config_and_inputs_bert() - self.create_and_check_save_and_load_encoder_decoder_model(**input_ids_dict) - - def test_bert_encoder_decoder_model_labels(self): - input_ids_dict = self.prepare_config_and_inputs_bert() - self.create_and_check_bert_encoder_decoder_model_labels(**input_ids_dict) + input_ids_dict = self.prepare_config_and_inputs() + self.check_save_and_load_encoder_decoder_model(**input_ids_dict) - def test_bert_encoder_decoder_model_generate(self): - input_ids_dict = self.prepare_config_and_inputs_bert() - self.create_and_check_bert_encoder_decoder_model_generate(**input_ids_dict) - - @slow - def test_real_bert_model_from_pretrained(self): - model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased") - self.assertIsNotNone(model) + def test_encoder_decoder_model_labels(self): + input_ids_dict = self.prepare_config_and_inputs() + self.check_encoder_decoder_model_labels(**input_ids_dict) - @slow - def test_real_bert_model_from_pretrained_add_cross_attention(self): - model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased") - self.assertTrue(hasattr(model.decoder.bert.encoder.layer[0], "crossattention")) + def test_encoder_decoder_model_generate(self): + input_ids_dict = self.prepare_config_and_inputs() + self.check_encoder_decoder_model_generate(**input_ids_dict) @slow - def test_real_bert_model_save_load_from_pretrained(self): - model_2 = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", 
"bert-base-uncased") + def test_real_model_save_load_from_pretrained(self): + model_2 = self.get_pretrained_model() model_2.to(torch_device) input_ids = ids_tensor([13, 5], model_2.config.encoder.vocab_size) decoder_input_ids = ids_tensor([13, 1], model_2.config.encoder.vocab_size) @@ -362,3 +320,107 @@ def test_real_bert_model_save_load_from_pretrained(self): out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) + + +class BertEncoderDecoderModelTest(EncoderDecoderMixin, unittest.TestCase): + def get_pretrained_model(self): + return EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-cased", "bert-base-cased") + + def get_encoder_decoder_model(self, config, decoder_config): + encoder_model = BertModel(config) + decoder_model = BertLMHeadModel(decoder_config) + return encoder_model, decoder_model + + def prepare_config_and_inputs(self): + model_tester = BertModelTester(self) + encoder_config_and_inputs = model_tester.prepare_config_and_inputs() + decoder_config_and_inputs = model_tester.prepare_config_and_inputs_for_decoder() + ( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + ) = encoder_config_and_inputs + ( + decoder_config, + decoder_input_ids, + decoder_token_type_ids, + decoder_input_mask, + decoder_sequence_labels, + decoder_token_labels, + decoder_choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ) = decoder_config_and_inputs + + # make sure that cross attention layers are added + decoder_config.add_cross_attention = True + return { + "config": config, + "input_ids": input_ids, + "attention_mask": input_mask, + "decoder_config": decoder_config, + "decoder_input_ids": decoder_input_ids, + "decoder_token_type_ids": decoder_token_type_ids, + "decoder_attention_mask": decoder_input_mask, + "decoder_sequence_labels": decoder_sequence_labels, + "decoder_token_labels": decoder_token_labels, + "decoder_choice_labels": decoder_choice_labels, + "encoder_hidden_states": encoder_hidden_states, + "labels": decoder_token_labels, + } + + +class RoBertaEncoderDecoderModelTest(EncoderDecoderMixin, unittest.TestCase): + def get_encoder_decoder_model(self, config, decoder_config): + encoder_model = RobertaModel(config) + decoder_model = RobertaForCausalLM(decoder_config) + return encoder_model, decoder_model + + def prepare_config_and_inputs(self): + model_tester = RobertaModelTester(self) + encoder_config_and_inputs = model_tester.prepare_config_and_inputs() + decoder_config_and_inputs = model_tester.prepare_config_and_inputs_for_decoder() + ( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + ) = encoder_config_and_inputs + ( + decoder_config, + decoder_input_ids, + decoder_token_type_ids, + decoder_input_mask, + decoder_sequence_labels, + decoder_token_labels, + decoder_choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ) = decoder_config_and_inputs + + # make sure that cross attention layers are added + decoder_config.add_cross_attention = True + return { + "config": config, + "input_ids": input_ids, + "attention_mask": input_mask, + "decoder_config": decoder_config, + "decoder_input_ids": decoder_input_ids, + "decoder_token_type_ids": decoder_token_type_ids, + "decoder_attention_mask": decoder_input_mask, + "decoder_sequence_labels": decoder_sequence_labels, + "decoder_token_labels": decoder_token_labels, + "decoder_choice_labels": decoder_choice_labels, + "encoder_hidden_states": 
encoder_hidden_states, + "labels": decoder_token_labels, + } + + def get_pretrained_model(self): + return EncoderDecoderModel.from_encoder_decoder_pretrained("roberta-base", "roberta-base") diff --git a/tests/test_modeling_roberta.py b/tests/test_modeling_roberta.py index 00b0b79e540e5..ddf4695127fe0 100644 --- a/tests/test_modeling_roberta.py +++ b/tests/test_modeling_roberta.py @@ -20,7 +20,7 @@ from transformers.testing_utils import require_torch, slow, torch_device from .test_configuration_common import ConfigTester -from .test_modeling_common import ModelTesterMixin, ids_tensor +from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): @@ -28,6 +28,7 @@ from transformers import ( RobertaConfig, RobertaModel, + RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, @@ -101,7 +102,34 @@ def prepare_config_and_inputs(self): return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels - def create_and_check_roberta_model( + def prepare_config_and_inputs_for_decoder(self): + ( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + ) = self.prepare_config_and_inputs() + + config.is_decoder = True + encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) + encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) + + return ( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ) + + def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RobertaModel(config=config) @@ -114,7 +142,58 @@ def create_and_check_roberta_model( self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) - def create_and_check_roberta_for_masked_lm( + def create_and_check_model_as_decoder( + self, + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ): + config.add_cross_attention = True + model = RobertaModel(config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + attention_mask=input_mask, + token_type_ids=token_type_ids, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + ) + result = model( + input_ids, + attention_mask=input_mask, + token_type_ids=token_type_ids, + encoder_hidden_states=encoder_hidden_states, + ) + result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) + + def create_and_check_for_causal_lm( + self, + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ): + model = RobertaForCausalLM(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, 
self.vocab_size)) + + def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RobertaForMaskedLM(config=config) @@ -123,7 +202,7 @@ def create_and_check_roberta_for_masked_lm( result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) - def create_and_check_roberta_for_token_classification( + def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels @@ -133,7 +212,7 @@ def create_and_check_roberta_for_token_classification( result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) - def create_and_check_roberta_for_multiple_choice( + def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices @@ -151,7 +230,7 @@ def create_and_check_roberta_for_multiple_choice( ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) - def create_and_check_roberta_for_question_answering( + def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RobertaForQuestionAnswering(config=config) @@ -187,6 +266,7 @@ class RobertaModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( + RobertaForCausalLM, RobertaForMaskedLM, RobertaModel, RobertaForSequenceClassification, @@ -205,25 +285,61 @@ def setUp(self): def test_config(self): self.config_tester.run_common_tests() - def test_roberta_model(self): + def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_roberta_model(*config_and_inputs) + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_model_as_decoder(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() + self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) + + def test_model_as_decoder_with_default_input_mask(self): + # This regression test was failing with PyTorch < 1.3 + ( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ) = self.model_tester.prepare_config_and_inputs_for_decoder() + + input_mask = None + + self.model_tester.create_and_check_model_as_decoder( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ) + + def test_for_causal_lm(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() + self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_roberta_for_masked_lm(*config_and_inputs) + self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - 
self.model_tester.create_and_check_roberta_for_token_classification(*config_and_inputs) + self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_roberta_for_multiple_choice(*config_and_inputs) + self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_roberta_for_question_answering(*config_and_inputs) + self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): From 9cc70d6a35316ba0ebe2e8f700af35fd6a441120 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 11 Aug 2020 14:54:11 +0200 Subject: [PATCH 2/5] fix headmask --- src/transformers/modeling_encoder_decoder.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/src/transformers/modeling_encoder_decoder.py b/src/transformers/modeling_encoder_decoder.py index 6fdb961b659be..0ee5f2962b6d2 100644 --- a/src/transformers/modeling_encoder_decoder.py +++ b/src/transformers/modeling_encoder_decoder.py @@ -191,11 +191,9 @@ def forward( input_ids=None, inputs_embeds=None, attention_mask=None, - head_mask=None, encoder_outputs=None, decoder_input_ids=None, decoder_attention_mask=None, - decoder_head_mask=None, decoder_inputs_embeds=None, labels=None, **kwargs, @@ -216,10 +214,6 @@ def forward( Mask to avoid performing attention on padding token indices for the encoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. - head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): - Mask to nullify selected heads of the self-attention modules for the encoder. - Mask values selected in ``[0, 1]``: - ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`, defaults to :obj:`None`): Tuple consists of (`last_hidden_state`, `optional`: `hidden_states`, `optional`: `attentions`) `last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`) is a sequence of hidden-states at the output of the last layer of the encoder. @@ -231,10 +225,6 @@ def forward( :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`, defaults to :obj:`None`): Default behavior: generate a tensor that ignores pad tokens in decoder_input_ids. Causal mask will also be used by default. - decoder_head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): - Mask to nullify selected heads of the self-attention modules for the decoder. - Mask values selected in ``[0, 1]``: - ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors @@ -279,7 +269,6 @@ def forward( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, - head_mask=head_mask, return_dict=False, **kwargs_encoder, ) @@ -293,7 +282,6 @@ def forward( attention_mask=decoder_attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, - head_mask=decoder_head_mask, labels=labels, return_dict=False, **kwargs_decoder, From a6b2619b449593eb8d3100df3519826bf5a11840 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 11 Aug 2020 18:16:53 +0200 Subject: [PATCH 3/5] apply Sylvains suggestions --- docs/source/model_doc/roberta.rst | 7 ++++++ src/transformers/modeling_bert.py | 33 +++++++++++++++------------- src/transformers/modeling_roberta.py | 17 +++++++------- 3 files changed, 33 insertions(+), 24 deletions(-) diff --git a/docs/source/model_doc/roberta.rst b/docs/source/model_doc/roberta.rst index fae0d91a29269..ac83dde4fc5b7 100644 --- a/docs/source/model_doc/roberta.rst +++ b/docs/source/model_doc/roberta.rst @@ -63,6 +63,13 @@ RobertaModel :members: +RobertaForCausalLM +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.RobertaForCausalLM + :members: + + RobertaForMaskedLM ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/src/transformers/modeling_bert.py b/src/transformers/modeling_bert.py index e650f21a199c6..3fb13b316ba81 100755 --- a/src/transformers/modeling_bert.py +++ b/src/transformers/modeling_bert.py @@ -683,14 +683,6 @@ class BertForPreTrainingOutput(ModelOutput): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention - if the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask - is used in the cross-attention if the model is configured as a decoder. - Mask values selected in ``[0, 1]``: - ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`): If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`): @@ -769,10 +761,16 @@ def forward( output_hidden_states=None, return_dict=None, ): - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + if the model is configured as a decoder. 
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask + is used in the cross-attention if the model is configured as a decoder. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. + """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: @@ -982,16 +980,21 @@ def forward( output_attentions=None, output_hidden_states=None, return_dict=None, - **kwargs ): r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + if the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask + is used in the cross-attention if the model is configured as a decoder. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` - kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): - Used to hide legacy arguments that have been deprecated. Returns: diff --git a/src/transformers/modeling_roberta.py b/src/transformers/modeling_roberta.py index 202638a0be16d..a109b2c41da44 100644 --- a/src/transformers/modeling_roberta.py +++ b/src/transformers/modeling_roberta.py @@ -145,14 +145,6 @@ def create_position_ids_from_inputs_embeds(self, inputs_embeds): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention - if the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask - is used in the cross-attention if the model is configured as a decoder. - Mask values selected in ``[0, 1]``: - ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`): If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail. 
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`): @@ -226,9 +218,16 @@ def forward( output_attentions=None, output_hidden_states=None, return_dict=None, - **kwargs ): r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + if the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask + is used in the cross-attention if the model is configured as a decoder. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) From 756f8b00b42aefc6ee5fb55d261a22207149d007 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 11 Aug 2020 18:45:03 +0200 Subject: [PATCH 4/5] fix typo --- src/transformers/modeling_bert.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/transformers/modeling_bert.py b/src/transformers/modeling_bert.py index 3fb13b316ba81..0c956f82bb592 100755 --- a/src/transformers/modeling_bert.py +++ b/src/transformers/modeling_bert.py @@ -771,6 +771,10 @@ def forward( Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: From ed8414a38801f97cfb5d7de73f243a67ece6bcc7 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 12 Aug 2020 18:04:14 +0200 Subject: [PATCH 5/5] Apply suggestions from code review --- src/transformers/modeling_roberta.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/transformers/modeling_roberta.py b/src/transformers/modeling_roberta.py index a109b2c41da44..59030ab28b372 100644 --- a/src/transformers/modeling_roberta.py +++ b/src/transformers/modeling_roberta.py @@ -233,8 +233,6 @@ def forward( Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` - kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): - Used to hide legacy arguments that have been deprecated. Returns:
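A minimal usage sketch of the functionality added in this series, mirroring the calls exercised in `RoBertaEncoderDecoderModelTest` and in the `RobertaForCausalLM` docstring example; the example sentences, tokenizer handling, and generation settings are illustrative assumptions, not code taken from the diff.

# Illustrative sketch only; assumes a transformers checkout with this series applied.
from transformers import EncoderDecoderModel, RobertaConfig, RobertaForCausalLM, RobertaTokenizer

tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

# Standalone causal LM: the config must be flagged as a decoder,
# as the new warning in RobertaForCausalLM.__init__ points out.
decoder_config = RobertaConfig.from_pretrained("roberta-base")
decoder_config.is_decoder = True
causal_lm = RobertaForCausalLM.from_pretrained("roberta-base", config=decoder_config)
prediction_logits = causal_lm(**tokenizer("Hello, my dog is cute", return_tensors="pt"))[0]

# RoBERTa-to-RoBERTa encoder-decoder, as in RoBertaEncoderDecoderModelTest.get_pretrained_model.
# The decoder's cross-attention weights are freshly initialized and need fine-tuning.
model = EncoderDecoderModel.from_encoder_decoder_pretrained("roberta-base", "roberta-base")

src = tokenizer("This is a longer source sentence to condition on.", return_tensors="pt")
tgt = tokenizer("A short target.", return_tensors="pt")

# Training-style forward pass: RobertaForCausalLM shifts `labels` internally for
# next-token prediction, so the same ids serve as decoder inputs and labels; with
# labels given, the first element of the returned tuple is the LM loss.
outputs = model(
    input_ids=src["input_ids"],
    attention_mask=src["attention_mask"],
    decoder_input_ids=tgt["input_ids"],
    decoder_attention_mask=tgt["attention_mask"],
    labels=tgt["input_ids"],
)
loss = outputs[0]
loss.backward()

# Generation, mirroring check_encoder_decoder_model_generate: the decoder's new
# prepare_inputs_for_generation builds its attention mask on the fly.
generated = model.generate(
    src["input_ids"], decoder_start_token_id=model.config.decoder.bos_token_id, max_length=20
)
print(tokenizer.decode(generated[0], skip_special_tokens=True))

Because `modeling_auto.py` now maps `RobertaConfig` to `RobertaForCausalLM` in `MODEL_FOR_CAUSAL_LM_MAPPING`, `AutoModelForCausalLM` resolves RoBERTa checkpoints to the same class.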