fix run_seq2seq.py; porting trainer tests to it #10162

Merged: 9 commits, Feb 15, 2021
examples/seq2seq/run_seq2seq.py (80 changes: 56 additions & 24 deletions)
@@ -18,6 +18,7 @@
 """
 # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
 
+import json
 import logging
 import os
 import re
@@ -38,6 +39,7 @@
     DataCollatorForSeq2Seq,
     HfArgumentParser,
     MBartTokenizer,
+    MBartTokenizerFast,
     Seq2SeqTrainer,
     Seq2SeqTrainingArguments,
     default_data_collator,
@@ -53,6 +55,11 @@
 logger = logging.getLogger(__name__)
 
 
+def save_json(content, path, indent=4, **json_dump_kwargs):
+    with open(path, "w") as f:
+        json.dump(content, f, indent=indent, sort_keys=True, **json_dump_kwargs)
+
+
 @dataclass
 class ModelArguments:
     """
@@ -351,8 +358,15 @@ def main():
     )
 
     # Set decoder_start_token_id
-    if model.config.decoder_start_token_id is None and isinstance(tokenizer, MBartTokenizer):
-        model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
+    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
+        assert (
+            data_args.target_lang is not None and data_args.source_lang is not None
+        ), "mBart requires --target_lang and --source_lang"
+        if isinstance(tokenizer, MBartTokenizer):
+            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
+        else:
+            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
Comment on lines +361 to +368

Collaborator:

Thanks for putting that logic back. @patil-suraj @patrickvonplaten, since you know this better than I do: shouldn't this be done by set_tgt_lang_special_tokens inside mBART?

Contributor Author (@stas00, Feb 13, 2021):

Can we make merging this a priority, so that I can finish porting the other tests? Can this question be dealt with separately? This PR isn't introducing anything new, only restoring what was there in the first place.

I can open an issue so that it doesn't fall through the cracks.

Thank you!

Contributor:

set_tgt_lang_special_tokens is a method on MBartTokenizer, so this needs to be done outside of the model.

Also, IMO MBartTokenizerFast should also have the lang_code_to_id attribute; I'm not sure why it was treated differently.

Contributor Author:

OK, I decided to go ahead and port the other scripts instead of waiting for the first set to be merged. I had to make some more fixes in the script while at it. So no rush.

Contributor:

Agree with @patil-suraj here -> IMO MBartTokenizerFast should have lang_code_to_id so that we don't need an if/else here... but that is not necessarily the responsibility of this PR, so it's fine for me as it is.

Contributor:

Maybe for models such as mBART that don't have a unique decoder_start_token_id, we should in the future just leave config.decoder_start_token_id=None and then throw an error / warning when generating.

Contributor:

Setting model.config.decoder_start_token_id manually here is the correct thing to do, but it's not super pretty IMO -> it would be better to incentivize the user to either set it at init or when calling generate. But because of backward compatibility we can't really change it now for mBART anyway...
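To make the branch under discussion concrete, here is a small standalone sketch (not part of the PR; the checkpoint name and language code are illustrative) of how both tokenizer classes resolve a --target_lang value into the id the script assigns to config.decoder_start_token_id:

```python
# Illustrative sketch only -- not part of this PR. Checkpoint and language code are examples.
from transformers import MBartTokenizer, MBartTokenizerFast

target_lang = "ro_RO"  # what --target_lang might be set to for mBART

slow_tok = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25")
fast_tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-cc25")

# Slow-tokenizer branch in the script: direct lookup in the lang_code_to_id mapping.
slow_id = slow_tok.lang_code_to_id[target_lang]

# Fast-tokenizer branch: the language code is a regular token in the vocab, so a
# generic token-to-id conversion returns the same id.
fast_id = fast_tok.convert_tokens_to_ids(target_lang)

assert slow_id == fast_id  # either branch yields the same decoder_start_token_id
```

If MBartTokenizerFast grew a lang_code_to_id mapping, as the reviewers suggest, the else branch in the script could be dropped.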

     if model.config.decoder_start_token_id is None:
         raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
 
@@ -448,6 +462,8 @@ def preprocess_function(examples):
 
     if training_args.do_train:
+        if "train" not in datasets:
+            raise ValueError("--do_train requires a train dataset")
         train_dataset = datasets["train"]
         if data_args.max_train_samples is not None:
             train_dataset = train_dataset.select(range(data_args.max_train_samples))
         train_dataset = train_dataset.map(
@@ -460,6 +476,8 @@ def preprocess_function(examples):
 
     if training_args.do_eval:
         max_target_length = data_args.val_max_target_length
+        if "validation" not in datasets:
+            raise ValueError("--do_eval requires a validation dataset")
         eval_dataset = datasets["validation"]
         if data_args.max_val_samples is not None:
             eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
@@ -473,6 +491,8 @@ def preprocess_function(examples):
 
     if training_args.do_predict:
         max_target_length = data_args.val_max_target_length
+        if "test" not in datasets:
+            raise ValueError("--do_predict requires a test dataset")
         test_dataset = datasets["test"]
         if data_args.max_test_samples is not None:
             test_dataset = test_dataset.select(range(data_args.max_test_samples))
@@ -550,6 +570,7 @@ def compute_metrics(eval_preds):
         compute_metrics=compute_metrics if training_args.predict_with_generate else None,
     )
 
+    all_metrics = {}
     # Training
     if training_args.do_train:
         if last_checkpoint is not None:
@@ -561,13 +582,17 @@ def compute_metrics(eval_preds):
         train_result = trainer.train(resume_from_checkpoint=checkpoint)
         trainer.save_model()  # Saves the tokenizer too for easy upload
 
-        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
+        metrics = train_result.metrics
+        max_train_samples = (
+            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
+        )
+        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
         if trainer.is_world_process_zero():
-            with open(output_train_file, "w") as writer:
-                logger.info("***** Train results *****")
-                for key, value in sorted(train_result.metrics.items()):
-                    logger.info(f"  {key} = {value}")
-                    writer.write(f"{key} = {value}\n")
+            logger.info("***** train metrics *****")
+            for key in sorted(metrics.keys()):
+                logger.info(f"  {key} = {metrics[key]}")
+            save_json(metrics, os.path.join(training_args.output_dir, "train_results.json"))
+            all_metrics.update(metrics)
 
         # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
         trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
@@ -577,16 +602,19 @@ def compute_metrics(eval_preds):
     if training_args.do_eval:
         logger.info("*** Evaluate ***")
 
-        results = trainer.evaluate(max_length=data_args.val_max_target_length, num_beams=data_args.num_beams)
-        results = {k: round(v, 4) for k, v in results.items()}
+        metrics = trainer.evaluate(
+            max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix="val"
+        )
+        metrics = {k: round(v, 4) for k, v in metrics.items()}
+        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
+        metrics["val_samples"] = min(max_val_samples, len(eval_dataset))
 
-        output_eval_file = os.path.join(training_args.output_dir, "eval_results_seq2seq.txt")
         if trainer.is_world_process_zero():
-            with open(output_eval_file, "w") as writer:
-                logger.info("***** Eval results *****")
-                for key, value in sorted(results.items()):
-                    logger.info(f"  {key} = {value}")
-                    writer.write(f"{key} = {value}\n")
+            logger.info("***** val metrics *****")
+            for key in sorted(metrics.keys()):
+                logger.info(f"  {key} = {metrics[key]}")
+            save_json(metrics, os.path.join(training_args.output_dir, "val_results.json"))
+            all_metrics.update(metrics)
 
     if training_args.do_predict:
         logger.info("*** Test ***")
@@ -597,16 +625,17 @@ def compute_metrics(eval_preds):
             max_length=data_args.val_max_target_length,
             num_beams=data_args.num_beams,
         )
-        test_metrics = test_results.metrics
-        test_metrics["test_loss"] = round(test_metrics["test_loss"], 4)
+        metrics = test_results.metrics
+        max_test_samples = data_args.max_test_samples if data_args.max_test_samples is not None else len(test_dataset)
+        metrics["test_samples"] = min(max_test_samples, len(test_dataset))
+        metrics = {k: round(v, 4) for k, v in metrics.items()}
 
-        output_test_result_file = os.path.join(training_args.output_dir, "test_results_seq2seq.txt")
         if trainer.is_world_process_zero():
-            with open(output_test_result_file, "w") as writer:
-                logger.info("***** Test results *****")
-                for key, value in sorted(test_metrics.items()):
-                    logger.info(f"  {key} = {value}")
-                    writer.write(f"{key} = {value}\n")
+            logger.info("***** test metrics *****")
+            for key in sorted(metrics.keys()):
+                logger.info(f"  {key} = {metrics[key]}")
+            save_json(metrics, os.path.join(training_args.output_dir, "test_results.json"))
+            all_metrics.update(metrics)
 
         if training_args.predict_with_generate:
             test_preds = tokenizer.batch_decode(
@@ -617,6 +646,9 @@ def compute_metrics(eval_preds):
             with open(output_test_preds_file, "w") as writer:
                 writer.write("\n".join(test_preds))
 
+    if trainer.is_world_process_zero():
+        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
+
     return results
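Since the motivation for switching from the old *.txt dumps to save_json is to let the ported trainer tests consume structured metrics, a test can load these files directly. The sketch below is a hypothetical illustration, not the PR's actual test code; the helper name, output directory, and asserted keys are assumptions based only on the files this diff writes:

```python
# Hypothetical illustration of consuming the JSON files this diff writes; not the actual tests.
import json
import os


def read_results(output_dir, split):
    """Load train_results.json / val_results.json / test_results.json / all_results.json."""
    with open(os.path.join(output_dir, f"{split}_results.json")) as f:
        return json.load(f)


# After running run_seq2seq.py with --do_train --do_eval --output_dir <output_dir>:
# metrics = read_results("<output_dir>", "all")
# assert metrics["train_samples"] > 0   # added by the script before save_json()
# assert "val_loss" in metrics          # prefixed via metric_key_prefix="val"
```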

