Commit

Remove unused train_masked_language_model helper (#1452)
Summary: Pull Request resolved: fairinternal/fairseq-py#1452

Test Plan: Imported from OSS

Reviewed By: lematt1991

Differential Revision: D25108462

Pulled By: myleott

fbshipit-source-id: 3c17a9937a4c3edb69f64130dfd866c5f42a4aaf
myleott authored and facebook-github-bot committed Nov 20, 2020
1 parent dbfca6e commit aee01ff
Showing 1 changed file with 0 additions and 66 deletions.
66 changes: 0 additions & 66 deletions tests/test_binaries.py
@@ -1673,71 +1673,5 @@ def eval_lm_main(data_dir, extra_flags=None):
    eval_lm.main(eval_lm_args)


def train_masked_language_model(data_dir, arch, extra_args=()):
    train_parser = options.get_training_parser()
    # TODO: langs should be in and out right?
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            "--task",
            "cross_lingual_lm",
            data_dir,
            "--arch",
            arch,
            # Optimizer args
            "--optimizer",
            "adam",
            "--lr-scheduler",
            "reduce_lr_on_plateau",
            "--lr-shrink",
            "0.5",
            "--lr",
            "0.0001",
            "--min-lr",
            "1e-09",
            # dropout, attention args
            "--dropout",
            "0.1",
            "--attention-dropout",
            "0.1",
            # MLM args
            "--criterion",
            "masked_lm_loss",
            "--masked-lm-only",
            "--monolingual-langs",
            "in,out",
            "--num-segment",
            "5",
            # Transformer args: use a small transformer model for fast training
            "--encoder-layers",
            "1",
            "--encoder-embed-dim",
            "32",
            "--encoder-attention-heads",
            "1",
            "--encoder-ffn-embed-dim",
            "32",
            # Other training args
            "--max-tokens",
            "500",
            "--tokens-per-sample",
            "500",
            "--save-dir",
            data_dir,
            "--max-epoch",
            "1",
            "--no-progress-bar",
            "--distributed-world-size",
            "1",
            "--dataset-impl",
            "raw",
            "--num-workers",
            "0",
        ]
        + list(extra_args),
    )
    train.main(train_args)


if __name__ == "__main__":
    unittest.main()
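
For reference, here is a minimal sketch of how a test could have invoked the removed helper before this commit; the arch name "masked_lm", the temporary-directory setup, and the test class name are assumptions for illustration, not code taken from this repository.

import tempfile
import unittest


class TestMaskedLMHelperUsage(unittest.TestCase):
    # Hypothetical usage sketch only: the arch name and the empty data_dir
    # are assumed; a real test would first generate and binarize dummy
    # monolingual data into data_dir before calling the helper.
    def test_train_masked_language_model(self):
        with tempfile.TemporaryDirectory(prefix="test_mlm") as data_dir:
            train_masked_language_model(data_dir, arch="masked_lm")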
