diff --git a/examples/seq2seq/test_bash_script.py b/examples/seq2seq/test_bash_script.py
index f3470113bd9665..a9cb6e3a092656 100644
--- a/examples/seq2seq/test_bash_script.py
+++ b/examples/seq2seq/test_bash_script.py
@@ -55,9 +55,8 @@ def test_train_mbart_cc25_enro_script():
     if CUDA_AVAILABLE:
         gpus = 1  # torch.cuda.device_count()
     else:
-        bash_script = bash_script.replace("--fp16", "")
         gpus = 0
-
+    bash_script = bash_script.replace("--fp16", "")
     testargs = (
         ["finetune.py"]
         + bash_script.split()
diff --git a/examples/seq2seq/test_seq2seq_examples.py b/examples/seq2seq/test_seq2seq_examples.py
index e25fb0b0e7edb3..191bbfac70fd1b 100644
--- a/examples/seq2seq/test_seq2seq_examples.py
+++ b/examples/seq2seq/test_seq2seq_examples.py
@@ -43,7 +43,7 @@
     "student_decoder_layers": 1,
     "val_check_interval": 1.0,
     "output_dir": "",
-    "fp16": CUDA_AVAILABLE,
+    "fp16": False,  # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp
     "no_teacher": False,
     "fp16_opt_level": "O1",
     "gpus": 1 if CUDA_AVAILABLE else 0,
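Taken together, the two hunks disable fp16 in these tests unconditionally: the first strips `--fp16` from the bash script on both the GPU and CPU paths, and the second hardcodes `"fp16": False` until CI installs apex or the tests move to native amp. A minimal sketch of the resulting control flow in `test_train_mbart_cc25_enro_script` is below; `CUDA_AVAILABLE` and `bash_script` are hypothetical stand-ins here (the real test derives them from torch and a `.sh` file in the examples directory).

```python
# Sketch of the post-patch logic, with stand-in values for illustration.
CUDA_AVAILABLE = False  # e.g. torch.cuda.is_available() in the real test
bash_script = "python finetune.py --fp16 --gpus 1"  # hypothetical script text

if CUDA_AVAILABLE:
    gpus = 1  # torch.cuda.device_count()
else:
    gpus = 0
# Key behavioral change: "--fp16" is now stripped on both branches,
# so the test no longer assumes apex is installed even when a GPU is present.
bash_script = bash_script.replace("--fp16", "")

testargs = ["finetune.py"] + bash_script.split()
print(testargs)  # "--fp16" is gone regardless of CUDA availability
```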