
Commit 38cfcd9
Fix megatron p tuning notebook (#4741)
* move plugin to plugins arg

Signed-off-by: nithinraok <nithinrao.koluguri@gmail.com>

* another instance

Signed-off-by: nithinraok <nithinrao.koluguri@gmail.com>

Signed-off-by: nithinraok <nithinrao.koluguri@gmail.com>
nithinraok committed Aug 15, 2022
1 parent f7b7985 commit 38cfcd9
Showing 1 changed file with 7 additions and 8 deletions.

tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb
@@ -837,9 +837,6 @@
     "# for PyTorch Native AMP set precision=16\n",
     "config.trainer.precision = 16 if torch.cuda.is_available() else 32\n",
     "\n",
-    "# remove distributed training flags\n",
-    "config.trainer.strategy = None\n",
-    "\n",
     "# setup cluster environment parameters\"\n",
     "# use torch elastic cluster environment so `create_process_externally` is True\n",
     "# the launcher is set to None. It will not try to spawn new processes.\n",
@@ -848,8 +845,9 @@
     "os.environ[\"RANK\"] = '0'\n",
     "os.environ[\"WORLD_SIZE\"] = '1'\n",
     "\n",
-    "strategy = NLPDDPStrategy(find_unused_parameters=False, no_ddp_communication_hook=True), TorchElasticEnvironment()\n",
-    "trainer = pl.Trainer(strategy=strategy, **config.trainer)\n",
+    "strategy = NLPDDPStrategy(find_unused_parameters=False, no_ddp_communication_hook=True)\n",
+    "plugins = [TorchElasticEnvironment()]\n",
+    "trainer = pl.Trainer(plugins=plugins, strategy=strategy, **config.trainer)\n",
     "\n",
     "print(\"Trainer config - \\n\")\n",
     "print(OmegaConf.to_yaml(config.trainer))"
@@ -1136,8 +1134,9 @@
     "config.model.optim.lr = 5e-4\n",
     "\n",
     "# Reset the trainer\n",
-    "strategy = NLPDDPStrategy(find_unused_parameters=False, no_ddp_communication_hook=True), TorchElasticEnvironment()\n",
-    "trainer = pl.Trainer(strategy=strategy, **config.trainer)\n",
+    "strategy = NLPDDPStrategy(find_unused_parameters=False, no_ddp_communication_hook=True)\n",
+    "plugins = [TorchElasticEnvironment()]\n",
+    "trainer = pl.Trainer(strategy=strategy, plugins=plugins, **config.trainer)\n",
     "\n",
     "print(\"Trainer config - \\n\")\n",
     "print(OmegaConf.to_yaml(config.trainer))"
@@ -1282,7 +1281,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.13"
+   "version": "3.9.12"
   }
  },
  "nbformat": 4,
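The net effect of both hunks, as a minimal standalone sketch: the TorchElasticEnvironment cluster plugin is passed through the Trainer's `plugins` argument instead of being tupled together with the strategy (the old code assigned a `(NLPDDPStrategy(...), TorchElasticEnvironment())` tuple to `strategy`, which `pl.Trainer` cannot accept). This sketch assumes NeMo and PyTorch Lightning as of the commit date (~1.7); `trainer_kwargs` is a hypothetical stand-in for the notebook's `config.trainer`.

import os
import pytorch_lightning as pl
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy

# Single-process torch elastic environment variables, as set in the notebook
os.environ["LOCAL_RANK"] = '0'
os.environ["RANK"] = '0'
os.environ["WORLD_SIZE"] = '1'

strategy = NLPDDPStrategy(find_unused_parameters=False, no_ddp_communication_hook=True)
plugins = [TorchElasticEnvironment()]  # cluster environment goes in `plugins`, not `strategy`

# Hypothetical stand-in for the notebook's config.trainer
trainer_kwargs = dict(devices=1, accelerator="auto", max_steps=100)
trainer = pl.Trainer(strategy=strategy, plugins=plugins, **trainer_kwargs)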
