test: replacing tmpdir with tmp_path in tests_pytorch/plugins (#…
fnhirwa committed Mar 17, 2024
1 parent e65d840 commit 82f8ab9
Showing 4 changed files with 26 additions and 26 deletions.
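
The commit migrates these tests from pytest's legacy tmpdir fixture, which yields a py.path.local object, to the recommended tmp_path fixture, which yields a standard pathlib.Path. A minimal sketch of the difference between the two fixtures (the test names here are illustrative, not part of the commit):

    from pathlib import Path

    def test_old_style(tmpdir):
        # legacy fixture: a py.path.local; joined with .join(), str path via .strpath
        assert tmpdir.join("x.ckpt").strpath.endswith("x.ckpt")

    def test_new_style(tmp_path):
        # modern fixture: a pathlib.Path; joined with the "/" operator
        assert isinstance(tmp_path / "x.ckpt", Path)

Both fixture types are accepted wherever Trainer takes a path (e.g. default_root_dir), so most hunks below are a mechanical rename.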
4 changes: 2 additions & 2 deletions tests/tests_pytorch/plugins/precision/test_amp_integration.py
@@ -33,12 +33,12 @@ def configure_optimizers(self):


 @RunIf(min_cuda_gpus=1)
-def test_amp_fused_optimizer_parity(tmpdir):
+def test_amp_fused_optimizer_parity(tmp_path):
     def run(fused=False):
         seed_everything(1234)
         model = FusedOptimizerParityModel(fused)
         trainer = Trainer(
-            default_root_dir=tmpdir,
+            default_root_dir=tmp_path,
             accelerator="cuda",
             devices=1,
             precision="16-mixed",
8 changes: 4 additions & 4 deletions tests/tests_pytorch/plugins/precision/test_double.py
@@ -142,22 +142,22 @@ def training_step(self, batch, batch_idx):
         DoublePrecisionBoringModelComplexBuffer,
     ],
 )
-def test_double_precision(tmpdir, boring_model):
+def test_double_precision(tmp_path, boring_model):
     model = boring_model()

-    trainer = Trainer(max_epochs=2, default_root_dir=tmpdir, fast_dev_run=2, precision="64-true", log_every_n_steps=1)
+    trainer = Trainer(max_epochs=2, default_root_dir=tmp_path, fast_dev_run=2, precision="64-true", log_every_n_steps=1)
     trainer.fit(model)
     trainer.test(model)
     trainer.predict(model)


 @RunIf(min_cuda_gpus=2)
-def test_double_precision_ddp(tmpdir):
+def test_double_precision_ddp(tmp_path):
     model = DoublePrecisionBoringModel()

     trainer = Trainer(
         max_epochs=1,
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         strategy="ddp_spawn",
         accelerator="gpu",
         devices=2,
8 changes: 4 additions & 4 deletions tests/tests_pytorch/plugins/test_amp_plugins.py
@@ -130,12 +130,12 @@ def configure_optimizers(self):

 @RunIf(min_cuda_gpus=2)
 @pytest.mark.parametrize("accum", [1, 2])
-def test_amp_gradient_unscale(tmpdir, accum: int):
+def test_amp_gradient_unscale(tmp_path, accum: int):
     model = TestPrecisionModel()

     trainer = Trainer(
         max_epochs=2,
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         limit_train_batches=2,
         limit_val_batches=0,
         strategy="ddp_spawn",
@@ -153,7 +153,7 @@ def test_amp_gradient_unscale(tmpdir, accum: int):


 @RunIf(min_cuda_gpus=1)
-def test_amp_skip_optimizer(tmpdir):
+def test_amp_skip_optimizer(tmp_path):
     """Test that optimizers can be skipped when using amp."""

     class CustomBoringModel(BoringModel):
@@ -183,7 +183,7 @@ def configure_optimizers(self):
                 torch.optim.SGD(self.layer2.parameters(), lr=0.1),
             ]

-    trainer = Trainer(default_root_dir=tmpdir, accelerator="gpu", devices=1, fast_dev_run=1, precision="16-mixed")
+    trainer = Trainer(default_root_dir=tmp_path, accelerator="gpu", devices=1, fast_dev_run=1, precision="16-mixed")
     model = CustomBoringModel()
     trainer.fit(model)

32 changes: 16 additions & 16 deletions tests/tests_pytorch/plugins/test_checkpoint_io_plugin.py
@@ -37,16 +37,16 @@ def remove_checkpoint(self, path: _PATH) -> None:
         os.remove(path)


-def test_checkpoint_plugin_called(tmpdir):
+def test_checkpoint_plugin_called(tmp_path):
     """Ensure that the custom checkpoint IO plugin and torch checkpoint IO plugin is called when saving/loading."""
     checkpoint_plugin = CustomCheckpointIO()
     checkpoint_plugin = MagicMock(wraps=checkpoint_plugin, spec=CustomCheckpointIO)

-    ck = ModelCheckpoint(dirpath=tmpdir, save_last=True)
+    ck = ModelCheckpoint(dirpath=tmp_path, save_last=True)

     model = BoringModel()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         accelerator="cpu",
         strategy=SingleDeviceStrategy("cpu", checkpoint_io=checkpoint_plugin),
         callbacks=ck,
@@ -57,22 +57,22 @@ def test_checkpoint_plugin_called(tmpdir):
     )
     trainer.fit(model)

-    ckpt_files = {fn.name for fn in Path(tmpdir).glob("*.ckpt")}
+    ckpt_files = {fn.name for fn in Path(tmp_path).glob("*.ckpt")}
     assert ckpt_files == {"epoch=1-step=2.ckpt", "last.ckpt"}
-    assert trainer.checkpoint_callback.best_model_path == tmpdir / "epoch=1-step=2.ckpt"
-    assert trainer.checkpoint_callback.last_model_path == tmpdir / "last.ckpt"
+    assert trainer.checkpoint_callback.best_model_path == str(tmp_path / "epoch=1-step=2.ckpt")
+    assert trainer.checkpoint_callback.last_model_path == str(tmp_path / "last.ckpt")
     assert checkpoint_plugin.save_checkpoint.call_count == 4
     assert checkpoint_plugin.remove_checkpoint.call_count == 1

     trainer.test(model, ckpt_path=ck.last_model_path)
-    checkpoint_plugin.load_checkpoint.assert_called_with(tmpdir / "last.ckpt")
+    checkpoint_plugin.load_checkpoint.assert_called_with(str(tmp_path / "last.ckpt"))

     checkpoint_plugin.reset_mock()
-    ck = ModelCheckpoint(dirpath=tmpdir, save_last=True)
+    ck = ModelCheckpoint(dirpath=tmp_path, save_last=True)

     model = BoringModel()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         accelerator="cpu",
         strategy=SingleDeviceStrategy("cpu"),
         plugins=[checkpoint_plugin],
@@ -84,19 +84,19 @@ def test_checkpoint_plugin_called(tmpdir):
     )
     trainer.fit(model)

-    ckpt_files = {fn.name for fn in Path(tmpdir).glob("*.ckpt")}
+    ckpt_files = {fn.name for fn in Path(tmp_path).glob("*.ckpt")}
     assert ckpt_files == {"epoch=1-step=2.ckpt", "last.ckpt", "epoch=1-step=2-v1.ckpt", "last-v1.ckpt"}
-    assert trainer.checkpoint_callback.best_model_path == tmpdir / "epoch=1-step=2-v1.ckpt"
-    assert trainer.checkpoint_callback.last_model_path == tmpdir / "last-v1.ckpt"
+    assert trainer.checkpoint_callback.best_model_path == str(tmp_path / "epoch=1-step=2-v1.ckpt")
+    assert trainer.checkpoint_callback.last_model_path == str(tmp_path / "last-v1.ckpt")
     assert checkpoint_plugin.save_checkpoint.call_count == 4
     assert checkpoint_plugin.remove_checkpoint.call_count == 1

     trainer.test(model, ckpt_path=ck.last_model_path)
     checkpoint_plugin.load_checkpoint.assert_called_once()
-    checkpoint_plugin.load_checkpoint.assert_called_with(tmpdir / "last-v1.ckpt")
+    checkpoint_plugin.load_checkpoint.assert_called_with(str(tmp_path / "last-v1.ckpt"))


-def test_async_checkpoint_plugin(tmpdir):
+def test_async_checkpoint_plugin(tmp_path):
     """Ensure that the custom checkpoint IO plugin and torch checkpoint IO plugin is called when async saving and
     loading."""

@@ -111,11 +111,11 @@ def on_fit_start(self):
     base_ckpt_io.save_checkpoint = Mock(wraps=base_ckpt_io.save_checkpoint)
     base_ckpt_io.remove_checkpoint = Mock(wraps=base_ckpt_io.remove_checkpoint)

-    ck = ModelCheckpoint(dirpath=tmpdir, save_top_k=2, monitor="step", mode="max")
+    ck = ModelCheckpoint(dirpath=tmp_path, save_top_k=2, monitor="step", mode="max")

     model = CustomBoringModel()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         plugins=[checkpoint_plugin],
         callbacks=ck,
         max_epochs=3,
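
Note that test_checkpoint_io_plugin.py needed more than the rename: the assertions also gained str(...) wrappers around tmp_path / "...". A likely reason, sketched here as an assumption rather than a statement from the commit: a py.path.local compares equal to the equivalent plain string, while a pathlib.Path does not, and attributes such as best_model_path hold plain strings.

    from pathlib import Path

    p = Path("/tmp/run/last.ckpt")
    assert p != "/tmp/run/last.ckpt"       # a Path never compares equal to a str
    assert str(p) == "/tmp/run/last.ckpt"  # hence str(...) in the rewritten asserts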
