test: replacing tmpdir with tmp_path in tests_pytorch/utilities (
fnhirwa committed Mar 17, 2024
1 parent 7cc71dc commit c66b23f
Showing 5 changed files with 18 additions and 18 deletions.
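
For context on the change itself: both pytest fixtures give each test its own fresh temporary directory, but `tmpdir` yields a legacy `py.path.local` object while `tmp_path` yields a standard `pathlib.Path`. A minimal sketch of the newer fixture in use (the test name below is made up for illustration and is not part of this commit):

    import pathlib

    def test_writes_a_checkpoint(tmp_path):
        # pytest injects a unique, empty pathlib.Path for this test run
        assert isinstance(tmp_path, pathlib.Path)
        ckpt = tmp_path / "model.ckpt"  # Path objects support the / operator
        ckpt.write_text("fake checkpoint")
        assert ckpt.exists()
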
6 changes: 3 additions & 3 deletions tests/tests_pytorch/utilities/migration/test_migration.py
@@ -86,8 +86,8 @@ def test_migrate_loop_current_epoch_to_progress_tracking():
 
 
 @pytest.mark.parametrize("model_class", [BoringModel, ManualOptimBoringModel])
-def test_migrate_loop_batches_that_stepped(tmpdir, model_class):
-    trainer = Trainer(max_steps=1, limit_val_batches=0, default_root_dir=tmpdir)
+def test_migrate_loop_batches_that_stepped(tmp_path, model_class):
+    trainer = Trainer(max_steps=1, limit_val_batches=0, default_root_dir=tmp_path)
     model = model_class()
     trainer.fit(model)
     ckpt_path = trainer.checkpoint_callback.best_model_path
@@ -103,7 +103,7 @@ def on_train_start(self) -> None:
             assert self.trainer.global_step == 1
             assert self.trainer.fit_loop.epoch_loop._batches_that_stepped == 1
 
-    trainer = Trainer(max_steps=2, limit_val_batches=0, default_root_dir=tmpdir)
+    trainer = Trainer(max_steps=2, limit_val_batches=0, default_root_dir=tmp_path)
     model = TestModel()
     trainer.fit(model, ckpt_path=ckpt_path)
     new_loop = trainer.fit_loop.epoch_loop
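
The swap is drop-in for the Trainer because `default_root_dir` is typed to accept either a string or a path-like object. A hedged sketch of that assumption (CPU-only construction; `max_epochs=1` is arbitrary):

    from lightning.pytorch import Trainer

    def test_default_root_dir_accepts_path(tmp_path):
        # Assumes Trainer coerces a path-like default_root_dir to a normalized string,
        # which is why passing tmp_path instead of tmpdir needs no other changes.
        trainer = Trainer(default_root_dir=tmp_path, max_epochs=1)
        assert str(trainer.default_root_dir) == str(tmp_path)
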
8 changes: 4 additions & 4 deletions tests/tests_pytorch/utilities/test_all_gather_grad.py
@@ -52,7 +52,7 @@ def test_all_gather_ddp_spawn():
 
 
 @RunIf(min_cuda_gpus=2, skip_windows=True, standalone=True)
-def test_all_gather_collection(tmpdir):
+def test_all_gather_collection(tmp_path):
     class TestModel(BoringModel):
         on_train_epoch_end_called = False
 
@@ -87,7 +87,7 @@ def on_train_epoch_end(self):
 
     model = TestModel()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         limit_train_batches=8,
         limit_val_batches=0,
         max_epochs=1,
@@ -105,7 +105,7 @@ def on_train_epoch_end(self):
 
 
 @RunIf(min_cuda_gpus=2, skip_windows=True, standalone=True)
-def test_all_gather_sync_grads(tmpdir):
+def test_all_gather_sync_grads(tmp_path):
     class TestModel(BoringModel):
         training_step_called = False
 
@@ -118,7 +118,7 @@ def training_step(self, batch, batch_idx):
 
     model = TestModel()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         limit_train_batches=1,
         limit_val_batches=0,
         max_epochs=1,
10 changes: 5 additions & 5 deletions tests/tests_pytorch/utilities/test_auto_restart.py
@@ -71,7 +71,7 @@ def val_dataloader(self):
 
 
 def _fit_model(
-    tmpdir, should_signal, val_check_interval, failure_on_step, failure_on_training, on_last_batch, status=None
+    tmp_path, should_signal, val_check_interval, failure_on_step, failure_on_training, on_last_batch, status=None
 ):
     seed_everything(42)
     model = TestAutoRestartModelUnderSignal(should_signal, failure_on_step, failure_on_training, on_last_batch)
@@ -87,13 +87,13 @@ def on_exception(self, trainer, pl_module, exception):
 
     test_callback = MyTestCallback()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         max_epochs=1,
         limit_train_batches=4,
         limit_val_batches=4,
         val_check_interval=val_check_interval,
         num_sanity_val_steps=0,
-        callbacks=[test_callback, OnExceptionCheckpoint(tmpdir)],
+        callbacks=[test_callback, OnExceptionCheckpoint(tmp_path)],
     )
     if should_signal:
         with pytest.raises(SIGTERMException):
@@ -111,7 +111,7 @@ def on_exception(self, trainer, pl_module, exception):
 @pytest.mark.parametrize("failure_on_training", [False, True])
 @pytest.mark.parametrize("failure_on_step", [False, True])
 @RunIf(skip_windows=True)
-def test_auto_restart_under_signal(on_last_batch, val_check_interval, failure_on_training, failure_on_step, tmpdir):
+def test_auto_restart_under_signal(on_last_batch, val_check_interval, failure_on_training, failure_on_step, tmp_path):
     if failure_on_step:
         if on_last_batch:
             if failure_on_training:
@@ -130,4 +130,4 @@ def test_auto_restart_under_signal(on_last_batch, val_check_interval, failure_on
             # `on_train_epoch_end` happens after `on_validation_epoch_end` since Lightning v1.4
             status = "_FitLoop:on_advance_end" if failure_on_training else "_TrainingEpochLoop:on_advance_end"
 
-    _fit_model(tmpdir, True, val_check_interval, failure_on_step, failure_on_training, on_last_batch, status=status)
+    _fit_model(tmp_path, True, val_check_interval, failure_on_step, failure_on_training, on_last_batch, status=status)
8 changes: 4 additions & 4 deletions tests/tests_pytorch/utilities/test_deepspeed_collate_checkpoint.py
@@ -23,11 +23,11 @@
 
 
 @RunIf(min_cuda_gpus=2, standalone=True, deepspeed=True)
-def test_deepspeed_collate_checkpoint(tmpdir):
+def test_deepspeed_collate_checkpoint(tmp_path):
     """Test to ensure that with DeepSpeed Stage 3 we can collate the sharded checkpoints into a single file."""
     model = BoringModel()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         strategy=DeepSpeedStrategy(stage=3),
         accelerator="gpu",
         devices=2,
@@ -37,12 +37,12 @@ def test_deepspeed_collate_checkpoint(tmpdir):
         enable_model_summary=False,
     )
     trainer.fit(model)
-    checkpoint_path = os.path.join(tmpdir, "model.pt")
+    checkpoint_path = os.path.join(tmp_path, "model.pt")
     checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
     trainer.save_checkpoint(checkpoint_path)
     if trainer.is_global_zero:
         # ensure function call works
-        output_path = os.path.join(tmpdir, "single_model.pt")
+        output_path = os.path.join(tmp_path, "single_model.pt")
         convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, output_path)
         _assert_checkpoint_equal(model, output_path)
 
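
One detail worth noting about the `os.path.join(tmp_path, ...)` lines above: `os.path.join` accepts `os.PathLike` objects such as `pathlib.Path` and still returns a plain `str`, so the checkpoint paths handed to `save_checkpoint` and `convert_zero_checkpoint_to_fp32_state_dict` keep the same type as before the switch. A small standalone sketch (the directory name is arbitrary):

    import os
    import pathlib

    tmp_path = pathlib.Path("/tmp/example")  # stand-in for the pytest fixture
    checkpoint_path = os.path.join(tmp_path, "model.pt")
    assert isinstance(checkpoint_path, str)  # os.path.join converts PathLike arguments
    assert checkpoint_path == str(tmp_path / "model.pt")
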
4 changes: 2 additions & 2 deletions tests/tests_pytorch/utilities/test_dtype_device_mixin.py
@@ -45,10 +45,10 @@ def on_train_batch_start(self, trainer, model, batch, batch_idx):
 
 
 @RunIf(min_cuda_gpus=2)
-def test_submodules_multi_gpu_ddp_spawn(tmpdir):
+def test_submodules_multi_gpu_ddp_spawn(tmp_path):
     model = TopModule()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         strategy="ddp_spawn",
         accelerator="gpu",
         devices=2,
