test: replacing tmpdir with tmp_path in `tests_pytorch/accelerators` (#19641)
fnhirwa committed Mar 16, 2024
1 parent 178e198 commit 84c94ef
Showing 4 changed files with 19 additions and 19 deletions.
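The change is mechanical: pytest's legacy `tmpdir` fixture yields a `py.path.local`, while the recommended `tmp_path` fixture yields a standard `pathlib.Path`; both point at a unique per-test temporary directory. A minimal sketch of the two fixtures side by side (illustrative only; these test names are not part of this commit):

from pathlib import Path


def test_with_tmpdir(tmpdir):  # legacy fixture: py.path.local
    ckpt = tmpdir.join("model.pt")           # py.path.local API
    ckpt.write("fake checkpoint contents")
    assert ckpt.check(file=1)


def test_with_tmp_path(tmp_path: Path):  # preferred fixture: pathlib.Path
    ckpt = tmp_path / "model.pt"             # standard pathlib API
    ckpt.write_text("fake checkpoint contents")
    assert ckpt.is_file()

Both `os.path.join` and Lightning's `Trainer(default_root_dir=...)` accept either a string or a `Path`, which is why the diff below only needs to swap the fixture name.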
10 changes: 5 additions & 5 deletions tests/tests_pytorch/accelerators/test_cpu.py
@@ -29,7 +29,7 @@ def test_availability():


@RunIf(psutil=True)
-def test_get_device_stats(tmpdir):
+def test_get_device_stats(tmp_path):
gpu_stats = CPUAccelerator().get_device_stats(Mock())
fields = ["cpu_vm_percent", "cpu_percent", "cpu_swap_percent"]

@@ -38,7 +38,7 @@ def test_get_device_stats(tmpdir):


@pytest.mark.parametrize("restore_after_pre_setup", [True, False])
-def test_restore_checkpoint_after_pre_setup(tmpdir, restore_after_pre_setup):
+def test_restore_checkpoint_after_pre_setup(tmp_path, restore_after_pre_setup):
"""Test to ensure that if restore_checkpoint_after_setup is True, then we only load the state after pre- dispatch
is called."""

@@ -58,10 +58,10 @@ def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:
return super().load_checkpoint(checkpoint_path)

model = BoringModel()
-trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
+trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=True)
trainer.fit(model)

-checkpoint_path = os.path.join(tmpdir, "model.pt")
+checkpoint_path = os.path.join(tmp_path, "model.pt")
trainer.save_checkpoint(checkpoint_path)

plugin = TestPlugin(
@@ -72,7 +72,7 @@ def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:
)
assert plugin.restore_checkpoint_after_setup == restore_after_pre_setup

-trainer = Trainer(default_root_dir=tmpdir, strategy=plugin, fast_dev_run=True)
+trainer = Trainer(default_root_dir=tmp_path, strategy=plugin, fast_dev_run=True)
trainer.fit(model, ckpt_path=checkpoint_path)
for func in (trainer.test, trainer.validate, trainer.predict):
plugin.setup_called = False
4 changes: 2 additions & 2 deletions tests/tests_pytorch/accelerators/test_gpu.py
@@ -45,10 +45,10 @@ def test_get_nvidia_gpu_stats():

@RunIf(min_cuda_gpus=1)
@mock.patch("torch.cuda.set_device")
-def test_set_cuda_device(set_device_mock, tmpdir):
+def test_set_cuda_device(set_device_mock, tmp_path):
model = BoringModel()
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
fast_dev_run=True,
accelerator="gpu",
devices=1,
4 changes: 2 additions & 2 deletions tests/tests_pytorch/accelerators/test_mps.py
@@ -54,10 +54,10 @@ def test_trainer_mps_accelerator(accelerator_value):

@RunIf(mps=True)
@pytest.mark.parametrize("devices", [1, [0], "-1"])
-def test_single_gpu_model(tmpdir, devices):
+def test_single_gpu_model(tmp_path, devices):
"""Make sure single GPU works."""
trainer_options = {
"default_root_dir": tmpdir,
"default_root_dir": tmp_path,
"enable_progress_bar": False,
"max_epochs": 1,
"limit_train_batches": 0.1,
20 changes: 10 additions & 10 deletions tests/tests_pytorch/accelerators/test_xla.py
@@ -52,7 +52,7 @@ def forward(self, x):

@RunIf(tpu=True, standalone=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
-def test_resume_training_on_cpu(tmpdir):
+def test_resume_training_on_cpu(tmp_path):
"""Checks if training can be resumed from a saved checkpoint on CPU."""
# Train a model on TPU
model = BoringModel()
@@ -72,16 +72,16 @@ def test_resume_training_on_cpu(tmpdir):
assert weight_tensor.device == torch.device("cpu")

# Verify that training is resumed on CPU
-trainer = Trainer(max_epochs=1, default_root_dir=tmpdir)
+trainer = Trainer(max_epochs=1, default_root_dir=tmp_path)
trainer.fit(model, ckpt_path=model_path)


@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
-def test_if_test_works_after_train(tmpdir):
+def test_if_test_works_after_train(tmp_path):
"""Ensure that .test() works after .fit()"""
model = BoringModel()
trainer = Trainer(max_epochs=1, accelerator="tpu", devices="auto", default_root_dir=tmpdir, fast_dev_run=True)
trainer = Trainer(max_epochs=1, accelerator="tpu", devices="auto", default_root_dir=tmp_path, fast_dev_run=True)
trainer.fit(model)
out = trainer.test(model)
assert len(out) == 1
@@ -159,13 +159,13 @@ def on_train_end(self):

@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
-def test_manual_optimization_tpus(tmpdir):
+def test_manual_optimization_tpus(tmp_path):
model = ManualOptimizationModel()
model_copy = deepcopy(model)

trainer = Trainer(
max_epochs=1,
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
limit_train_batches=3,
limit_test_batches=0,
limit_val_batches=0,
@@ -198,13 +198,13 @@ def test_strategy_choice_tpu_strategy():

@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
-def test_auto_parameters_tying_tpus(tmpdir):
+def test_auto_parameters_tying_tpus(tmp_path):
model = WeightSharingModule()
shared_params = find_shared_parameters(model)

assert shared_params[0] == ["layer_1.weight", "layer_3.weight"]

-trainer = Trainer(default_root_dir=tmpdir, limit_train_batches=3, accelerator="tpu", devices="auto", max_epochs=1)
+trainer = Trainer(default_root_dir=tmp_path, limit_train_batches=3, accelerator="tpu", devices="auto", max_epochs=1)
trainer.fit(model)

assert torch.equal(model.layer_1.weight, model.layer_3.weight)
@@ -236,9 +236,9 @@ def forward(self, x):

@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
-def test_auto_parameters_tying_tpus_nested_module(tmpdir):
+def test_auto_parameters_tying_tpus_nested_module(tmp_path):
model = NestedModule()
-trainer = Trainer(default_root_dir=tmpdir, limit_train_batches=3, accelerator="tpu", devices="auto", max_epochs=1)
+trainer = Trainer(default_root_dir=tmp_path, limit_train_batches=3, accelerator="tpu", devices="auto", max_epochs=1)
trainer.fit(model)

assert torch.all(torch.eq(model.net_a.layer.weight, model.net_b.layer.weight))
