test: replacing tmpdir with tmp_path in tests_pytorch/loops (#19646)

refactored tmpdir to tmp_path in the tests_pytorch/loops dir
fnhirwa committed Mar 17, 2024
1 parent ac259c6 commit dd6d689
Showing 13 changed files with 90 additions and 90 deletions.
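For context, pytest's legacy `tmpdir` fixture yields a `py.path.local` object, whereas the modern `tmp_path` fixture yields a standard `pathlib.Path`; both point at a fresh per-test temporary directory. A minimal sketch of the difference (hypothetical test and file names, not taken from this commit):

    from pathlib import Path


    def test_with_legacy_tmpdir(tmpdir):
        # `tmpdir` is a `py.path.local` with its own join/write/read API
        out = tmpdir.join("metrics.txt")
        out.write("loss=0.1")
        assert out.read() == "loss=0.1"


    def test_with_tmp_path(tmp_path: Path):
        # `tmp_path` is a standard `pathlib.Path`
        out = tmp_path / "metrics.txt"
        out.write_text("loss=0.1")
        assert out.read_text() == "loss=0.1"

In this diff the fixture is only ever forwarded to `Trainer(default_root_dir=...)`, which accepts a filesystem path, so each change is a mechanical rename of the test parameter.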
10 changes: 5 additions & 5 deletions tests/tests_pytorch/loops/optimization/test_closure.py
@@ -18,14 +18,14 @@
 from lightning.pytorch.utilities.exceptions import MisconfigurationException


-def test_optimizer_step_no_closure_raises(tmpdir):
+def test_optimizer_step_no_closure_raises(tmp_path):
     class TestModel(BoringModel):
         def optimizer_step(self, epoch=None, batch_idx=None, optimizer=None, optimizer_closure=None, **_):
             # does not call `optimizer_closure()`
             pass

     model = TestModel()
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1)
+    trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=1)
     with pytest.raises(MisconfigurationException, match="The closure hasn't been executed"):
         trainer.fit(model)

@@ -39,12 +39,12 @@ def step(self, closure=None):
             return BrokenSGD(self.layer.parameters(), lr=0.1)

     model = TestModel()
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1)
+    trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=1)
     with pytest.raises(MisconfigurationException, match="The closure hasn't been executed"):
         trainer.fit(model)


-def test_closure_with_no_grad_optimizer(tmpdir):
+def test_closure_with_no_grad_optimizer(tmp_path):
     """Test that the closure is guaranteed to run with grad enabled.

     There are certain third-party library optimizers
@@ -67,6 +67,6 @@ def training_step(self, batch, batch_idx):
         def configure_optimizers(self):
             return NoGradAdamW(self.parameters(), lr=0.1)

-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1)
+    trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=1)
     model = TestModel()
     trainer.fit(model)
4 changes: 2 additions & 2 deletions tests/tests_pytorch/loops/optimization/test_manual_loop.py
@@ -28,7 +28,7 @@ def test_manual_result():
     assert result.extra == asdict


-def test_warning_invalid_trainstep_output(tmpdir):
+def test_warning_invalid_trainstep_output(tmp_path):
     class InvalidTrainStepModel(BoringModel):
         def __init__(self):
             super().__init__()
@@ -38,7 +38,7 @@ def training_step(self, batch, batch_idx):
             return 5

     model = InvalidTrainStepModel()
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1)
+    trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=1)

     with pytest.raises(MisconfigurationException, match="return a Tensor or have no return"):
         trainer.fit(model)
4 changes: 2 additions & 2 deletions tests/tests_pytorch/loops/optimization/test_optimizer_loop.py
@@ -70,15 +70,15 @@ def __getitem__(self, key: str) -> T:
         (OutputMapping({"a": 5}), "the 'loss' key needs to be present"),
     ],
 )
-def test_warning_invalid_trainstep_output(tmpdir, case):
+def test_warning_invalid_trainstep_output(tmp_path, case):
     output, match = case

     class InvalidTrainStepModel(BoringModel):
         def training_step(self, batch, batch_idx):
             return output

     model = InvalidTrainStepModel()
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1)
+    trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=1)

     with pytest.raises(MisconfigurationException, match=match):
         trainer.fit(model)
4 changes: 2 additions & 2 deletions tests/tests_pytorch/loops/test_all.py
@@ -87,13 +87,13 @@ def on_predict_batch_end(self, outputs, batch, *_):
         pytest.param("mps", marks=RunIf(mps=True)),
     ],
 )
-def test_callback_batch_on_device(tmpdir, accelerator):
+def test_callback_batch_on_device(tmp_path, accelerator):
     """Test that the batch object sent to the on_*_batch_start/end hooks is on the right device."""
     batch_callback = BatchHookObserverCallback()

     model = BatchHookObserverModel()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         max_steps=1,
         limit_train_batches=1,
         limit_val_batches=1,
16 changes: 8 additions & 8 deletions tests/tests_pytorch/loops/test_evaluation_loop.py
@@ -27,12 +27,12 @@


 @mock.patch("lightning.pytorch.loops.evaluation_loop._EvaluationLoop._on_evaluation_epoch_end")
-def test_on_evaluation_epoch_end(eval_epoch_end_mock, tmpdir):
+def test_on_evaluation_epoch_end(eval_epoch_end_mock, tmp_path):
     """Tests that `on_evaluation_epoch_end` is called for `on_validation_epoch_end` and `on_test_epoch_end` hooks."""
     model = BoringModel()

     trainer = Trainer(
-        default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, max_epochs=2, enable_model_summary=False
+        default_root_dir=tmp_path, limit_train_batches=2, limit_val_batches=2, max_epochs=2, enable_model_summary=False
     )

     trainer.fit(model)
@@ -89,7 +89,7 @@ def _get_dataloader():
 @mock.patch(
     "lightning.pytorch.trainer.connectors.logger_connector.logger_connector._LoggerConnector.log_eval_end_metrics"
 )
-def test_log_epoch_metrics_before_on_evaluation_end(update_eval_epoch_metrics_mock, tmpdir):
+def test_log_epoch_metrics_before_on_evaluation_end(update_eval_epoch_metrics_mock, tmp_path):
     """Test that the epoch metrics are logged before the `on_evaluation_end` hook is fired."""
     order = []
     update_eval_epoch_metrics_mock.side_effect = lambda _: order.append("log_epoch_metrics")
@@ -99,14 +99,14 @@ def on_validation_end(self):
             order.append("on_validation_end")
             super().on_validation_end()

-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1, enable_model_summary=False, num_sanity_val_steps=0)
+    trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=1, enable_model_summary=False, num_sanity_val_steps=0)
     trainer.fit(LessBoringModel())

     assert order == ["log_epoch_metrics", "on_validation_end"]


 @RunIf(min_cuda_gpus=1)
-def test_memory_consumption_validation(tmpdir):
+def test_memory_consumption_validation(tmp_path):
     """Test that the training batch is no longer in GPU memory when running validation.

     Cannot run with MPS, since there we can only measure shared memory and not dedicated, which device has how much
@@ -155,7 +155,7 @@ def validation_step(self, batch, batch_idx):
     trainer = Trainer(
         accelerator="gpu",
         devices=1,
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         fast_dev_run=2,
         enable_model_summary=False,
     )
@@ -460,7 +460,7 @@ def test_step(self, batch, batch_idx):
     assert seen == expected


-def test_evaluation_loop_when_batch_idx_argument_is_not_given(tmpdir):
+def test_evaluation_loop_when_batch_idx_argument_is_not_given(tmp_path):
     class TestModel(BoringModel):
         def __init__(self) -> None:
             super().__init__()
@@ -476,7 +476,7 @@ def test_step(self, batch):
             return {"y": self.step(batch)}

     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         fast_dev_run=1,
         logger=False,
         enable_checkpointing=False,
8 changes: 4 additions & 4 deletions tests/tests_pytorch/loops/test_evaluation_loop_flow.py
@@ -22,7 +22,7 @@
 from tests_pytorch.helpers.deterministic_model import DeterministicModel


-def test__eval_step__flow(tmpdir):
+def test__eval_step__flow(tmp_path):
     """Tests that only training_step can be used."""

     class TestModel(DeterministicModel):
@@ -45,7 +45,7 @@ def backward(self, loss):

     model = TestModel()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=2,
@@ -71,7 +71,7 @@ def backward(self, loss):
     assert opt_closure_result.item() == 171


-def test__eval_step__epoch_end__flow(tmpdir):
+def test__eval_step__epoch_end__flow(tmp_path):
     """Tests that only training_step can be used."""

     class TestModel(DeterministicModel):
@@ -96,7 +96,7 @@ def backward(self, loss):

     model = TestModel()
     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=2,
16 changes: 8 additions & 8 deletions tests/tests_pytorch/loops/test_fetchers.py
@@ -168,7 +168,7 @@ def measure() -> float:


 @pytest.mark.parametrize("automatic_optimization", [False, True])
-def test_fetching_dataloader_iter_opt(automatic_optimization, tmpdir):
+def test_fetching_dataloader_iter_opt(automatic_optimization, tmp_path):
     class TestModel(BoringModel):
         def __init__(self, *args, automatic_optimization: bool = False, **kwargs):
             super().__init__(*args, **kwargs)
@@ -207,7 +207,7 @@ def on_train_epoch_end(self):
             assert self.count == 64

     model = TestModel(automatic_optimization=automatic_optimization)
-    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, accelerator="cpu")
+    trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, accelerator="cpu")
     trainer.fit(model)

@@ -290,9 +290,9 @@ def train_dataloader(self):
         return DataLoader(RandomDataset(BATCH_SIZE, DATASET_LEN))


-def test_training_step_with_dataloader_iter(tmpdir) -> None:
+def test_training_step_with_dataloader_iter(tmp_path) -> None:
     """A baseline functional test for `training_step` with dataloader access."""
-    trainer = Trainer(max_epochs=1, default_root_dir=tmpdir, accelerator="cpu")
+    trainer = Trainer(max_epochs=1, default_root_dir=tmp_path, accelerator="cpu")
     m = AsyncBoringModel()
     trainer.fit(m)
     assert m.num_batches_processed == DATASET_LEN, f"Expect all {DATASET_LEN} batches to be processed."
@@ -406,7 +406,7 @@ def length(iterable, limit):


 @pytest.mark.parametrize("trigger_stop_iteration", [False, True])
-def test_stop_iteration_with_dataloader_iter(trigger_stop_iteration, tmpdir):
+def test_stop_iteration_with_dataloader_iter(trigger_stop_iteration, tmp_path):
     """Verify that StopIteration properly terminates the training when this is triggered from the current
     `dataloader_iter`"""
     EXPECT_NUM_BATCHES_PROCESSED = 2
@@ -428,7 +428,7 @@ def train_dataloader(self):
                 return DataLoader(RandomDataset(BATCH_SIZE, 2 * EXPECT_NUM_BATCHES_PROCESSED))
             return DataLoader(RandomDataset(BATCH_SIZE, EXPECT_NUM_BATCHES_PROCESSED))

-    trainer = Trainer(max_epochs=1, default_root_dir=tmpdir, accelerator="cpu")
+    trainer = Trainer(max_epochs=1, default_root_dir=tmp_path, accelerator="cpu")
     m = TestModel(trigger_stop_iteration)
     trainer.fit(m)
     expected = EXPECT_NUM_BATCHES_PROCESSED
@@ -437,7 +437,7 @@ def train_dataloader(self):
     assert m.num_batches_processed == expected


-def test_transfer_hooks_with_unpacking(tmpdir):
+def test_transfer_hooks_with_unpacking(tmp_path):
     """This test asserts the `transfer_batch` hooks are called only once per batch."""

     class RandomDictDataset(RandomDataset):
@@ -476,7 +476,7 @@ def validation_step(self, batch, batch_idx):
             x, _ = batch
             return super().validation_step(x, batch_idx)

-    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, num_sanity_val_steps=0)
+    trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, num_sanity_val_steps=0)
     dm = BoringDataModule()
     trainer.fit(TestModel(), datamodule=dm)
     assert dm.count_called_on_before_batch_transfer == 4
4 changes: 2 additions & 2 deletions tests/tests_pytorch/loops/test_flow_warnings.py
@@ -22,12 +22,12 @@ def training_step(self, batch, batch_idx):
         return self.step(batch[0])


-def test_no_depre_without_epoch_end(tmpdir):
+def test_no_depre_without_epoch_end(tmp_path):
     """Tests that only training_step can be used."""
     model = TestModel()

     trainer = Trainer(
-        default_root_dir=tmpdir,
+        default_root_dir=tmp_path,
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=2,
