test: replacing tmpdir with tmp_path in `tests_pytorch/checkpointing` (#19642)

* refactored tmpdir from the tests_pytorch/checkpointing dir
* fixes

---------

Co-authored-by: Adrian Wälchli <aedu.waelchli@gmail.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
3 people committed Mar 16, 2024
1 parent 5325446 commit b9edd18
Showing 5 changed files with 220 additions and 216 deletions.
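
The diff below swaps pytest's legacy `tmpdir` fixture for the newer `tmp_path` fixture. As a rough sketch of the difference (the test names here are illustrative and not part of this commit): `tmpdir` yields a `py.path.local` object that often gets wrapped in `str(...)`, while `tmp_path` yields a standard `pathlib.Path` that most path-accepting APIs take directly.

```python
# Illustrative sketch only (not part of this commit): contrast of the two fixtures.
from pathlib import Path


def test_with_legacy_tmpdir(tmpdir):
    # `tmpdir` is a py.path.local; callers frequently converted it with str().
    assert str(tmpdir)


def test_with_tmp_path(tmp_path):
    # `tmp_path` is a pathlib.Path, so it supports the `/` operator and
    # can be passed to path-accepting APIs without conversion.
    assert isinstance(tmp_path, Path)
    checkpoint_dir = tmp_path / "checkpoints"
    checkpoint_dir.mkdir()
    assert checkpoint_dir.is_dir()
```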
@@ -22,7 +22,7 @@
from tests_pytorch.helpers.runif import RunIf


-def test_disabled_checkpointing(tmpdir):
+def test_disabled_checkpointing():
# no callback
trainer = Trainer(max_epochs=3, enable_checkpointing=False)
assert not trainer.checkpoint_callbacks
@@ -34,10 +34,10 @@ def test_disabled_checkpointing(tmpdir):
@pytest.mark.parametrize(
("epochs", "val_check_interval", "expected"), [(1, 1.0, 1), (2, 1.0, 2), (1, 0.25, 4), (2, 0.3, 6)]
)
-def test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_interval: float, expected: int):
+def test_default_checkpoint_freq(save_mock, tmp_path, epochs: int, val_check_interval: float, expected: int):
model = BoringModel()
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
max_epochs=epochs,
enable_model_summary=False,
val_check_interval=val_check_interval,
@@ -55,7 +55,7 @@ def test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_inter
("k", "epochs", "val_check_interval", "expected"), [(1, 1, 1.0, 1), (2, 2, 1.0, 2), (2, 1, 0.25, 4), (2, 2, 0.3, 6)]
)
@pytest.mark.parametrize("save_last", [False, True, "link"])
-def test_top_k(save_mock, tmpdir, k, epochs, val_check_interval, expected, save_last):
+def test_top_k(save_mock, tmp_path, k, epochs, val_check_interval, expected, save_last):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
@@ -71,8 +71,8 @@ def training_step(self, batch, batch_idx):

model = TestModel()
trainer = Trainer(
-callbacks=[callbacks.ModelCheckpoint(dirpath=tmpdir, monitor="my_loss", save_top_k=k, save_last=save_last)],
-default_root_dir=tmpdir,
+callbacks=[callbacks.ModelCheckpoint(dirpath=tmp_path, monitor="my_loss", save_top_k=k, save_last=save_last)],
+default_root_dir=tmp_path,
max_epochs=epochs,
enable_model_summary=False,
val_check_interval=val_check_interval,
@@ -87,7 +87,7 @@ def training_step(self, batch, batch_idx):
@mock.patch("torch.save")
@RunIf(min_cuda_gpus=2, standalone=True)
@pytest.mark.parametrize(("k", "epochs", "val_check_interval", "expected"), [(1, 1, 1.0, 1), (2, 2, 0.3, 4)])
-def test_top_k_ddp(save_mock, tmpdir, k, epochs, val_check_interval, expected):
+def test_top_k_ddp(save_mock, tmp_path, k, epochs, val_check_interval, expected):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
local_rank = int(os.getenv("LOCAL_RANK"))
@@ -106,8 +106,8 @@ def on_train_epoch_end(self):

model = TestModel()
trainer = Trainer(
-callbacks=[callbacks.ModelCheckpoint(dirpath=tmpdir, monitor="my_loss_step", save_top_k=k, mode="max")],
-default_root_dir=tmpdir,
+callbacks=[callbacks.ModelCheckpoint(dirpath=tmp_path, monitor="my_loss_step", save_top_k=k, mode="max")],
+default_root_dir=tmp_path,
enable_progress_bar=False,
max_epochs=epochs,
enable_model_summary=False,
10 changes: 5 additions & 5 deletions tests/tests_pytorch/checkpointing/test_legacy_checkpoints.py
@@ -38,15 +38,15 @@

@pytest.mark.parametrize("pl_version", LEGACY_BACK_COMPATIBLE_PL_VERSIONS)
@RunIf(sklearn=True)
-def test_load_legacy_checkpoints(tmpdir, pl_version: str):
+def test_load_legacy_checkpoints(tmp_path, pl_version: str):
PATH_LEGACY = os.path.join(LEGACY_CHECKPOINTS_PATH, pl_version)
with patch("sys.path", [PATH_LEGACY] + sys.path):
path_ckpts = sorted(glob.glob(os.path.join(PATH_LEGACY, f"*{CHECKPOINT_EXTENSION}")))
assert path_ckpts, f'No checkpoints found in folder "{PATH_LEGACY}"'
path_ckpt = path_ckpts[-1]

model = ClassificationModel.load_from_checkpoint(path_ckpt, num_features=24)
-trainer = Trainer(default_root_dir=str(tmpdir))
+trainer = Trainer(default_root_dir=tmp_path)
dm = ClassifDataModule(num_features=24, length=6000, batch_size=128, n_clusters_per_class=2, n_informative=8)
res = trainer.test(model, datamodule=dm)
assert res[0]["test_loss"] <= 0.85, str(res[0]["test_loss"])
@@ -67,7 +67,7 @@ def on_train_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningMo

@pytest.mark.parametrize("pl_version", LEGACY_BACK_COMPATIBLE_PL_VERSIONS)
@RunIf(sklearn=True)
-def test_legacy_ckpt_threading(tmpdir, pl_version: str):
+def test_legacy_ckpt_threading(pl_version: str):
PATH_LEGACY = os.path.join(LEGACY_CHECKPOINTS_PATH, pl_version)
path_ckpts = sorted(glob.glob(os.path.join(PATH_LEGACY, f"*{CHECKPOINT_EXTENSION}")))
assert path_ckpts, f'No checkpoints found in folder "{PATH_LEGACY}"'
@@ -93,7 +93,7 @@ def load_model():

@pytest.mark.parametrize("pl_version", LEGACY_BACK_COMPATIBLE_PL_VERSIONS)
@RunIf(sklearn=True)
-def test_resume_legacy_checkpoints(tmpdir, pl_version: str):
+def test_resume_legacy_checkpoints(tmp_path, pl_version: str):
PATH_LEGACY = os.path.join(LEGACY_CHECKPOINTS_PATH, pl_version)
with patch("sys.path", [PATH_LEGACY] + sys.path):
path_ckpts = sorted(glob.glob(os.path.join(PATH_LEGACY, f"*{CHECKPOINT_EXTENSION}")))
@@ -105,7 +105,7 @@ def test_resume_legacy_checkpoints(tmpdir, pl_version: str):
stop = LimitNbEpochs(1)

trainer = Trainer(
-default_root_dir=str(tmpdir),
+default_root_dir=tmp_path,
accelerator="auto",
devices=1,
precision=("16-mixed" if torch.cuda.is_available() else "32-true"),
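
For the lines that previously wrapped the fixture in `str(...)`, no conversion should be needed after this change: `Trainer(default_root_dir=...)` and `ModelCheckpoint(dirpath=...)` accept path-like objects, so a `pathlib.Path` such as `tmp_path` can be passed as-is. A minimal sketch, assuming the public Lightning API; the test name below is hypothetical and not part of this diff:

```python
# Hypothetical example, not from this commit: pass tmp_path directly as a Path.
from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import ModelCheckpoint


def test_checkpoint_dir_accepts_path(tmp_path):
    checkpoint = ModelCheckpoint(dirpath=tmp_path / "ckpts")  # Path, no str() needed
    trainer = Trainer(
        default_root_dir=tmp_path,  # Path accepted directly
        callbacks=[checkpoint],
        max_epochs=1,
        logger=False,
        enable_progress_bar=False,
    )
    # The user-supplied callback is registered as the checkpoint callback.
    assert trainer.checkpoint_callbacks == [checkpoint]
```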
