test: replacing tmpdir with tmp_path in tests_pytorch/models (#…
fnhirwa committed Mar 17, 2024
1 parent 1ffa5cd commit ff8e30b
Showing 9 changed files with 222 additions and 222 deletions.
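For context: pytest's legacy `tmpdir` fixture yields a `py.path.local` object, while the newer `tmp_path` fixture yields a standard-library `pathlib.Path` pointing at the same kind of per-test temporary directory. The tests touched here only forward the value as a directory path (e.g. `default_root_dir`, `ModelCheckpoint(dirpath=...)`), so the fixture can presumably be swapped one-for-one by renaming the argument. A minimal sketch of the difference between the two fixtures, separate from this commit (the two test functions below are illustrative only):

import os
import pathlib


def test_with_tmpdir(tmpdir):
    # legacy pytest fixture: `tmpdir` is a `py.path.local`
    assert os.path.isdir(str(tmpdir))


def test_with_tmp_path(tmp_path):
    # modern pytest fixture: `tmp_path` is a `pathlib.Path`
    assert isinstance(tmp_path, pathlib.Path)
    assert tmp_path.is_dir()
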
18 changes: 9 additions & 9 deletions tests/tests_pytorch/models/test_amp.py
@@ -57,10 +57,10 @@ def _assert_autocast_enabled(self):
pytest.param("ddp_spawn", "bf16-mixed", 2, marks=RunIf(skip_windows=True)),
],
)
-def test_amp_cpus(tmpdir, strategy, precision, devices):
+def test_amp_cpus(tmp_path, strategy, precision, devices):
"""Make sure combinations of AMP and strategies work if supported."""
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
accelerator="cpu",
devices=devices,
strategy=strategy,
@@ -85,10 +85,10 @@ def test_amp_cpus(tmpdir, strategy, precision, devices):
@pytest.mark.parametrize(
"devices", [pytest.param(1, marks=RunIf(min_cuda_gpus=1)), pytest.param(2, marks=RunIf(min_cuda_gpus=2))]
)
-def test_amp_gpus(tmpdir, precision, devices):
+def test_amp_gpus(tmp_path, precision, devices):
"""Make sure combinations of AMP and strategies work if supported."""
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
max_epochs=1,
accelerator="gpu",
devices=devices,
@@ -115,20 +115,20 @@ def test_amp_gpus(tmpdir, precision, devices):
"SLURM_PROCID": "0",
},
)
-def test_amp_gpu_ddp_slurm_managed(tmpdir):
+def test_amp_gpu_ddp_slurm_managed(tmp_path):
"""Make sure DDP + AMP work."""
# simulate setting slurm flags
model = AMPTestModel()

# exp file to get meta
-logger = tutils.get_default_logger(tmpdir)
+logger = tutils.get_default_logger(tmp_path)

# exp file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)

# fit model
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
max_epochs=1,
accelerator="gpu",
devices=[0],
@@ -143,11 +143,11 @@ def test_amp_gpu_ddp_slurm_managed(tmpdir):

@pytest.mark.parametrize("clip_val", [0, 10])
@mock.patch("torch.nn.utils.clip_grad_norm_")
-def test_precision_16_clip_gradients(mock_clip_grad_norm, clip_val, tmpdir):
+def test_precision_16_clip_gradients(mock_clip_grad_norm, clip_val, tmp_path):
"""Ensure that clip gradients is only called if the value is greater than 0."""
model = BoringModel()
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
enable_progress_bar=False,
max_epochs=1,
devices=1,
50 changes: 25 additions & 25 deletions tests/tests_pytorch/models/test_cpu.py
@@ -27,24 +27,24 @@


@mock.patch("lightning.fabric.plugins.environments.slurm.SLURMEnvironment.detect", return_value=True)
-def test_cpu_slurm_save_load(_, tmpdir):
+def test_cpu_slurm_save_load(_, tmp_path):
"""Verify model save/load/checkpoint on CPU."""
seed_everything(42)

model = BoringModel()

# logger file to get meta
-logger = tutils.get_default_logger(tmpdir)
+logger = tutils.get_default_logger(tmp_path)
version = logger.version

# fit model
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
max_epochs=1,
logger=logger,
limit_train_batches=0.2,
limit_val_batches=0.2,
-callbacks=[ModelCheckpoint(dirpath=tmpdir)],
+callbacks=[ModelCheckpoint(dirpath=tmp_path)],
)
trainer.fit(model)
real_global_step = trainer.global_step
@@ -75,7 +75,7 @@ def test_cpu_slurm_save_load(_, tmpdir):
assert os.path.exists(hpc_save_path)

# new logger file to get meta
-logger = tutils.get_default_logger(tmpdir, version=version)
+logger = tutils.get_default_logger(tmp_path, version=version)

model = BoringModel()

@@ -92,17 +92,17 @@ def on_train_epoch_start(self, trainer, model):
model.train(mode)

trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
max_epochs=1,
logger=logger,
-callbacks=[_StartCallback(), ModelCheckpoint(dirpath=tmpdir)],
+callbacks=[_StartCallback(), ModelCheckpoint(dirpath=tmp_path)],
)
# by calling fit again, we trigger training, loading weights from the cluster
# and our hook to predict using current model before any more weight updates
trainer.fit(model)


-def test_early_stopping_cpu_model(tmpdir):
+def test_early_stopping_cpu_model(tmp_path):
seed_everything(42)

class ModelTrainVal(BoringModel):
@@ -114,7 +114,7 @@ def validation_step(self, *args, **kwargs):
stopping = EarlyStopping(monitor="val_loss", min_delta=0.1)
trainer_options = {
"callbacks": [stopping],
"default_root_dir": tmpdir,
"default_root_dir": tmp_path,
"gradient_clip_val": 1.0,
"enable_progress_bar": False,
"accumulate_grad_batches": 2,
@@ -131,12 +131,12 @@ def validation_step(self, *args, **kwargs):


@RunIf(skip_windows=True, sklearn=True)
-def test_multi_cpu_model_ddp(tmpdir):
+def test_multi_cpu_model_ddp(tmp_path):
"""Make sure DDP works."""
seed_everything(42)

trainer_options = {
"default_root_dir": tmpdir,
"default_root_dir": tmp_path,
"enable_progress_bar": False,
"max_epochs": 1,
"limit_train_batches": 0.4,
@@ -151,7 +151,7 @@ def test_multi_cpu_model_ddp(tmpdir):
tpipes.run_model_test(trainer_options, model, data=dm)


-def test_lbfgs_cpu_model(tmpdir):
+def test_lbfgs_cpu_model(tmp_path):
"""Test each of the trainer options.
Testing LBFGS optimizer
@@ -167,7 +167,7 @@ def __init__(self, optimizer_name, learning_rate):
self.save_hyperparameters()

trainer_options = {
"default_root_dir": tmpdir,
"default_root_dir": tmp_path,
"max_epochs": 1,
"enable_progress_bar": False,
"limit_train_batches": 0.2,
@@ -178,12 +178,12 @@ def __init__(self, optimizer_name, learning_rate):
tpipes.run_model_test_without_loggers(trainer_options, model, min_acc=0.01)


-def test_default_logger_callbacks_cpu_model(tmpdir):
+def test_default_logger_callbacks_cpu_model(tmp_path):
"""Test each of the trainer options."""
seed_everything(42)

trainer_options = {
"default_root_dir": tmpdir,
"default_root_dir": tmp_path,
"max_epochs": 1,
"gradient_clip_val": 1.0,
"overfit_batches": 0.20,
@@ -200,7 +200,7 @@ def test_default_logger_callbacks_cpu_model(tmpdir):
model.unfreeze()


-def test_running_test_after_fitting(tmpdir):
+def test_running_test_after_fitting(tmp_path):
"""Verify test() on fitted model."""
seed_everything(42)

@@ -218,14 +218,14 @@ def test_step(self, *args, **kwargs):
model = ModelTrainValTest()

# logger file to get meta
-logger = tutils.get_default_logger(tmpdir)
+logger = tutils.get_default_logger(tmp_path)

# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)

# fit model
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
enable_progress_bar=False,
max_epochs=2,
limit_train_batches=0.4,
@@ -244,7 +244,7 @@ def test_step(self, *args, **kwargs):
tutils.assert_ok_model_acc(trainer, key="test_loss", thr=0.5)


-def test_running_test_no_val(tmpdir):
+def test_running_test_no_val(tmp_path):
"""Verify `test()` works on a model with no `val_dataloader`.
It performs train and test only
@@ -263,14 +263,14 @@ def test_step(self, *args, **kwargs):
model = ModelTrainTest()

# logger file to get meta
-logger = tutils.get_default_logger(tmpdir)
+logger = tutils.get_default_logger(tmp_path)

# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)

# fit model
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
enable_progress_bar=False,
max_epochs=1,
limit_train_batches=0.4,
@@ -289,11 +289,11 @@ def test_step(self, *args, **kwargs):
tutils.assert_ok_model_acc(trainer, key="test_loss")


-def test_cpu_model(tmpdir):
+def test_cpu_model(tmp_path):
"""Make sure model trains on CPU."""
seed_everything(42)
trainer_options = {
"default_root_dir": tmpdir,
"default_root_dir": tmp_path,
"enable_progress_bar": False,
"max_epochs": 1,
"limit_train_batches": 4,
@@ -304,11 +304,11 @@ def test_cpu_model(tmpdir):
tpipes.run_model_test(trainer_options, model)


-def test_all_features_cpu_model(tmpdir):
+def test_all_features_cpu_model(tmp_path):
"""Test each of the trainer options."""
seed_everything(42)
trainer_options = {
"default_root_dir": tmpdir,
"default_root_dir": tmp_path,
"gradient_clip_val": 1.0,
"overfit_batches": 0.20,
"enable_progress_bar": False,
8 changes: 4 additions & 4 deletions tests/tests_pytorch/models/test_gpu.py
@@ -34,11 +34,11 @@


@RunIf(min_cuda_gpus=2, sklearn=True)
-def test_multi_gpu_none_backend(tmpdir):
+def test_multi_gpu_none_backend(tmp_path):
"""Make sure when using multiple GPUs the user can't use `accelerator = None`."""
seed_everything(42)
trainer_options = {
"default_root_dir": tmpdir,
"default_root_dir": tmp_path,
"enable_progress_bar": False,
"max_epochs": 1,
"limit_train_batches": 0.2,
@@ -55,10 +55,10 @@ def test_multi_gpu_none_backend(tmpdir):

@RunIf(min_cuda_gpus=2)
@pytest.mark.parametrize("devices", [1, [0], [1]])
-def test_single_gpu_model(tmpdir, devices):
+def test_single_gpu_model(tmp_path, devices):
seed_everything(42)
trainer_options = {
"default_root_dir": tmpdir,
"default_root_dir": tmp_path,
"enable_progress_bar": False,
"max_epochs": 1,
"limit_train_batches": 0.1,
