test: replacing tmpdir with tmp_path in tests_pytorch/core (#19644)

Refactored the tests under tests_pytorch/core to use the tmp_path fixture in place of tmpdir.
fnhirwa committed Mar 17, 2024
1 parent ff8e30b commit da49053
Showing 4 changed files with 52 additions and 50 deletions.
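
For context: pytest's legacy `tmpdir` fixture yields a `py.path.local` object from the maintenance-mode `py` library, while `tmp_path` yields a standard `pathlib.Path`, which is what current pytest documentation recommends for new code. Below is a minimal sketch of the pattern the commit applies, using a hypothetical toy test (`test_writes_into_tmp_path` is illustrative, not part of this suite):

```python
import pathlib


def test_writes_into_tmp_path(tmp_path):
    # `tmp_path` is a pathlib.Path pointing at a fresh, per-test directory.
    assert isinstance(tmp_path, pathlib.Path)

    # Path joining uses the standard "/" operator instead of tmpdir.join(...).
    log_dir = tmp_path / "logs"
    log_dir.mkdir()
    (log_dir / "metrics.txt").write_text("loss=0.1\n")

    assert (log_dir / "metrics.txt").read_text() == "loss=0.1\n"
```

In the diff that follows the swap is mechanical: `Trainer(default_root_dir=...)` and `ModelCheckpoint(dirpath=...)` accept a path-like object either way, so only the fixture name in each test signature and its uses change.
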
24 changes: 13 additions & 11 deletions tests/tests_pytorch/core/test_datamodules.py
@@ -145,7 +145,7 @@ def test_dm_pickle_after_init():


@RunIf(sklearn=True)
def test_train_loop_only(tmpdir):
def test_train_loop_only(tmp_path):
seed_everything(7)

dm = ClassifDataModule()
@@ -154,7 +154,7 @@ def test_train_loop_only(tmpdir):
model.validation_step = None
model.test_step = None

trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, enable_model_summary=False)
trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, enable_model_summary=False)

# fit model
trainer.fit(model, datamodule=dm)
@@ -163,23 +163,23 @@ def test_train_loop_only(tmpdir):


@RunIf(sklearn=True)
def test_train_val_loop_only(tmpdir):
def test_train_val_loop_only(tmp_path):
seed_everything(7)

dm = ClassifDataModule()
model = ClassificationModel()

model.validation_step = None

trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, enable_model_summary=False)
trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, enable_model_summary=False)

# fit model
trainer.fit(model, datamodule=dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.callback_metrics["train_loss"] < 1.1


def test_dm_checkpoint_save_and_load(tmpdir):
def test_dm_checkpoint_save_and_load(tmp_path):
class CustomBoringModel(BoringModel):
def validation_step(self, batch, batch_idx):
out = super().validation_step(batch, batch_idx)
@@ -197,12 +197,12 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
model = CustomBoringModel()

trainer = Trainer(
default_root_dir=tmpdir,
default_root_dir=tmp_path,
max_epochs=1,
limit_train_batches=2,
limit_val_batches=1,
enable_model_summary=False,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on")],
callbacks=[ModelCheckpoint(dirpath=tmp_path, monitor="early_stop_on")],
)

# fit model
@@ -219,13 +219,13 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:


@RunIf(sklearn=True)
def test_full_loop(tmpdir):
def test_full_loop(tmp_path):
seed_everything(7)

dm = ClassifDataModule()
model = ClassificationModel()

trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, enable_model_summary=False, deterministic="warn")
trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, enable_model_summary=False, deterministic="warn")

# fit model
trainer.fit(model, dm)
@@ -243,7 +243,7 @@ def test_full_loop(tmpdir):
assert result[0]["test_acc"] > 0.57


def test_dm_reload_dataloaders_every_n_epochs(tmpdir):
def test_dm_reload_dataloaders_every_n_epochs(tmp_path):
"""Test datamodule, where trainer argument reload_dataloaders_every_n_epochs is set to a non negative integer."""

class CustomBoringDataModule(BoringDataModule):
@@ -262,7 +262,9 @@ def train_dataloader(self):
model.validation_step = None
model.test_step = None

trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, limit_train_batches=2, reload_dataloaders_every_n_epochs=2)
trainer = Trainer(
default_root_dir=tmp_path, max_epochs=3, limit_train_batches=2, reload_dataloaders_every_n_epochs=2
)
trainer.fit(model, dm)


34 changes: 17 additions & 17 deletions tests/tests_pytorch/core/test_lightning_module.py
@@ -75,29 +75,29 @@ def test_property_local_rank():
assert model.local_rank == 123


def test_property_logger(tmpdir):
def test_property_logger(tmp_path):
"""Test that the logger in LightningModule is accessible via the Trainer."""
model = BoringModel()
assert model.logger is None

logger = TensorBoardLogger(tmpdir)
logger = TensorBoardLogger(tmp_path)
trainer = Trainer(logger=logger)
model.trainer = trainer
assert model.logger == logger


def test_property_loggers(tmpdir):
def test_property_loggers(tmp_path):
"""Test that loggers in LightningModule is accessible via the Trainer."""
model = BoringModel()
assert model.loggers == []

logger = TensorBoardLogger(tmpdir)
logger = TensorBoardLogger(tmp_path)
trainer = Trainer(logger=logger)
model.trainer = trainer
assert model.loggers == [logger]

logger0 = TensorBoardLogger(tmpdir)
logger1 = TensorBoardLogger(tmpdir)
logger0 = TensorBoardLogger(tmp_path)
logger1 = TensorBoardLogger(tmp_path)
trainer = Trainer(logger=[logger0, logger1])
model.trainer = trainer
assert model.loggers == [logger0, logger1]
@@ -120,7 +120,7 @@ def test_1_optimizer_toggle_model():
assert not model._param_requires_grad_state


def test_toggle_untoggle_2_optimizers_no_shared_parameters(tmpdir):
def test_toggle_untoggle_2_optimizers_no_shared_parameters(tmp_path):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
@@ -178,11 +178,11 @@ def configure_optimizers(self):

model = TestModel()

trainer = Trainer(max_epochs=1, default_root_dir=tmpdir, limit_train_batches=8, limit_val_batches=0)
trainer = Trainer(max_epochs=1, default_root_dir=tmp_path, limit_train_batches=8, limit_val_batches=0)
trainer.fit(model)


def test_toggle_untoggle_3_optimizers_shared_parameters(tmpdir):
def test_toggle_untoggle_3_optimizers_shared_parameters(tmp_path):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
@@ -279,7 +279,7 @@ def configure_optimizers(self):
return [optimizer_1, optimizer_2, optimizer_3]

model = TestModel()
trainer = Trainer(max_epochs=1, default_root_dir=tmpdir, limit_train_batches=8)
trainer = Trainer(max_epochs=1, default_root_dir=tmp_path, limit_train_batches=8)
trainer.fit(model)


@@ -290,9 +290,9 @@ def configure_optimizers(self):
pytest.param("mps", "mps:0", marks=RunIf(mps=True)),
],
)
def test_device_placement(tmpdir, accelerator, device):
def test_device_placement(tmp_path, accelerator, device):
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, accelerator=accelerator, devices=1)
trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=True, accelerator=accelerator, devices=1)
trainer.fit(model)

def assert_device(device: torch.device) -> None:
@@ -342,7 +342,7 @@ def __init__(self, spec):
), "Expect the shards to be same after `m_1` loading `m_0`'s state dict"


def test_lightning_module_configure_gradient_clipping(tmpdir):
def test_lightning_module_configure_gradient_clipping(tmp_path):
"""Test custom gradient clipping inside `configure_gradient_clipping` hook."""

class TestModel(BoringModel):
@@ -359,7 +359,7 @@ def configure_gradient_clipping(self, optimizer, gradient_clip_val, gradient_cli

model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=1, limit_train_batches=1, limit_val_batches=0, gradient_clip_val=1e-4
default_root_dir=tmp_path, max_epochs=1, limit_train_batches=1, limit_val_batches=0, gradient_clip_val=1e-4
)
trainer.fit(model)

@@ -371,7 +371,7 @@ def configure_gradient_clipping(self, optimizer, gradient_clip_val, gradient_cli
assert p.grad.max() <= model.custom_gradient_clip_val


def test_lightning_module_configure_gradient_clipping_different_argument_values(tmpdir):
def test_lightning_module_configure_gradient_clipping_different_argument_values(tmp_path):
"""Test that setting gradient clipping arguments in `Trainer` and cusotmizing gradient clipping inside
`configure_gradient_clipping` with different values raises an exception."""

@@ -383,7 +383,7 @@ def configure_gradient_clipping(self, optimizer, gradient_clip_val, gradient_cli

model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=1, limit_train_batches=2, limit_val_batches=0, gradient_clip_val=1e-4
default_root_dir=tmp_path, max_epochs=1, limit_train_batches=2, limit_val_batches=0, gradient_clip_val=1e-4
)
with pytest.raises(
MisconfigurationException,
@@ -399,7 +399,7 @@ def configure_gradient_clipping(self, optimizer, gradient_clip_val, gradient_cli

model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
default_root_dir=tmp_path,
max_epochs=1,
limit_train_batches=2,
limit_val_batches=0,
28 changes: 14 additions & 14 deletions tests/tests_pytorch/core/test_lightning_optimizer.py
@@ -25,7 +25,7 @@


@pytest.mark.parametrize("auto", [True, False])
def test_lightning_optimizer(tmpdir, auto):
def test_lightning_optimizer(tmp_path, auto):
"""Test that optimizer are correctly wrapped by our LightningOptimizer."""

class TestModel(BoringModel):
@@ -39,23 +39,23 @@ def configure_optimizers(self):

model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=1, limit_val_batches=1, max_epochs=1, enable_model_summary=False
default_root_dir=tmp_path, limit_train_batches=1, limit_val_batches=1, max_epochs=1, enable_model_summary=False
)
trainer.fit(model)

lightning_opt = model.optimizers()
assert str(lightning_opt) == "Lightning" + str(lightning_opt.optimizer)


def test_init_optimizers_resets_lightning_optimizers(tmpdir):
def test_init_optimizers_resets_lightning_optimizers(tmp_path):
"""Test that the Trainer resets the `lightning_optimizers` list everytime new optimizers get initialized."""

def compare_optimizers():
assert trainer.strategy._lightning_optimizers[0].optimizer is trainer.optimizers[0]

model = BoringModel()
model.lr = 0.2
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer = Trainer(default_root_dir=tmp_path, max_epochs=1)
tuner = Tuner(trainer)

tuner.lr_find(model)
@@ -69,7 +69,7 @@ def compare_optimizers():
compare_optimizers()


def test_lightning_optimizer_manual_optimization_and_accumulated_gradients(tmpdir):
def test_lightning_optimizer_manual_optimization_and_accumulated_gradients(tmp_path):
"""Test that the user can use our LightningOptimizer.
Not recommended.
@@ -110,7 +110,7 @@ def configure_optimizers(self):

model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=8, limit_val_batches=1, max_epochs=1, enable_model_summary=False
default_root_dir=tmp_path, limit_train_batches=8, limit_val_batches=1, max_epochs=1, enable_model_summary=False
)

with patch.multiple(torch.optim.SGD, zero_grad=DEFAULT, step=DEFAULT) as sgd, patch.multiple(
@@ -174,7 +174,7 @@ def test_state_mutation():
assert lightning_optimizer2.param_groups[0]["lr"] == 1.0


def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad(tmpdir):
def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad(tmp_path):
"""Test overriding zero_grad works in automatic_optimization."""

class TestModel(BoringModel):
@@ -189,15 +189,15 @@ def configure_optimizers(self):

model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=20, limit_val_batches=1, max_epochs=1, enable_model_summary=False
default_root_dir=tmp_path, limit_train_batches=20, limit_val_batches=1, max_epochs=1, enable_model_summary=False
)

with patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
trainer.fit(model)
assert sgd_zero_grad.call_count == 10


def test_lightning_optimizer_automatic_optimization_optimizer_step(tmpdir):
def test_lightning_optimizer_automatic_optimization_optimizer_step(tmp_path):
"""Test overriding step works in automatic_optimization."""

class TestModel(BoringModel):
@@ -218,7 +218,7 @@ def configure_optimizers(self):

limit_train_batches = 8
trainer = Trainer(
default_root_dir=tmpdir,
default_root_dir=tmp_path,
limit_train_batches=limit_train_batches,
limit_val_batches=1,
max_epochs=1,
@@ -232,7 +232,7 @@ def configure_optimizers(self):
assert sgd["zero_grad"].call_count == limit_train_batches


def test_lightning_optimizer_automatic_optimization_lbfgs_zero_grad(tmpdir):
def test_lightning_optimizer_automatic_optimization_lbfgs_zero_grad(tmp_path):
"""Test zero_grad is called the same number of times as LBFGS requires for reevaluation of the loss in
automatic_optimization."""

Expand All @@ -242,7 +242,7 @@ def configure_optimizers(self):

model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=1, limit_val_batches=1, max_epochs=1, enable_model_summary=False
default_root_dir=tmp_path, limit_train_batches=1, limit_val_batches=1, max_epochs=1, enable_model_summary=False
)

with patch("torch.optim.LBFGS.zero_grad") as zero_grad:
@@ -308,7 +308,7 @@ def test_lightning_optimizer_keeps_hooks():
assert len(optimizer._fwd_handles) == 1


def test_params_groups_and_state_are_accessible(tmpdir):
def test_params_groups_and_state_are_accessible(tmp_path):
class TestModel(BoringModel):
def on_train_start(self):
# Update the learning rate manually on the unwrapped optimizer
@@ -337,5 +337,5 @@ def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure, **__):
assert loss == self.__loss

model = TestModel()
trainer = Trainer(max_epochs=1, default_root_dir=tmpdir, limit_train_batches=1, limit_val_batches=0)
trainer = Trainer(max_epochs=1, default_root_dir=tmp_path, limit_train_batches=1, limit_val_batches=0)
trainer.fit(model)
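
As a usage sketch (assuming a development checkout of the repository with the test requirements installed, run from the repository root), the three files shown in this commit can be exercised directly with pytest:

```python
import pytest

# Collect and run only the core-suite files shown in this commit; "-q" keeps
# the output terse. pytest.main returns an exit code (0 means all tests passed).
exit_code = pytest.main(
    [
        "tests/tests_pytorch/core/test_datamodules.py",
        "tests/tests_pytorch/core/test_lightning_module.py",
        "tests/tests_pytorch/core/test_lightning_optimizer.py",
        "-q",
    ]
)
print(f"pytest exit code: {int(exit_code)}")
```
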
