Update tests
carmocca committed Jan 30, 2023
1 parent dba6fa6 commit b27175e
Showing 49 changed files with 149 additions and 641 deletions.
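
For orientation: the pattern repeated across these files is the removal of the training_epoch_end, validation_epoch_end, and test_epoch_end hooks from the LightningModule API. Their on_train_epoch_end / on_validation_epoch_end / on_test_epoch_end replacements take no outputs argument, so anything that aggregated per-step outputs now has to accumulate them itself. A minimal sketch of the migration, assuming the pytorch_lightning namespace; the val_losses accumulator is our own illustration, not something this commit introduces:

    import torch
    from pytorch_lightning import LightningModule

    class MigratedModel(LightningModule):
        def __init__(self):
            super().__init__()
            self.layer = torch.nn.Linear(32, 2)
            # Manual accumulator standing in for the outputs list that
            # Lightning used to pass to validation_epoch_end.
            self.val_losses = []

        def validation_step(self, batch, batch_idx):
            loss = self.layer(batch).sum()
            self.val_losses.append(loss)
            return loss

        # Removed hook: def validation_epoch_end(self, outputs): ...
        def on_validation_epoch_end(self):
            # No outputs argument anymore; aggregate what was stored above.
            self.log("val_loss", torch.stack(self.val_losses).mean())
            self.val_losses.clear()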
9 changes: 0 additions & 9 deletions tests/tests_pytorch/accelerators/test_ipu.py
@@ -47,15 +47,6 @@ def validation_step(self, batch, batch_idx):
def test_step(self, batch, batch_idx):
return self.step(batch)

- def training_epoch_end(self, outputs) -> None:
- pass
-
- def validation_epoch_end(self, outputs) -> None:
- pass
-
- def test_epoch_end(self, outputs) -> None:
- pass


class IPUClassificationModel(ClassificationModel):
def training_step(self, batch, batch_idx):
1 change: 0 additions & 1 deletion tests/tests_pytorch/accelerators/test_tpu.py
@@ -155,7 +155,6 @@ def on_train_end(self):
model = ManualOptimizationModel()
model_copy = deepcopy(model)
model.training_step_end = None
- model.training_epoch_end = None

trainer = Trainer(
max_epochs=1,
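
A note on deletions like model.training_epoch_end = None above: these tests disabled an unwanted hook by shadowing it with None on the instance, which Lightning treats as not implemented. With the epoch-end hooks gone there is nothing left to shadow, so the assignments are dropped outright. The trick itself still works for hooks that remain in the API — a minimal sketch, assuming the pytorch_lightning namespace and its BoringModel demo class:

    from pytorch_lightning import Trainer
    from pytorch_lightning.demos.boring_classes import BoringModel

    model = BoringModel()
    # Shadowing an inherited hook with None makes Lightning skip it;
    # here it disables the validation loop entirely.
    model.validation_step = None

    trainer = Trainer(max_epochs=1, limit_train_batches=2, limit_val_batches=2)
    trainer.fit(model)  # trains for one epoch, runs no validation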
@@ -136,8 +136,6 @@ def predict_step(self, batch, batch_idx, dataloader_idx=None):
return

model = CustomModel()
- model.validation_epoch_end = None
- model.test_epoch_end = None

# check the sanity dataloaders
num_sanity_val_steps = 4
22 changes: 0 additions & 22 deletions tests/tests_pytorch/callbacks/test_callback_hook_outputs.py
@@ -41,9 +41,6 @@ def on_validation_batch_end(self, outputs, batch, batch_idx: int, dataloader_idx
def on_test_batch_end(self, outputs, batch, batch_idx: int, dataloader_idx: int) -> None:
assert "x" in outputs

- def training_epoch_end(self, outputs) -> None:
- assert len(outputs) == self.trainer.num_training_batches

model = TestModel()

trainer = Trainer(
@@ -59,22 +56,3 @@ def training_epoch_end(self, outputs) -> None:
assert any(isinstance(c, CB) for c in trainer.callbacks)

trainer.fit(model)


- def test_free_memory_on_eval_outputs(tmpdir):
- class CB(Callback):
- def on_train_epoch_end(self, trainer, pl_module):
- assert not trainer._evaluation_loop._outputs
-
- model = BoringModel()
-
- trainer = Trainer(
- callbacks=CB(),
- default_root_dir=tmpdir,
- limit_train_batches=2,
- limit_val_batches=2,
- max_epochs=1,
- enable_model_summary=False,
- )
-
- trainer.fit(model)
12 changes: 6 additions & 6 deletions tests/tests_pytorch/callbacks/test_early_stopping.py
@@ -134,7 +134,7 @@ def test_early_stopping_patience(tmpdir, loss_values: list, patience: int, expec
class ModelOverrideValidationReturn(BoringModel):
validation_return_values = torch.tensor(loss_values)

- def validation_epoch_end(self, outputs):
+ def on_validation_epoch_end(self):
loss = self.validation_return_values[self.current_epoch]
self.log("test_val_loss", loss)

@@ -164,7 +164,7 @@ def test_early_stopping_patience_train(
class ModelOverrideTrainReturn(BoringModel):
train_return_values = torch.tensor(loss_values)

- def training_epoch_end(self, outputs):
+ def on_train_epoch_end(self):
loss = self.train_return_values[self.current_epoch]
self.log("train_loss", loss)

@@ -226,7 +226,7 @@ def test_early_stopping_no_val_step(tmpdir):
)
def test_early_stopping_thresholds(tmpdir, stopping_threshold, divergence_threshold, losses, expected_epoch):
class CurrentModel(BoringModel):
- def validation_epoch_end(self, outputs):
+ def on_validation_epoch_end(self):
val_loss = losses[self.current_epoch]
self.log("abc", val_loss)

@@ -252,7 +252,7 @@ def test_early_stopping_on_non_finite_monitor(tmpdir, stop_value):
expected_stop_epoch = 2

class CurrentModel(BoringModel):
- def validation_epoch_end(self, outputs):
+ def on_validation_epoch_end(self):
val_loss = losses[self.current_epoch]
self.log("val_loss", val_loss)

@@ -352,12 +352,12 @@ def _epoch_end(self) -> None:
self.log("abc", torch.tensor(loss))
self.log("cba", torch.tensor(0))

- def training_epoch_end(self, outputs):
+ def on_train_epoch_end(self):
if not self.early_stop_on_train:
return
self._epoch_end()

- def validation_epoch_end(self, outputs):
+ def on_validation_epoch_end(self):
if self.early_stop_on_train:
return
self._epoch_end()
3 changes: 0 additions & 3 deletions tests/tests_pytorch/callbacks/test_lr_monitor.py
@@ -248,7 +248,6 @@ def configure_optimizers(self):
return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]

model = CustomBoringModel()
- model.training_epoch_end = None

lr_monitor = LearningRateMonitor(logging_interval=logging_interval)
log_every_n_steps = 2
@@ -306,7 +305,6 @@ def configure_optimizers(self):
return [optimizer1, optimizer2]

model = CustomBoringModel()
- model.training_epoch_end = None

lr_monitor = LearningRateMonitor(logging_interval=logging_interval)
log_every_n_steps = 2
@@ -563,7 +561,6 @@ def finetune_function(self, pl_module, epoch: int, optimizer, opt_idx: int):
enable_checkpointing=False,
)
model = TestModel()
- model.training_epoch_end = None
trainer.fit(model)

expected = [0.1, 0.1, 0.1, 0.1, 0.1]
@@ -96,7 +96,7 @@ def training_step(self, batch, batch_idx):
self.log("my_loss", batch_idx * (1 + local_rank), on_epoch=True)
return super().training_step(batch, batch_idx)

- def training_epoch_end(self, outputs) -> None:
+ def on_train_epoch_end(self):
local_rank = int(os.getenv("LOCAL_RANK"))
if self.trainer.is_global_zero:
self.log("my_loss_2", (1 + local_rank), on_epoch=True, rank_zero_only=True)
14 changes: 4 additions & 10 deletions tests/tests_pytorch/checkpointing/test_model_checkpoint.py
@@ -59,9 +59,8 @@ def training_step(self, batch, batch_idx):
self.log("early_stop_on", out["loss"])
return out

- def validation_epoch_end(self, outputs):
- outs = torch.stack([x["x"] for x in outputs]).mean()
- self.log("val_acc", outs)
+ def on_validation_epoch_end(self):
+ self.log("val_acc", torch.tensor(1.23))


def mock_training_epoch_loop(trainer):
@@ -214,9 +213,8 @@ def validation_step(self, batch, batch_idx):
self.log("val_log", log_value)
return super().validation_step(batch, batch_idx)

- def validation_epoch_end(self, outputs):
+ def on_validation_epoch_end(self):
self.val_loop_count += 1
- super().validation_epoch_end(outputs)
self.scores.append(self.trainer.logged_metrics[monitor])

def configure_optimizers(self):
@@ -829,7 +827,7 @@ def test_checkpointing_with_nan_as_first(tmpdir, mode):
monitor += [5, 7, 8] if mode == "max" else [8, 7, 5]

class CurrentModel(LogInTwoMethods):
- def validation_epoch_end(self, outputs):
+ def on_validation_epoch_end(self):
val_loss = monitor[self.current_epoch]
self.log("abc", val_loss)

@@ -863,7 +861,6 @@ def validation_step(self, batch, batch_idx):
self.log("val_loss", loss)

model = ExtendedBoringModel()
- model.validation_epoch_end = None
trainer_kwargs = {
"max_epochs": 1,
"limit_train_batches": 2,
@@ -901,9 +898,6 @@ def validation_step(self, batch, batch_idx):
self.log("val_loss", loss)
return {"val_loss": loss}

- def validation_epoch_end(self, *_):
- ...

def assert_trainer_init(trainer):
assert trainer.global_step == 0
assert trainer.current_epoch == 0
@@ -41,7 +41,6 @@ def validation_step(self, batch, batch_idx):
self.log("val_loss", loss, on_epoch=True, prog_bar=True)

model = ExtendedBoringModel()
- model.validation_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
5 changes: 0 additions & 5 deletions tests/tests_pytorch/core/test_datamodules.py
@@ -163,10 +163,8 @@ def test_train_loop_only(tmpdir):

model.validation_step = None
model.validation_step_end = None
- model.validation_epoch_end = None
model.test_step = None
model.test_step_end = None
- model.test_epoch_end = None

trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, enable_model_summary=False)

@@ -185,7 +183,6 @@ def test_train_val_loop_only(tmpdir):

model.validation_step = None
model.validation_step_end = None
- model.validation_epoch_end = None

trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, enable_model_summary=False)

@@ -278,10 +275,8 @@ def train_dataloader(self):

model.validation_step = None
model.validation_step_end = None
- model.validation_epoch_end = None
model.test_step = None
model.test_step_end = None
- model.test_epoch_end = None

trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, limit_train_batches=2, reload_dataloaders_every_n_epochs=2)
trainer.fit(model, dm)
2 changes: 0 additions & 2 deletions tests/tests_pytorch/core/test_lightning_module.py
@@ -178,7 +178,6 @@ def configure_optimizers(self):
return [optimizer_1, optimizer_2]

model = TestModel()
- model.training_epoch_end = None

trainer = Trainer(max_epochs=1, default_root_dir=tmpdir, limit_train_batches=8, limit_val_batches=0)
trainer.fit(model)
@@ -281,7 +280,6 @@ def configure_optimizers(self):
return [optimizer_1, optimizer_2, optimizer_3]

model = TestModel()
- model.training_epoch_end = None
trainer = Trainer(max_epochs=1, default_root_dir=tmpdir, limit_train_batches=8)
trainer.fit(model)

7 changes: 0 additions & 7 deletions tests/tests_pytorch/core/test_lightning_optimizer.py
@@ -109,7 +109,6 @@ def configure_optimizers(self):

model = TestModel()
model.training_step_end = None
- model.training_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=8, limit_val_batches=1, max_epochs=1, enable_model_summary=False
)
@@ -166,9 +165,6 @@ def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad(tmpdir):
"""Test overriding zero_grad works in automatic_optimization."""

class TestModel(BoringModel):
- def training_epoch_end(self, outputs):
- ...

def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
if batch_idx % 2 == 0:
optimizer.zero_grad()
@@ -195,9 +191,6 @@ class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
return super().training_step(batch, batch_idx)

- def training_epoch_end(self, outputs):
- ...

def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, **_):
assert isinstance(optimizer_closure, Closure)
# zero_grad is called inside the closure
14 changes: 0 additions & 14 deletions tests/tests_pytorch/helpers/deterministic_model.py
@@ -24,11 +24,9 @@ def __init__(self, weights=None):

self.training_step_called = False
self.training_step_end_called = False
- self.training_epoch_end_called = False

self.validation_step_called = False
self.validation_step_end_called = False
- self.validation_epoch_end_called = False

self.assert_backward = True

@@ -74,18 +72,6 @@ def validation_step_end(self, val_step_output):

return val_step_output

- def validation_epoch_end(self, outputs):
- assert len(outputs) == self.trainer.num_val_batches[0]
-
- for i, out in enumerate(outputs):
- assert out["log"]["log_acc1"] >= 12 + i
-
- self.validation_epoch_end_called = True
-
- result = outputs[-1]
- result["val_epoch_end"] = torch.tensor(1233)
- return result

# -----------------------------
# DATA
# -----------------------------
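
Where a deleted hook carried assertions over the per-step outputs, as in deterministic_model.py just above, an equivalent check under the new API has to collect those outputs itself. This commit simply drops the checks; below is a hedged sketch of one way they could have been kept, with the _val_outputs list being our own addition and the import path assumed from the file listing above:

    from tests_pytorch.helpers.deterministic_model import DeterministicModel

    class DeterministicModelWithChecks(DeterministicModel):
        def __init__(self, weights=None):
            super().__init__(weights)
            self._val_outputs = []  # stands in for the removed outputs argument

        def validation_step_end(self, val_step_output):
            out = super().validation_step_end(val_step_output)
            self._val_outputs.append(out)
            return out

        def on_validation_epoch_end(self):
            # The same assertions the deleted validation_epoch_end performed.
            assert len(self._val_outputs) == self.trainer.num_val_batches[0]
            for i, out in enumerate(self._val_outputs):
                assert out["log"]["log_acc1"] >= 12 + i
            self._val_outputs.clear()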
10 changes: 4 additions & 6 deletions tests/tests_pytorch/loggers/test_all.py
@@ -97,13 +97,11 @@ def training_step(self, batch, batch_idx):
self.log("train_some_val", loss)
return {"loss": loss}

- def validation_epoch_end(self, outputs) -> None:
- avg_val_loss = torch.stack([x["x"] for x in outputs]).mean()
- self.log_dict({"early_stop_on": avg_val_loss, "val_loss": avg_val_loss**0.5})
+ def on_validation_epoch_end(self):
+ self.log_dict({"early_stop_on": torch.tensor(1), "val_loss": torch.tensor(0.5)})

- def test_epoch_end(self, outputs) -> None:
- avg_test_loss = torch.stack([x["y"] for x in outputs]).mean()
- self.log("test_loss", avg_test_loss)
+ def on_test_epoch_end(self):
+ self.log("test_loss", torch.tensor(2))

class StoreHistoryLogger(logger_class):
def __init__(self, *args, **kwargs) -> None:
4 changes: 2 additions & 2 deletions tests/tests_pytorch/loggers/test_logger.py
@@ -147,11 +147,11 @@ def log_metrics(self, metrics, step):
super().log_metrics(metrics, step)

class CustomModel(BoringModel):
- def training_epoch_end(self, outputs):
+ def on_train_epoch_end(self):
self.logger.logged_step += 1
self.log_dict({"step": self.logger.logged_step, "train_acc": self.logger.logged_step / 10})

- def validation_epoch_end(self, outputs):
+ def on_validation_epoch_end(self):
self.logger.logged_step += 1
self.log_dict({"step": self.logger.logged_step, "val_acc": self.logger.logged_step / 10})

2 changes: 1 addition & 1 deletion tests/tests_pytorch/loggers/test_neptune.py
@@ -208,7 +208,7 @@ def test_neptune_log_metrics_on_trained_model(self, neptune):
"""Verify that trained models do log data."""
# given
class LoggingModel(BoringModel):
- def validation_epoch_end(self, outputs):
+ def on_validation_epoch_end(self):
self.log("some/key", 42)

# and
1 change: 0 additions & 1 deletion tests/tests_pytorch/loggers/test_tensorboard.py
@@ -258,7 +258,6 @@ def training_step(self, *args):
return super().training_step(*args)

model = TestModel()
- model.training_epoch_end = None
logger_0 = TensorBoardLogger(tmpdir, default_hp_metric=False)
trainer = Trainer(
default_root_dir=tmpdir,
