Skip to content

Commit fe2b666

Browse files
authored
Fixing a small issue in trainer logging (Lightning-AI#1563)
* The epoch was being logged to `metrics` (which is never read) instead of to `scalar_metrics`, so it was not passed to the logger.
* Updated the tests to account for the `epoch` key now arriving at the logger.
1 parent 7989ca8 commit fe2b666

File tree

2 files changed

+4
-4
lines changed

2 files changed

+4
-4
lines changed

pytorch_lightning/trainer/logging.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ def log_metrics(self, metrics, grad_norm_dic, step=None):
6868
step = scalar_metrics.pop("step")
6969
else:
7070
# added metrics by Lightning for convenience
71-
metrics['epoch'] = self.current_epoch
71+
scalar_metrics['epoch'] = self.current_epoch
7272
step = step if step is not None else self.global_step
7373
# log actual metrics
7474
if self.proc_rank == 0 and self.logger is not None:

tests/loggers/test_all.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -62,9 +62,9 @@ def log_metrics(self, metrics, step):
6262
trainer.test()
6363

6464
log_metric_names = [(s, sorted(m.keys())) for s, m in logger.history]
65-
assert log_metric_names == [(0, ['val_acc', 'val_loss']),
66-
(0, ['train_some_val']),
67-
(1, ['test_acc', 'test_loss'])]
65+
assert log_metric_names == [(0, ['epoch', 'val_acc', 'val_loss']),
66+
(0, ['epoch', 'train_some_val']),
67+
(1, ['epoch', 'test_acc', 'test_loss'])]
6868

6969

7070
@pytest.mark.parametrize("logger_class", [

0 commit comments

Comments (0)