diff --git a/ignite/handlers/fbresearch_logger.py b/ignite/handlers/fbresearch_logger.py
index 61ac775b5f5d..aff7ff2cb7ba 100644
--- a/ignite/handlers/fbresearch_logger.py
+++ b/ignite/handlers/fbresearch_logger.py
@@ -154,7 +154,7 @@ def log_every(self, engine: Engine, optimizer: Optional[torch.optim.Optimizer] =
         if torch.cuda.is_available():
             cuda_max_mem = f"GPU Max Mem: {torch.cuda.max_memory_allocated() / MB:.0f} MB"
 
-        current_iter = engine.state.iteration % (engine.state.epoch_length + 1)
+        current_iter = ((engine.state.iteration - 1) % engine.state.epoch_length) + 1
 
         iter_avg_time = self.iter_timer.value()
         eta_seconds = iter_avg_time * (engine.state.epoch_length - current_iter)
diff --git a/tests/ignite/handlers/test_fbresearch_logger.py b/tests/ignite/handlers/test_fbresearch_logger.py
index 728c97870e09..3b515baeabee 100644
--- a/tests/ignite/handlers/test_fbresearch_logger.py
+++ b/tests/ignite/handlers/test_fbresearch_logger.py
@@ -104,3 +104,28 @@ def test_fbrlogger_with_state_attrs(mock_logger):
     trainer.run(data=[10], epoch_length=1, max_epochs=1)
     attrs = "alpha: 3.8990 beta: 12.2100 gamma: [21.0000, 6.0000]"
     assert attrs in fbr.logger.info.call_args_list[-2].args[0]
+
+
+def test_fbrlogger_iters_values_bug(mock_logger):
+    max_epochs = 15
+    every = 10
+    data_size = 20
+    trainer = Engine(lambda e, b: 42)
+    fbr = FBResearchLogger(logger=mock_logger, show_output=True)
+    fbr.attach(trainer, "Training", every=every)
+    trainer.run(data=range(data_size), max_epochs=max_epochs)
+
+    expected_epoch = 1
+    expected_iters = [i for i in range(every, data_size + 1, every)]
+    n_calls_per_epoch = data_size // every
+    i = 0
+    for call_args in fbr.logger.info.call_args_list:
+        msg = call_args.args[0]
+        if msg.startswith("Epoch"):
+            expected_iter = expected_iters[i]
+            assert f"Epoch [{expected_epoch}/{max_epochs}] [{expected_iter}/{data_size}]" in msg
+            if i == n_calls_per_epoch - 1:
+                expected_epoch += 1
+            i += 1
+            if i == n_calls_per_epoch:
+                i = 0