From c0b45c98e824891fc0ed46c7fda3d90012e4390a Mon Sep 17 00:00:00 2001
From: Lokkuchakreshkumar
Date: Wed, 26 Nov 2025 13:59:31 +0530
Subject: [PATCH] Fix #213: Add metadata to TorchExperiment._evaluate() method

The metadata dictionary in TorchExperiment._evaluate() was always empty.
This fix populates it with useful training information:

- num_epochs_trained: The number of epochs the model was trained for
- all_metrics: All metrics collected during training

This is consistent with other Hyperactive integrations (sklearn, sktime),
which also return useful metadata.
---
 .../experiment/integrations/torch_lightning_experiment.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/hyperactive/experiment/integrations/torch_lightning_experiment.py b/src/hyperactive/experiment/integrations/torch_lightning_experiment.py
index 0bdd1f80..ae4a8f86 100644
--- a/src/hyperactive/experiment/integrations/torch_lightning_experiment.py
+++ b/src/hyperactive/experiment/integrations/torch_lightning_experiment.py
@@ -177,7 +177,10 @@ def _evaluate(self, params):
         trainer.fit(model, self.datamodule)
 
         val_result = trainer.callback_metrics.get(self.objective_metric)
-        metadata = {}
+        metadata = {
+            "num_epochs_trained": trainer.current_epoch,
+            "all_metrics": trainer.callback_metrics,
+        }
 
         if val_result is None:
             available_metrics = list(trainer.callback_metrics.keys())
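
Reviewer note (not part of the patch): a minimal, self-contained sketch of what the two
metadata fields hold after trainer.fit(), assuming a standard PyTorch Lightning setup.
TinyModel and RandomDataset below are illustrative stand-ins, not Hyperactive code, and
depending on the installed version the import may be `pytorch_lightning` instead of
`lightning.pytorch`.

import torch
from torch.utils.data import DataLoader, Dataset
import lightning.pytorch as pl  # or: import pytorch_lightning as pl


class RandomDataset(Dataset):
    # Small synthetic regression dataset, only for illustration.
    def __init__(self, n=64, dim=8):
        self.x = torch.randn(n, dim)
        self.y = torch.randn(n, 1)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]


class TinyModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Linear(8, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.mse_loss(self.net(x), y)
        self.log("train_loss", loss)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        self.log("val_loss", torch.nn.functional.mse_loss(self.net(x), y))

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


trainer = pl.Trainer(
    max_epochs=2,
    logger=False,
    enable_checkpointing=False,
    enable_progress_bar=False,
)
trainer.fit(
    TinyModel(),
    train_dataloaders=DataLoader(RandomDataset(), batch_size=16),
    val_dataloaders=DataLoader(RandomDataset(), batch_size=16),
)

# Mirrors the dict built in _evaluate(): epochs completed plus every metric
# logged during training/validation (as tensors in trainer.callback_metrics).
metadata = {
    "num_epochs_trained": trainer.current_epoch,
    "all_metrics": trainer.callback_metrics,  # e.g. {"train_loss": ..., "val_loss": ...}
}
print(metadata)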