Fix eval results completeness check (iterate over dict values, not keys)
pantonante committed Feb 19, 2024
1 parent 502ba5c commit b83a058
Showing 1 changed file with 3 additions and 3 deletions.
continuous_eval/eval/manager.py: 3 additions & 3 deletions
@@ -165,7 +165,7 @@ def save_eval_results(self, filepath: Path):
         assert self._eval_results is not None, "No samples to save"
         assert self._dataset is not None, "Dataset not set"
         assert all(
-            [len(module_res) == len(self._dataset.data) for module_res in self._eval_results]
+            [len(module_res) == len(self._dataset.data) for module_res in self._eval_results.values()]
         ), "Evaluation is not complete"
         with open(filepath, "w") as json_file:
             json.dump(self._eval_results, json_file, indent=None)
@@ -176,7 +176,7 @@ def load_eval_results(self, filepath: Path):
         with open(filepath, "r") as json_file:
             self._eval_results = json.load(json_file)
         assert all(
-            [len(module_res) == len(self._dataset.data) for module_res in self._eval_results]
+            [len(module_res) == len(self._dataset.data) for module_res in self._eval_results.values()]
         ), "Evaluation is not complete"

     # Tests
@@ -187,7 +187,7 @@ def run_tests(self):
         assert self._dataset is not None, "Dataset not set"
         assert self._eval_results is not None, "Evaluation results not set"
         assert all(
-            [len(module_res) == len(self._dataset.data) for module_res in self._eval_results]
+            [len(module_res) == len(self._dataset.data) for module_res in self._eval_results.values()]
         ), "Evaluation is not complete"
         self._test_results = {
             module.name: {test.name: test.run(self._eval_results[module.name]) for test in module.tests}
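All three hunks fix the same check: iterating a dict in Python yields its keys, so the old assertion compared the length of each module name string against the dataset size instead of counting that module's results. A minimal sketch of the difference, assuming _eval_results maps module names to lists with one result per dataset sample (the sample data below is illustrative, not taken from the repository):

# Hypothetical data: three dataset samples, with results keyed by module name.
dataset_data = [{"q": "a"}, {"q": "b"}, {"q": "c"}]
eval_results = {
    "retriever": [0.9, 0.8, 0.7],
    "generator": ["ans1", "ans2", "ans3"],
}

# Old check: iterating the dict yields keys (strings), so len(module_res)
# measures the module name, not its result list. It fails here even though
# evaluation is complete, because "retriever" has 9 characters, not 3.
old_ok = all(len(module_res) == len(dataset_data) for module_res in eval_results)

# Fixed check: iterate over the values, i.e. the per-module result lists.
new_ok = all(len(module_res) == len(dataset_data) for module_res in eval_results.values())

print(old_ok, new_ok)  # False True

With the fix, the assertion passes only when every module has exactly one result per dataset entry, which is what "Evaluation is not complete" is meant to guard.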
