dspy/evaluate/evaluate.py (5 additions, 0 deletions)

@@ -97,6 +97,7 @@ def __call__(
         display_table: Optional[bool] = None,
         return_all_scores: Optional[bool] = None,
         return_outputs: Optional[bool] = None,
+        callback_metadata: Optional[dict[str, Any]] = None,
     ):
         """
         Args:
@@ -113,6 +114,7 @@ def __call__(
                 use `self.return_all_scores`.
             return_outputs (bool): Whether to return the dspy program's outputs for every data in `devset`. if not
                 provided, use `self.return_outputs`.
+            callback_metadata (dict): Metadata to be used for evaluate callback handlers.
 
         Returns:
             The evaluation results are returned in different formats based on the flags:
@@ -139,6 +141,9 @@ def __call__(
         return_all_scores = return_all_scores if return_all_scores is not None else self.return_all_scores
         return_outputs = return_outputs if return_outputs is not None else self.return_outputs
 
+        if callback_metadata:
+            logger.debug(f"Evaluate is called with callback metadata: {callback_metadata}")
+
         tqdm.tqdm._instances.clear()
 
         executor = ParallelExecutor(
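Taken together, the hunks above add an optional `callback_metadata` argument to `Evaluate.__call__` and log it at DEBUG level when present. Below is a minimal sketch of how a caller might pass it; the metric, example, and program are illustrative assumptions (and presume an LM configured via `dspy.settings.configure(lm=...)`). Only the `callback_metadata` keyword itself comes from this diff.

```python
import dspy
from dspy.evaluate import Evaluate

# Illustrative metric and devset, not part of this PR.
def exact_match(example, prediction, trace=None):
    return example.answer == prediction.answer

devset = [dspy.Example(question="What is 2 + 2?", answer="4").with_inputs("question")]
program = dspy.Predict("question -> answer")  # assumes dspy.settings.configure(lm=...) was called

evaluate = Evaluate(devset=devset, metric=exact_match, num_threads=1)

# New in this diff: an arbitrary dict that Evaluate logs at DEBUG level
# and makes available to evaluate callback handlers.
score = evaluate(program, callback_metadata={"metric_key": "eval_full"})
```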
dspy/teleprompt/utils.py (3 additions, 2 deletions)

@@ -49,13 +49,14 @@ def eval_candidate_program(batch_size, trainset, candidate_program, evaluate, rng
     try:
         # Evaluate on the full trainset
         if batch_size >= len(trainset):
-            return evaluate(candidate_program, devset=trainset, return_all_scores=return_all_scores)
+            return evaluate(candidate_program, devset=trainset, return_all_scores=return_all_scores, callback_metadata={"metric_key": "eval_full"})
         # Or evaluate on a minibatch
         else:
             return evaluate(
                 candidate_program,
                 devset=create_minibatch(trainset, batch_size, rng),
-                return_all_scores=return_all_scores
+                return_all_scores=return_all_scores,
+                callback_metadata={"metric_key": "eval_minibatch"}
             )
     except Exception:
         logger.error("An exception occurred during evaluation", exc_info=True)
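The two `metric_key` values ("eval_full" vs. "eval_minibatch") let whatever consumes the metadata distinguish full-trainset evaluations from minibatch evaluations during optimization. The handler below is a hypothetical sketch of such a consumer, not a dspy API:

```python
from typing import Any, Optional

def record_eval(score: float, callback_metadata: Optional[dict[str, Any]] = None) -> None:
    """Hypothetical tracking hook that routes scores by the metric_key set in dspy/teleprompt/utils.py."""
    key = (callback_metadata or {}).get("metric_key", "unknown")
    if key == "eval_full":
        print(f"full-trainset evaluation: {score:.3f}")
    elif key == "eval_minibatch":
        print(f"minibatch evaluation: {score:.3f}")
    else:
        print(f"evaluation ({key}): {score:.3f}")

record_eval(0.82, {"metric_key": "eval_minibatch"})  # -> minibatch evaluation: 0.820
```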
tests/teleprompt/test_utils.py (2 additions, 0 deletions)

@@ -23,6 +23,7 @@ def test_eval_candidate_program_full_trainset():
     evaluate.assert_called_once()
     _, called_kwargs = evaluate.call_args
     assert len(called_kwargs['devset']) == len(trainset)
+    assert called_kwargs['callback_metadata'] == {"metric_key": "eval_full"}
     assert result == 0
 
 def test_eval_candidate_program_minibatch():
@@ -36,6 +37,7 @@ def test_eval_candidate_program_minibatch():
     evaluate.assert_called_once()
     _, called_kwargs = evaluate.call_args
     assert len(called_kwargs['devset']) == batch_size
+    assert called_kwargs['callback_metadata'] == {"metric_key": "eval_minibatch"}
     assert result == 0
 
 @pytest.mark.parametrize("return_all_scores", [True, False])
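The updated assertions can be exercised locally with a standard pytest invocation from the repository root, e.g. `pytest tests/teleprompt/test_utils.py -k "eval_candidate_program"`, assuming the project's test dependencies are installed.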