fix: Fix the default value of response_column_name in EvalTask.evaluate()

PiperOrigin-RevId: 633978835
jsondai authored and Copybara-Service committed May 15, 2024
1 parent 0feac9f commit 98f9b35
Showing 1 changed file with 7 additions and 7 deletions.
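In short: before this commit, `evaluate()` (and the internal `_evaluate_with_experiment()`) declared `response_column_name: str = "response"`, so the argument was always a non-empty string and the `or self.response_column_name` fallback at the call sites never fired; a `response_column_name` passed to the `EvalTask` constructor was silently ignored. Changing the default to `None` and resolving the fallback once inside `evaluate()` restores the intended precedence: explicit argument first, then the task-level setting. A minimal sketch of that precedence rule, not the SDK implementation itself (class and column names below are made up for illustration):

    # Sketch only: demonstrates the precedence this commit establishes,
    # not the actual vertexai SDK code.
    from typing import Optional


    class EvalTaskSketch:
        def __init__(self, response_column_name: str = "response"):
            # Column name configured once, when the task is created.
            self.response_column_name = response_column_name

        def evaluate(self, response_column_name: Optional[str] = None) -> str:
            # Explicit argument wins; otherwise fall back to the task-level value.
            # With the old default of "response" this fallback was unreachable.
            return response_column_name or self.response_column_name


    task = EvalTaskSketch(response_column_name="model_output")
    assert task.evaluate() == "model_output"          # task-level setting now respected
    assert task.evaluate("candidate") == "candidate"  # per-call override still wins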
14 changes: 7 additions & 7 deletions vertexai/preview/evaluation/_eval_tasks.py
@@ -249,7 +249,7 @@ def _evaluate_with_experiment(
         model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
         prompt_template: Optional[str] = None,
         experiment_run_name: Optional[str] = None,
-        response_column_name: str = "response",
+        response_column_name: Optional[str] = None,
     ) -> EvalResult:
         """Runs an evaluation for the EvalTask with an experiment.
@@ -264,7 +264,7 @@ def _evaluate_with_experiment(
                 to if an experiment is set for this EvalTask. If not provided, a random
                 unique experiment run name is used.
             response_column_name: The column name of model response in the dataset. If
-                not set, default to `response`.
+                provided, this will override the `response_column_name` of the `EvalTask`.

         Returns:
             The evaluation result.
@@ -279,7 +279,7 @@ def _evaluate_with_experiment(
             prompt_template=prompt_template,
             content_column_name=self.content_column_name,
             reference_column_name=self.reference_column_name,
-            response_column_name=response_column_name or self.response_column_name,
+            response_column_name=response_column_name,
         )
         try:
             vertexai.preview.log_metrics(eval_result.summary_metrics)
@@ -293,7 +293,7 @@ def evaluate(
         model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
         prompt_template: Optional[str] = None,
         experiment_run_name: Optional[str] = None,
-        response_column_name: str = "response",
+        response_column_name: Optional[str] = None,
     ) -> EvalResult:
         """Runs an evaluation for the EvalTask.
@@ -308,7 +308,7 @@ def evaluate(
                 to if an experiment is set for this EvalTask. If not provided, a random
                 unique experiment run name is used.
             response_column_name: The column name of model response in the dataset. If
-                not set, default to `response`.
+                provided, this will override the `response_column_name` of the `EvalTask`.

         Returns:
             The evaluation result.
@@ -321,7 +321,7 @@ def evaluate(
                 "`vertexai.init(experiment='experiment_name')`for logging this"
                 " evaluation run."
             )
-
+        response_column_name = response_column_name or self.response_column_name
         experiment_run_name = experiment_run_name or f"{uuid.uuid4()}"

         if self.experiment and global_experiment_name:
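The single line added in this hunk is the crux of the fix: the fallback is resolved once, at the top of `evaluate()`, and the already-resolved name is then forwarded to both the experiment path and the plain path, rather than each call site repeating `or self.response_column_name` (the pattern the surrounding call-site hunks remove). A small sketch of that resolve-once-then-forward shape; the helper name here is hypothetical, not the SDK's:

    from typing import Optional


    def _run_eval(response_column_name: str) -> None:
        # Hypothetical stand-in for the downstream evaluation call: it always
        # receives a concrete column name and never consults task-level defaults.
        print(f"evaluating column {response_column_name!r}")


    def evaluate(task_default: str, response_column_name: Optional[str] = None) -> None:
        # Resolve the fallback exactly once, up front ...
        response_column_name = response_column_name or task_default
        # ... so every downstream call just passes the resolved value through.
        _run_eval(response_column_name=response_column_name)


    evaluate(task_default="model_output")                                    # evaluating column 'model_output'
    evaluate(task_default="model_output", response_column_name="candidate")  # evaluating column 'candidate'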
@@ -354,7 +354,7 @@ def evaluate(
             prompt_template=prompt_template,
             content_column_name=self.content_column_name,
             reference_column_name=self.reference_column_name,
-            response_column_name=response_column_name or self.response_column_name,
+            response_column_name=response_column_name,
         )
         return eval_result

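For context on how this plays out against the public preview API: with this fix, a column name set once on the `EvalTask` is honoured by `evaluate()`, and a per-call argument still overrides it, matching the updated docstring above. A hedged usage sketch (project, location, metric name, dataset contents, and the `model_output` column are all illustrative placeholders):

    import pandas as pd
    import vertexai
    from vertexai.preview.evaluation import EvalTask

    # Placeholder project/location; substitute real values before running.
    vertexai.init(project="my-project", location="us-central1")

    # Model responses live in a column named "model_output" instead of the
    # default "response" column.
    eval_dataset = pd.DataFrame(
        {
            "reference": ["Paris is the capital of France."],
            "model_output": ["The capital of France is Paris."],
        }
    )

    task = EvalTask(
        dataset=eval_dataset,
        metrics=["exact_match"],              # metric name shown for illustration
        response_column_name="model_output",  # set once on the task
    )

    # After this fix, evaluate() falls back to the task-level "model_output";
    # previously it silently looked for a "response" column instead.
    result = task.evaluate()

    # An explicit per-call value still takes precedence over the task-level one.
    result = task.evaluate(response_column_name="model_output")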