2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "uipath"
version = "2.7.0"
version = "2.7.1"
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.11"
@@ -1,6 +1,7 @@
import json

from uipath.eval.evaluators import BaseEvaluator, BaseEvaluationCriteria, BaseEvaluatorConfig
from uipath.eval.evaluators import BaseEvaluationCriteria, BaseEvaluatorConfig
from uipath.eval.evaluators.base_evaluator import BaseEvaluator
from uipath.eval.models import AgentExecution, EvaluationResult, NumericEvaluationResult
from opentelemetry.sdk.trace import ReadableSpan

4 changes: 2 additions & 2 deletions src/uipath/_cli/_evals/_console_progress_reporter.py
@@ -15,7 +15,7 @@
EvalSetRunUpdatedEvent,
EvaluationEvents,
)
from uipath.eval.evaluators import BaseEvaluator
from uipath.eval.evaluators.base_evaluator import GenericBaseEvaluator
from uipath.eval.models import ScoreType

logger = logging.getLogger(__name__)
@@ -26,7 +26,7 @@ class ConsoleProgressReporter:

def __init__(self):
self.console = Console()
self.evaluators: dict[str, BaseEvaluator[Any, Any, Any]] = {}
self.evaluators: dict[str, GenericBaseEvaluator[Any, Any, Any]] = {}
self.display_started = False
self.eval_results_by_name: dict[str, list[Any]] = {}

215 changes: 31 additions & 184 deletions src/uipath/_cli/_evals/_evaluator_factory.py
@@ -9,85 +9,20 @@
from uipath._cli._evals._helpers import ( # type: ignore # Remove after gnarly fix
try_extract_file_and_class_name,
)
from uipath._cli._evals._models._evaluator import (
EvaluatorConfig,
LegacyEqualsEvaluatorParams,
LegacyEvaluator,
LegacyJsonSimilarityEvaluatorParams,
LegacyLLMEvaluatorParams,
LegacyTrajectoryEvaluatorParams,
)
from uipath._cli._evals._models._evaluator_base_params import EvaluatorBaseParams
from uipath._cli._evals._models._evaluator import CodedEvaluator, LegacyEvaluator
from uipath._utils.constants import EVALS_FOLDER
from uipath.eval.evaluators import (
BaseEvaluator,
LegacyBaseEvaluator,
BaseLegacyEvaluator,
LegacyContextPrecisionEvaluator,
LegacyExactMatchEvaluator,
LegacyFaithfulnessEvaluator,
LegacyJsonSimilarityEvaluator,
LegacyLlmAsAJudgeEvaluator,
LegacyTrajectoryEvaluator,
)
from uipath.eval.evaluators.base_evaluator import BaseEvaluatorConfig
from uipath.eval.evaluators.contains_evaluator import (
ContainsEvaluator,
ContainsEvaluatorConfig,
)
from uipath.eval.evaluators.exact_match_evaluator import (
ExactMatchEvaluator,
ExactMatchEvaluatorConfig,
)
from uipath.eval.evaluators.json_similarity_evaluator import (
JsonSimilarityEvaluator,
JsonSimilarityEvaluatorConfig,
)
from uipath.eval.evaluators.llm_judge_output_evaluator import (
LLMJudgeOutputEvaluator,
LLMJudgeOutputEvaluatorConfig,
LLMJudgeStrictJSONSimilarityOutputEvaluator,
LLMJudgeStrictJSONSimilarityOutputEvaluatorConfig,
)
from uipath.eval.evaluators.llm_judge_trajectory_evaluator import (
LLMJudgeTrajectoryEvaluator,
LLMJudgeTrajectoryEvaluatorConfig,
LLMJudgeTrajectorySimulationEvaluator,
LLMJudgeTrajectorySimulationEvaluatorConfig,
)
from uipath.eval.evaluators.tool_call_args_evaluator import (
ToolCallArgsEvaluator,
ToolCallArgsEvaluatorConfig,
)
from uipath.eval.evaluators.tool_call_count_evaluator import (
ToolCallCountEvaluator,
ToolCallCountEvaluatorConfig,
)
from uipath.eval.evaluators.tool_call_order_evaluator import (
ToolCallOrderEvaluator,
ToolCallOrderEvaluatorConfig,
)
from uipath.eval.evaluators.tool_call_output_evaluator import (
ToolCallOutputEvaluator,
ToolCallOutputEvaluatorConfig,
)
from uipath.eval.models import LegacyEvaluatorType
from uipath.eval.evaluators.base_evaluator import GenericBaseEvaluator

logger = logging.getLogger(__name__)

EVALUATOR_SCHEMA_TO_EVALUATOR_CLASS = {
ContainsEvaluatorConfig: ContainsEvaluator,
ExactMatchEvaluatorConfig: ExactMatchEvaluator,
JsonSimilarityEvaluatorConfig: JsonSimilarityEvaluator,
LLMJudgeOutputEvaluatorConfig: LLMJudgeOutputEvaluator,
LLMJudgeStrictJSONSimilarityOutputEvaluatorConfig: LLMJudgeStrictJSONSimilarityOutputEvaluator,
LLMJudgeTrajectoryEvaluatorConfig: LLMJudgeTrajectoryEvaluator,
LLMJudgeTrajectorySimulationEvaluatorConfig: LLMJudgeTrajectorySimulationEvaluator,
ToolCallArgsEvaluatorConfig: ToolCallArgsEvaluator,
ToolCallCountEvaluatorConfig: ToolCallCountEvaluator,
ToolCallOrderEvaluatorConfig: ToolCallOrderEvaluator,
ToolCallOutputEvaluatorConfig: ToolCallOutputEvaluator,
}

Contributor:
dumb question: where did this go? how are we doing this conversion now?

Collaborator (Author):
We don't need this. TypeAdapter(CodedEvaluator).validate_python(...) uses the discriminator to resolve it correctly.

Contributor:
you mean, dynamic class conversion? I'm curious, why couldn't we do that in the previous form - because of the naming inconsistency?

Collaborator (Author):
In the previous implementation, the coded evaluators were discriminating on the configs and not the evaluators. Ideally the original implementation should've been this implementation. 🙃

Contributor:
I believe this was done to be too explicit in the previous implementation; thanks for fixing it @akshaylive. @mjnovice we will need a few more rounds of refactoring to get it to GA level on the progress reporting side too :D
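To make the answer above concrete, here is a minimal sketch of the discriminated-union pattern that TypeAdapter(CodedEvaluator).validate_python(...) relies on, and why it makes an explicit config-to-class mapping unnecessary. The field names, discriminator values, and model bodies below are illustrative stand-ins, not the SDK's actual definitions.

# Minimal sketch of Pydantic v2 discriminated-union resolution
# (illustrative models, not the actual uipath SDK classes).
from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field, TypeAdapter


class ContainsEvaluator(BaseModel):
    evaluator_type: Literal["contains"]  # discriminator value for this subclass
    id: str
    search_text: str


class ExactMatchEvaluator(BaseModel):
    evaluator_type: Literal["exactMatch"]  # discriminator value for this subclass
    id: str


# The union itself carries the discriminator, so validate_python returns the
# matching subclass directly; no schema-to-class lookup table is needed.
CodedEvaluator = Annotated[
    Union[ContainsEvaluator, ExactMatchEvaluator],
    Field(discriminator="evaluator_type"),
]

evaluator = TypeAdapter(CodedEvaluator).validate_python(
    {"evaluator_type": "contains", "id": "eval-1", "search_text": "hello"}
)
assert isinstance(evaluator, ContainsEvaluator)

As noted in the thread, the previous shape put the discriminator on the nested config models rather than on the evaluator classes themselves, which is why the explicit mapping was required; moving the discriminator onto the evaluators is what this refactor removes it with.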


class EvaluatorFactory:
"""Factory class for creating evaluator instances based on configuration."""
@@ -130,7 +65,7 @@ def create_evaluator(
data: dict[str, Any],
evaluators_dir: Path | None = None,
agent_model: str | None = None,
) -> BaseEvaluator[Any, Any, Any]:
) -> GenericBaseEvaluator[Any, Any, Any]:
if data.get("version", None) == "1.0":
return cls._create_evaluator_internal(data, evaluators_dir)
else:
@@ -147,31 +82,20 @@ def _create_evaluator_internal(
evaluator_schema
)
if success:
return EvaluatorFactory._create_coded_evaluator_internal(
return EvaluatorFactory._create_custom_coded_evaluator_internal(
data, file_path, class_name, evaluators_dir
)

config: BaseEvaluatorConfig[Any] = TypeAdapter(EvaluatorConfig).validate_python(
data
)
evaluator_class = EVALUATOR_SCHEMA_TO_EVALUATOR_CLASS.get(type(config))
if not evaluator_class:
raise ValueError(f"Unknown evaluator configuration: {config}")
return TypeAdapter(evaluator_class).validate_python(
{
"id": data.get("id"),
"config": EvaluatorFactory._prepare_evaluator_config(data),
}
)
else:
return TypeAdapter(CodedEvaluator).validate_python(data)

@staticmethod
def _create_coded_evaluator_internal(
def _create_custom_coded_evaluator_internal(
data: dict[str, Any],
file_path_str: str,
class_name: str,
evaluators_dir: Path | None = None,
) -> BaseEvaluator[Any, Any, Any]:
"""Create a coded evaluator by dynamically loading from a Python file.
"""Create a custom coded evaluator by dynamically loading from a Python file.

Args:
data: Dictionary containing evaluator configuration with evaluatorTypeId
@@ -242,18 +166,13 @@ def _create_coded_evaluator_internal(
evaluator_id = data.get("id")
if not evaluator_id or not isinstance(evaluator_id, str):
raise ValueError("Evaluator 'id' must be a non-empty string")
return TypeAdapter(evaluator_class).validate_python(
{
"id": evaluator_id,
"config": EvaluatorFactory._prepare_evaluator_config(data),
}
)
return TypeAdapter(evaluator_class).validate_python(data)

@staticmethod
def _create_legacy_evaluator_internal(
data: dict[str, Any],
agent_model: str | None = None,
) -> LegacyBaseEvaluator[Any]:
) -> BaseLegacyEvaluator[Any]:
"""Create an evaluator instance from configuration data.

Args:
@@ -267,97 +186,25 @@ def _create_legacy_evaluator_internal(
Raises:
ValueError: If category is unknown or required fields are missing
"""
params: EvaluatorBaseParams = TypeAdapter(LegacyEvaluator).validate_python(data)

match params:
case LegacyEqualsEvaluatorParams():
return EvaluatorFactory._create_legacy_exact_match_evaluator(params)
case LegacyJsonSimilarityEvaluatorParams():
return EvaluatorFactory._create_legacy_json_similarity_evaluator(params)
case LegacyLLMEvaluatorParams():
return EvaluatorFactory._create_legacy_llm_as_judge_evaluator(
params, agent_model
)
case LegacyTrajectoryEvaluatorParams():
return EvaluatorFactory._create_legacy_trajectory_evaluator(
params, agent_model
)
case _:
raise ValueError(f"Unknown evaluator category: {params}")

@staticmethod
def _create_legacy_exact_match_evaluator(
params: LegacyEqualsEvaluatorParams,
) -> LegacyExactMatchEvaluator:
"""Create a deterministic evaluator."""
return LegacyExactMatchEvaluator(**params.model_dump(), config={})

@staticmethod
def _create_legacy_json_similarity_evaluator(
params: LegacyJsonSimilarityEvaluatorParams,
) -> LegacyJsonSimilarityEvaluator:
"""Create a deterministic evaluator."""
return LegacyJsonSimilarityEvaluator(**params.model_dump(), config={})

@staticmethod
def _create_legacy_llm_as_judge_evaluator(
params: LegacyLLMEvaluatorParams,
agent_model: str | None = None,
) -> LegacyBaseEvaluator[Any]:
"""Create an LLM-as-a-judge evaluator or context precision evaluator based on type."""
if not params.model:
raise ValueError("LLM evaluator must include 'model' field")

# Resolve 'same-as-agent' to actual agent model
if params.model == "same-as-agent":
if not agent_model:
raise ValueError(
"'same-as-agent' model option requires agent settings. "
"Ensure agent.json contains valid model settings."
)
logger.info(
f"Resolving 'same-as-agent' to agent model: {agent_model} "
f"for evaluator '{params.name}'"
)
params = params.model_copy(update={"model": agent_model})

# Check evaluator type to determine which evaluator to create
if params.evaluator_type == LegacyEvaluatorType.ContextPrecision:
return LegacyContextPrecisionEvaluator(**params.model_dump(), config={})
elif params.evaluator_type == LegacyEvaluatorType.Faithfulness:
return LegacyFaithfulnessEvaluator(**params.model_dump(), config={})
else:
if not params.prompt:
raise ValueError("LLM evaluator must include 'prompt' field")

return LegacyLlmAsAJudgeEvaluator(**params.model_dump(), config={})

@staticmethod
def _create_legacy_trajectory_evaluator(
params: LegacyTrajectoryEvaluatorParams,
agent_model: str | None = None,
) -> LegacyTrajectoryEvaluator:
"""Create a trajectory evaluator."""
if not params.prompt:
raise ValueError("Trajectory evaluator must include 'prompt' field")

if not params.model:
raise ValueError("Trajectory evaluator must include 'model' field")

# Resolve 'same-as-agent' to actual agent model
if params.model == "same-as-agent":
if not agent_model:
raise ValueError(
"'same-as-agent' model option requires agent settings. "
"Ensure agent.json contains valid model settings."
evaluator: LegacyEvaluator = TypeAdapter(LegacyEvaluator).validate_python(data)

if isinstance(
evaluator,
LegacyTrajectoryEvaluator
| LegacyLlmAsAJudgeEvaluator
| LegacyContextPrecisionEvaluator
| LegacyFaithfulnessEvaluator,
):
if evaluator.model == "same-as-agent":
Collaborator (Author):
Manually tested this part. Screenshot:

[screenshot]

if not agent_model:
raise ValueError(
"'same-as-agent' model option requires agent settings. "
"Ensure agent.json contains valid model settings."
)
logger.info(
f"Resolving 'same-as-agent' to agent model: {agent_model} "
f"for evaluator '{evaluator.name}'"
)
logger.info(
f"Resolving 'same-as-agent' to agent model: {agent_model} "
f"for evaluator '{params.name}'"
)
params = params.model_copy(update={"model": agent_model})
evaluator.model = agent_model

logger.info(
f"Creating trajectory evaluator '{params.name}' with model: {params.model}"
)
return LegacyTrajectoryEvaluator(**params.model_dump(), config={})
return evaluator