diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py
index 1468c57..4fd56c1 100644
--- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py
+++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py
@@ -6,7 +6,7 @@
 from ldai import log
 from ldai.agent_graph import AgentGraphDefinition, AgentGraphNode
 from ldai.providers import AgentGraphRunner, ToolRegistry
-from ldai.providers.types import AgentGraphRunnerResult, GraphMetrics
+from ldai.providers.types import AgentGraphRunnerResult, AIGraphMetrics

 from ldai_langchain.langchain_helper import (
     build_structured_tools,
@@ -281,10 +281,10 @@ async def run(self, input: Any) -> AgentGraphRunnerResult:
         Builds a LangGraph StateGraph from the AgentGraphDefinition,
         compiles it, and invokes it. Uses a LangChain callback handler to
         collect per-node metrics. Graph-level tracking events are emitted by the
-        managed layer from the returned GraphMetrics.
+        managed layer from the returned AIGraphMetrics.

         :param input: The string prompt to send to the agent graph
-        :return: AgentGraphRunnerResult with the final content and GraphMetrics
+        :return: AgentGraphRunnerResult with the final content and AIGraphMetrics
         """
         start_ns = time.perf_counter_ns()
@@ -309,7 +309,7 @@ async def run(self, input: Any) -> AgentGraphRunnerResult:
             return AgentGraphRunnerResult(
                 content=output,
                 raw=result,
-                metrics=GraphMetrics(
+                metrics=AIGraphMetrics(
                     success=True,
                     path=handler.path,
                     duration_ms=duration_ms,
@@ -330,7 +330,7 @@ async def run(self, input: Any) -> AgentGraphRunnerResult:
             return AgentGraphRunnerResult(
                 content='',
                 raw=None,
-                metrics=GraphMetrics(
+                metrics=AIGraphMetrics(
                     success=False,
                     duration_ms=duration_ms,
                 ),
diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py
index 62a0aaa..9fffcc9 100644
--- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py
+++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py
@@ -5,7 +5,7 @@
 from ldai import log
 from ldai.agent_graph import AgentGraphDefinition, AgentGraphNode
 from ldai.providers import AgentGraphRunner, ToolRegistry
-from ldai.providers.types import AgentGraphRunnerResult, GraphMetrics, LDAIMetrics
+from ldai.providers.types import AgentGraphRunnerResult, AIGraphMetrics, LDAIMetrics

 from ldai_openai.openai_helper import (
     extract_usage_from_request_entry,
@@ -72,7 +72,7 @@ async def run(self, input: Any) -> AgentGraphRunnerResult:
         Graph-level tracking events are emitted by the managed layer.

         :param input: The string prompt to send to the agent graph
-        :return: AgentGraphRunnerResult with the final content and GraphMetrics
+        :return: AgentGraphRunnerResult with the final content and AIGraphMetrics
         """
         self._node_metrics = {}
         path: List[str] = []
@@ -99,7 +99,7 @@ async def run(self, input: Any) -> AgentGraphRunnerResult:
             return AgentGraphRunnerResult(
                 content=str(result.final_output),
                 raw=result,
-                metrics=GraphMetrics(
+                metrics=AIGraphMetrics(
                     success=True,
                     path=path,
                     duration_ms=duration_ms,
@@ -119,7 +119,7 @@ async def run(self, input: Any) -> AgentGraphRunnerResult:
             return AgentGraphRunnerResult(
                 content='',
                 raw=None,
-                metrics=GraphMetrics(
+                metrics=AIGraphMetrics(
                     success=False,
                     path=path,
                     duration_ms=duration_ms,
diff --git a/packages/ai-providers/server-ai-openai/tests/test_openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/tests/test_openai_agent_graph_runner.py
index 3e03067..f6b03b5 100644
--- a/packages/ai-providers/server-ai-openai/tests/test_openai_agent_graph_runner.py
+++ b/packages/ai-providers/server-ai-openai/tests/test_openai_agent_graph_runner.py
@@ -6,7 +6,7 @@
 from ldai.agent_graph import AgentGraphDefinition
 from ldai.models import AIAgentGraphConfig, AIAgentConfig, Edge, ModelConfig, ProviderConfig
 from ldai.providers import ToolRegistry
-from ldai.providers.types import AgentGraphRunnerResult, GraphMetrics
+from ldai.providers.types import AgentGraphRunnerResult, AIGraphMetrics
 from ldai_openai.openai_agent_graph_runner import OpenAIAgentGraphRunner
 from ldai_openai.openai_runner_factory import OpenAIRunnerFactory
 from ldai.evaluator import Evaluator
@@ -84,7 +84,7 @@ async def test_openai_agent_graph_runner_run_raises_when_agents_not_installed():


 @pytest.mark.asyncio
 async def test_openai_agent_graph_runner_run_failure_returns_metrics():
-    """On import failure, returned GraphMetrics has success=False (no tracker needed)."""
+    """On import failure, returned AIGraphMetrics has success=False (no tracker needed)."""
     graph = _make_graph()
     runner = OpenAIAgentGraphRunner(graph, {})
@@ -130,7 +130,7 @@ async def test_openai_agent_graph_runner_run_failure_marks_node_not_success():


 @pytest.mark.asyncio
 async def test_openai_agent_graph_runner_run_success():
-    """Successful run returns AgentGraphRunnerResult with populated GraphMetrics."""
+    """Successful run returns AgentGraphRunnerResult with populated AIGraphMetrics."""
     graph = _make_graph()
     mock_result = MagicMock()
@@ -169,7 +169,7 @@ async def test_openai_agent_graph_runner_run_success():

     assert isinstance(result, AgentGraphRunnerResult)
     assert result.content == "agent answer"
-    assert isinstance(result.metrics, GraphMetrics)
+    assert isinstance(result.metrics, AIGraphMetrics)
     assert result.metrics.success is True
     assert result.metrics.duration_ms is not None
     assert 'root-agent' in result.metrics.path
diff --git a/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py b/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py
index 1091ba8..317a58a 100644
--- a/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py
+++ b/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py
@@ -6,7 +6,7 @@
 with the correct payloads — without making real API calls.
 Tracking events are now emitted by ManagedAgentGraph._flush_graph_tracking()
-from the GraphMetrics returned by the runner, rather than directly inside the
+from the AIGraphMetrics returned by the runner, rather than directly inside the
 runner.

 These tests exercise the full pipeline through ManagedAgentGraph.run().
 """
diff --git a/packages/sdk/server-ai/src/ldai/__init__.py b/packages/sdk/server-ai/src/ldai/__init__.py
index 3082dbe..77e7a0a 100644
--- a/packages/sdk/server-ai/src/ldai/__init__.py
+++ b/packages/sdk/server-ai/src/ldai/__init__.py
@@ -34,8 +34,8 @@
 from ldai.providers import (
     AgentGraphRunner,
     AgentGraphRunnerResult,
-    GraphMetrics,
-    GraphMetricSummary,
+    AIGraphMetrics,
+    AIGraphMetricSummary,
     ManagedGraphResult,
     ManagedResult,
     Runner,
@@ -50,8 +50,8 @@
     'Evaluator',
     'AgentGraphRunner',
     'AgentGraphRunnerResult',
-    'GraphMetrics',
-    'GraphMetricSummary',
+    'AIGraphMetrics',
+    'AIGraphMetricSummary',
     'ManagedGraphResult',
     'ManagedResult',
     'Runner',
diff --git a/packages/sdk/server-ai/src/ldai/providers/__init__.py b/packages/sdk/server-ai/src/ldai/providers/__init__.py
index 3012967..91bdb3f 100644
--- a/packages/sdk/server-ai/src/ldai/providers/__init__.py
+++ b/packages/sdk/server-ai/src/ldai/providers/__init__.py
@@ -4,8 +4,8 @@
 from ldai.providers.runner_factory import RunnerFactory
 from ldai.providers.types import (
     AgentGraphRunnerResult,
-    GraphMetrics,
-    GraphMetricSummary,
+    AIGraphMetrics,
+    AIGraphMetricSummary,
     JudgeResult,
     LDAIMetrics,
     ManagedGraphResult,
@@ -18,8 +18,8 @@
     'AIProvider',
     'AgentGraphRunner',
     'AgentGraphRunnerResult',
-    'GraphMetrics',
-    'GraphMetricSummary',
+    'AIGraphMetrics',
+    'AIGraphMetricSummary',
     'JudgeResult',
     'LDAIMetrics',
     'ManagedGraphResult',
diff --git a/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py b/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py
index e430cbb..86b2eb3 100644
--- a/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py
+++ b/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py
@@ -23,6 +23,6 @@ async def run(self, input: Any) -> AgentGraphRunnerResult:
         Run the agent graph with the given input.

         :param input: The input to the agent graph (string prompt or structured input)
-        :return: AgentGraphRunnerResult containing the content, raw response, and GraphMetrics
+        :return: AgentGraphRunnerResult containing the content, raw response, and AIGraphMetrics
         """
         ...
diff --git a/packages/sdk/server-ai/src/ldai/providers/types.py b/packages/sdk/server-ai/src/ldai/providers/types.py
index 9ade163..cdccb68 100644
--- a/packages/sdk/server-ai/src/ldai/providers/types.py
+++ b/packages/sdk/server-ai/src/ldai/providers/types.py
@@ -87,7 +87,7 @@ class ManagedResult:


 @dataclass
-class GraphMetrics:
+class AIGraphMetrics:
     """Contains raw metrics from a single agent graph run."""

     success: bool
@@ -107,7 +107,7 @@


 @dataclass
-class GraphMetricSummary:
+class AIGraphMetricSummary:
     """Contains a summary of metrics for an agent graph run."""

     success: Optional[bool] = None
@@ -136,7 +136,7 @@ class ManagedGraphResult:
     content: str
     """The graph's final output content."""

-    metrics: GraphMetricSummary
+    metrics: AIGraphMetricSummary
     """Aggregated metric summary from the graph tracker for this run."""

     raw: Optional[Any] = None
@@ -153,7 +153,7 @@ class AgentGraphRunnerResult:
     content: str
     """The graph's final output content."""

-    metrics: GraphMetrics
+    metrics: AIGraphMetrics
     """Metrics from the graph run."""

     raw: Optional[Any] = None
diff --git a/packages/sdk/server-ai/src/ldai/tracker.py b/packages/sdk/server-ai/src/ldai/tracker.py
index 2d62465..d159546 100644
--- a/packages/sdk/server-ai/src/ldai/tracker.py
+++ b/packages/sdk/server-ai/src/ldai/tracker.py
@@ -13,7 +13,7 @@
 from ldai import log

 if TYPE_CHECKING:
-    from ldai.providers.types import GraphMetrics, GraphMetricSummary, LDAIMetrics
+    from ldai.providers.types import AIGraphMetrics, AIGraphMetricSummary, LDAIMetrics


 class FeedbackKind(Enum):
@@ -615,7 +615,7 @@ class AIGraphTracker:
     """
     Tracks graph-level metrics for AI agent graph operations.

-    Maintains an internal :class:`~ldai.providers.types.GraphMetricSummary`
+    Maintains an internal :class:`~ldai.providers.types.AIGraphMetricSummary`
     that is updated as tracking methods are called. Retrieve it via
     :meth:`get_summary`.
     """
@@ -643,15 +643,15 @@ def __init__(
         self._version = version
         self._context = context

-        from ldai.providers.types import GraphMetricSummary
-        self._summary = GraphMetricSummary()
+        from ldai.providers.types import AIGraphMetricSummary
+        self._summary = AIGraphMetricSummary()

     @property
     def graph_key(self) -> str:
         """Graph configuration key used in tracking payloads."""
         return self._graph_key

-    def get_summary(self) -> GraphMetricSummary:
+    def get_summary(self) -> AIGraphMetricSummary:
         """
         Get the current summary of graph-level metrics.
@@ -820,10 +820,10 @@ def track_handoff_failure(self, source_key: str, target_key: str) -> None:
     def _track_from_graph_metrics(
         self,
         result: Any,
-        metrics_extractor: Callable[[Any], Optional[GraphMetrics]],
+        metrics_extractor: Callable[[Any], Optional[AIGraphMetrics]],
         elapsed_ms: int,
     ) -> None:
-        metrics: Optional[GraphMetrics] = None
+        metrics: Optional[AIGraphMetrics] = None
         try:
             metrics = metrics_extractor(result)
         except Exception as exc:
@@ -845,24 +845,24 @@

     def track_graph_metrics_of(
         self,
-        metrics_extractor: Callable[[Any], Optional[GraphMetrics]],
+        metrics_extractor: Callable[[Any], Optional[AIGraphMetrics]],
         func: Callable[[], Any],
     ) -> Any:
         """
         Track graph-level metrics for a synchronous graph operation.

-        Times the operation, extracts :class:`~ldai.providers.types.GraphMetrics`
+        Times the operation, extracts :class:`~ldai.providers.types.AIGraphMetrics`
         via the provided extractor, and fires graph-level tracking events
         (path, duration, success/failure, total tokens).

-        If the extracted ``GraphMetrics`` has a non-``None`` ``duration_ms``,
+        If the extracted ``AIGraphMetrics`` has a non-``None`` ``duration_ms``,
         that value is used instead of the wall-clock elapsed time.

         Node-level metrics are not tracked by this method. For async
         operations, use :meth:`track_graph_metrics_of_async`.

-        :param metrics_extractor: Function that extracts GraphMetrics from the result
+        :param metrics_extractor: Function that extracts AIGraphMetrics from the result
         :param func: Synchronous callable that runs the graph operation
         :return: The result of the operation
         """
@@ -881,7 +881,7 @@

     async def track_graph_metrics_of_async(
         self,
-        metrics_extractor: Callable[[Any], Optional[GraphMetrics]],
+        metrics_extractor: Callable[[Any], Optional[AIGraphMetrics]],
         func: Callable[[], Any],
     ) -> Any:
         """
@@ -889,7 +889,7 @@ async def track_graph_metrics_of_async(

         Same event semantics as :meth:`track_graph_metrics_of`.

-        :param metrics_extractor: Function that extracts GraphMetrics from the result
+        :param metrics_extractor: Function that extracts AIGraphMetrics from the result
         :param func: Async callable that runs the graph operation
         :return: The result of the operation
         """
diff --git a/packages/sdk/server-ai/tests/test_managed_agent_graph.py b/packages/sdk/server-ai/tests/test_managed_agent_graph.py
index 4327e2e..7bfc7b0 100644
--- a/packages/sdk/server-ai/tests/test_managed_agent_graph.py
+++ b/packages/sdk/server-ai/tests/test_managed_agent_graph.py
@@ -10,8 +10,8 @@
 from ldai.providers import AgentGraphRunner, ToolRegistry
 from ldai.providers.types import (
     AgentGraphRunnerResult,
-    GraphMetrics,
-    GraphMetricSummary,
+    AIGraphMetrics,
+    AIGraphMetricSummary,
     LDAIMetrics,
 )
 from ldai.tracker import TokenUsage
@@ -28,19 +28,19 @@ async def run(self, input) -> AgentGraphRunnerResult:
         return AgentGraphRunnerResult(
             content=self._content,
             raw={"input": input},
-            metrics=GraphMetrics(success=True),
+            metrics=AIGraphMetrics(success=True),
         )


 class StubRunnerWithMetrics(AgentGraphRunner):
-    """Runner that returns AgentGraphRunnerResult with full GraphMetrics."""
+    """Runner that returns AgentGraphRunnerResult with full AIGraphMetrics."""
     def __init__(self, content: str = "new shape output"):
         self._content = content

     async def run(self, input) -> AgentGraphRunnerResult:
         return AgentGraphRunnerResult(
             content=self._content,
-            metrics=GraphMetrics(
+            metrics=AIGraphMetrics(
                 success=True,
                 path=["root", "specialist"],
                 duration_ms=42,
@@ -67,7 +67,7 @@ async def run(self, input) -> AgentGraphRunnerResult:
 def _make_graph_tracker_mock(runner_result):
     """Create a mock graph tracker whose track_graph_metrics_of_async returns runner_result."""
     m = runner_result.metrics
-    summary = GraphMetricSummary(
+    summary = AIGraphMetricSummary(
         success=m.success,
         path=list(m.path),
         duration_ms=m.duration_ms,
@@ -104,7 +104,7 @@ def test_managed_agent_graph_get_runner():


 @pytest.mark.asyncio
 async def test_managed_agent_graph_run_surfaces_graph_metrics():
-    """GraphMetrics fields are reflected in GraphMetricSummary."""
+    """AIGraphMetrics fields are reflected in AIGraphMetricSummary."""
     runner = StubRunnerWithMetrics("final answer")
     runner_result = await runner.run("test input")
@@ -214,7 +214,7 @@ async def run(self, input) -> AgentGraphRunnerResult:
             return AgentGraphRunnerResult(
                 content='',
                 raw=None,
-                metrics=GraphMetrics(success=False, duration_ms=5),
+                metrics=AIGraphMetrics(success=False, duration_ms=5),
             )

     failing_runner = FailingRunner()
diff --git a/packages/sdk/server-ai/tests/test_runner_abcs.py b/packages/sdk/server-ai/tests/test_runner_abcs.py
index d9af2e3..7a7bf25 100644
--- a/packages/sdk/server-ai/tests/test_runner_abcs.py
+++ b/packages/sdk/server-ai/tests/test_runner_abcs.py
@@ -5,7 +5,7 @@
     AgentGraphRunnerResult,
     ToolRegistry,
 )
-from ldai.providers.types import GraphMetrics, LDAIMetrics, RunnerResult
+from ldai.providers.types import AIGraphMetrics, LDAIMetrics, RunnerResult


 # --- Concrete test doubles ---
@@ -15,7 +15,7 @@
     async def run(self, input):
         return AgentGraphRunnerResult(
             content=f"graph response to: {input}",
             raw={"raw": input},
-            metrics=GraphMetrics(success=True),
+            metrics=AIGraphMetrics(success=True),
         )
@@ -54,7 +54,7 @@ async def test_agent_graph_runner_run_returns_agent_graph_runner_result():

 @pytest.mark.asyncio
 async def test_agent_graph_runner_result_fields():
-    metrics = GraphMetrics(success=False)
+    metrics = AIGraphMetrics(success=False)
     result = AgentGraphRunnerResult(content="", raw=None, metrics=metrics)
     assert result.content == ""
     assert result.raw is None
diff --git a/packages/sdk/server-ai/tests/test_tracker.py b/packages/sdk/server-ai/tests/test_tracker.py
index cd122d9..609cdbd 100644
--- a/packages/sdk/server-ai/tests/test_tracker.py
+++ b/packages/sdk/server-ai/tests/test_tracker.py
@@ -5,7 +5,7 @@
 from ldclient import Config, Context, LDClient
 from ldclient.integrations.test_data import TestData

-from ldai.providers.types import GraphMetrics, GraphMetricSummary, LDAIMetrics
+from ldai.providers.types import AIGraphMetrics, AIGraphMetricSummary, LDAIMetrics
 from ldai.tracker import AIGraphTracker, FeedbackKind, LDAIConfigTracker, TokenUsage
@@ -576,7 +576,7 @@ def test_ai_graph_tracker_summary_starts_empty(client: LDClient):
     context = Context.create("user-key")
     g = AIGraphTracker(client, "variation-key", "graph-key", 2, context)
     s = g.get_summary()
-    assert isinstance(s, GraphMetricSummary)
+    assert isinstance(s, AIGraphMetricSummary)
     assert s.success is None
     assert s.duration_ms is None
     assert s.usage is None
@@ -666,7 +666,7 @@ def test_track_graph_metrics_of_tracks_success(client: LDClient):
     g = AIGraphTracker(client, "variation-key", "graph-key", 2, context)

     result_obj = "done"
-    metrics = GraphMetrics(
+    metrics = AIGraphMetrics(
         success=True,
         path=["a", "b"],
         duration_ms=100,
@@ -688,7 +688,7 @@ def test_track_graph_metrics_of_tracks_failure(client: LDClient):
     context = Context.create("user-key")
     g = AIGraphTracker(client, "variation-key", "graph-key", 2, context)

-    metrics = GraphMetrics(success=False, duration_ms=5)
+    metrics = AIGraphMetrics(success=False, duration_ms=5)

     g.track_graph_metrics_of(lambda r: metrics, lambda: "done")
@@ -701,7 +701,7 @@ def test_track_graph_metrics_of_uses_wallclock_when_no_duration_ms(client: LDCli
     context = Context.create("user-key")
     g = AIGraphTracker(client, "variation-key", "graph-key", 2, context)

-    metrics = GraphMetrics(success=True, duration_ms=None)
+    metrics = AIGraphMetrics(success=True, duration_ms=None)

     g.track_graph_metrics_of(lambda r: metrics, lambda: "done")
@@ -739,7 +739,7 @@ def test_track_graph_metrics_of_skips_empty_path(client: LDClient):
     context = Context.create("user-key")
     g = AIGraphTracker(client, "variation-key", "graph-key", 2, context)

-    metrics = GraphMetrics(success=True)
+    metrics = AIGraphMetrics(success=True)

     g.track_graph_metrics_of(lambda r: metrics, lambda: "done")
@@ -752,7 +752,7 @@ async def test_track_graph_metrics_of_async_tracks_success(client: LDClient):
     context = Context.create("user-key")
     g = AIGraphTracker(client, "variation-key", "graph-key", 2, context)

-    metrics = GraphMetrics(
+    metrics = AIGraphMetrics(
         success=True,
         path=["x", "y"],
         duration_ms=50,
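
For illustration, the rename leaves the runner contract unchanged: a custom AgentGraphRunner still returns an AgentGraphRunnerResult, only the metrics type is now spelled AIGraphMetrics. A minimal sketch, modeled on the stub runners in test_managed_agent_graph.py; the EchoRunner name and its field values are illustrative, not part of this patch:

# Sketch only: a custom AgentGraphRunner against the renamed API, modeled on
# the stub runners in test_managed_agent_graph.py. Not part of the patch.
from ldai.providers import AgentGraphRunner
from ldai.providers.types import AgentGraphRunnerResult, AIGraphMetrics


class EchoRunner(AgentGraphRunner):
    """Hypothetical runner that echoes its input."""

    async def run(self, input) -> AgentGraphRunnerResult:
        return AgentGraphRunnerResult(
            content=f"echo: {input}",
            raw={"input": input},
            metrics=AIGraphMetrics(
                success=True,
                path=["root"],    # node keys visited, in order
                duration_ms=1,    # if not None, used instead of wall-clock time
            ),
        )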
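
Likewise, only the type names change on the tracking side. A sketch of feeding the renamed metrics through AIGraphTracker.track_graph_metrics_of, following the pattern in test_tracker.py; the record_graph_run wrapper is a hypothetical name, and the client is assumed to be an initialized LDClient (the tests use a fixture):

# Sketch only: driving AIGraphTracker with the renamed types, following
# test_tracker.py. The record_graph_run wrapper is hypothetical.
from ldclient import Context, LDClient

from ldai.providers.types import AIGraphMetrics, AIGraphMetricSummary
from ldai.tracker import AIGraphTracker


def record_graph_run(client: LDClient) -> AIGraphMetricSummary:
    context = Context.create("user-key")
    tracker = AIGraphTracker(client, "variation-key", "graph-key", 2, context)

    metrics = AIGraphMetrics(success=True, path=["a", "b"], duration_ms=100)

    # The extractor maps the operation's result to AIGraphMetrics; a non-None
    # duration_ms overrides the wall-clock elapsed time in the emitted events.
    tracker.track_graph_metrics_of(lambda result: metrics, lambda: "done")
    return tracker.get_summary()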