Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions examples/agent_patterns/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ The mental model for handoffs is that the new agent "takes over". It sees the pr
For example, you could model the translation task above as tool calls instead: rather than handing over to the language-specific agent, you could call the agent as a tool, and then use the result in the next step. This enables things like translating multiple languages at once.

See the [`agents_as_tools.py`](./agents_as_tools.py) file for an example of this.
See the [`agents_as_tools_streaming.py`](./agents_as_tools_streaming.py) file for a streaming variant that taps into nested agent events via `on_stream`.

## LLM-as-a-judge

Expand Down
57 changes: 57 additions & 0 deletions examples/agent_patterns/agents_as_tools_streaming.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
import asyncio

from agents import Agent, AgentToolStreamEvent, ModelSettings, Runner, function_tool, trace


@function_tool(
    name_override="billing_status_checker",
    description_override="Answer questions about customer billing status.",
)
def billing_status_checker(customer_id: str | None = None, question: str = "") -> str:
    """Return a canned billing answer, or a fallback for non-billing questions.

    Args:
        customer_id: Optional customer identifier echoed back in the answer.
        question: Free-form question text, matched case-insensitively.
    """
    # Case-insensitive keyword match. Note "bill" is a substring of
    # "billing", so either term triggers the canned answer.
    lowered = question.lower()
    if any(keyword in lowered for keyword in ("bill", "billing")):
        return f"This customer (ID: {customer_id})'s bill is $100"
    return "I can only answer questions about billing."


def handle_stream(event: AgentToolStreamEvent) -> None:
    """Log each streaming event emitted by the nested billing agent run."""
    nested_event = event["event"]
    agent_name = event["agent_name"]
    print(f"[stream] agent={agent_name} type={nested_event.type} {nested_event}")


async def main() -> None:
    """Run the customer-support example, surfacing nested-agent stream events."""
    with trace("Agents as tools streaming example"):
        # Nested agent: tool_choice="required" forces it to call its
        # billing tool on every turn.
        billing = Agent(
            name="Billing Agent",
            instructions="You are a billing agent that answers billing questions.",
            model_settings=ModelSettings(tool_choice="required"),
            tools=[billing_status_checker],
        )

        # Expose the nested agent as a tool. Supplying on_stream runs the
        # nested agent in streaming mode and forwards each event to
        # handle_stream as it arrives.
        billing_tool = billing.as_tool(
            tool_name="billing_agent",
            tool_description="You are a billing agent that answers billing questions.",
            on_stream=handle_stream,
        )

        # Orchestrating agent: delegates billing questions to the tool above.
        support = Agent(
            name="Customer Support Agent",
            instructions=(
                "You are a customer support agent. Always call the billing agent to answer billing "
                "questions and return the billing agent response to the user."
            ),
            tools=[billing_tool],
        )

        run = await Runner.run(
            support,
            "Hello, my customer ID is ABC123. How much is my bill for this month?",
        )

        print(f"\nFinal response:\n{run.final_output}")


if __name__ == "__main__":
    asyncio.run(main())
4 changes: 2 additions & 2 deletions examples/financial_research_agent/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

from rich.console import Console

from agents import Runner, RunResult, custom_span, gen_trace_id, trace
from agents import Runner, RunResult, RunResultStreaming, custom_span, gen_trace_id, trace

from .agents.financials_agent import financials_agent
from .agents.planner_agent import FinancialSearchItem, FinancialSearchPlan, planner_agent
Expand All @@ -17,7 +17,7 @@
from .printer import Printer


async def _summary_extractor(run_result: RunResult) -> str:
async def _summary_extractor(run_result: RunResult | RunResultStreaming) -> str:
"""Custom output extractor for sub‑agents that return an AnalysisSummary."""
# The financial/risk analyst agents emit an AnalysisSummary with a `summary` field.
# We want the tool call to return just that summary text so the writer can drop it inline.
Expand Down
2 changes: 2 additions & 0 deletions src/agents/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
from .agent import (
Agent,
AgentBase,
AgentToolStreamEvent,
StopAtTools,
ToolsToFinalOutputFunction,
ToolsToFinalOutputResult,
Expand Down Expand Up @@ -214,6 +215,7 @@ def enable_verbose_stdout_logging():
__all__ = [
"Agent",
"AgentBase",
"AgentToolStreamEvent",
"StopAtTools",
"ToolsToFinalOutputFunction",
"ToolsToFinalOutputResult",
Expand Down
80 changes: 64 additions & 16 deletions src/agents/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,9 @@
from .lifecycle import AgentHooks, RunHooks
from .mcp import MCPServer
from .memory.session import Session
from .result import RunResult
from .result import RunResult, RunResultStreaming
from .run import RunConfig
from .stream_events import StreamEvent


@dataclass
Expand All @@ -58,6 +59,19 @@ class ToolsToFinalOutputResult:
"""


class AgentToolStreamEvent(TypedDict):
"""Streaming event emitted when an agent is invoked as a tool."""

event: StreamEvent
"""The streaming event from the nested agent run."""

agent_name: str
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

instead of agent_name, why not pass the Agent object?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yeah this is true. i will revisit the properties in this object

"""The name of the nested agent emitting the event."""

tool_call_id: str | None
"""The originating tool call ID, if available."""


class StopAtTools(TypedDict):
stop_at_tool_names: list[str]
"""A list of tool names, any of which will stop the agent from running further."""
Expand Down Expand Up @@ -382,9 +396,12 @@ def as_tool(
self,
tool_name: str | None,
tool_description: str | None,
custom_output_extractor: Callable[[RunResult], Awaitable[str]] | None = None,
custom_output_extractor: (
Callable[[RunResult | RunResultStreaming], Awaitable[str]] | None
) = None,
is_enabled: bool
| Callable[[RunContextWrapper[Any], AgentBase[Any]], MaybeAwaitable[bool]] = True,
on_stream: Callable[[AgentToolStreamEvent], MaybeAwaitable[None]] | None = None,
run_config: RunConfig | None = None,
max_turns: int | None = None,
hooks: RunHooks[TContext] | None = None,
Expand All @@ -409,6 +426,8 @@ def as_tool(
is_enabled: Whether the tool is enabled. Can be a bool or a callable that takes the run
context and agent and returns whether the tool is enabled. Disabled tools are hidden
from the LLM at runtime.
on_stream: Optional callback (sync or async) to receive streaming events from the nested
agent run. When provided, the nested agent is executed in streaming mode.
"""

@function_tool(
Expand All @@ -420,22 +439,51 @@ async def run_agent(context: RunContextWrapper, input: str) -> Any:
from .run import DEFAULT_MAX_TURNS, Runner

resolved_max_turns = max_turns if max_turns is not None else DEFAULT_MAX_TURNS

output = await Runner.run(
starting_agent=self,
input=input,
context=context.context,
run_config=run_config,
max_turns=resolved_max_turns,
hooks=hooks,
previous_response_id=previous_response_id,
conversation_id=conversation_id,
session=session,
)
run_result: RunResult | RunResultStreaming

if on_stream is not None:
run_result = Runner.run_streamed(
starting_agent=self,
input=input,
context=context.context,
run_config=run_config,
max_turns=resolved_max_turns,
hooks=hooks,
previous_response_id=previous_response_id,
conversation_id=conversation_id,
session=session,
)
async for event in run_result.stream_events():
payload: AgentToolStreamEvent = {
"event": event,
"agent_name": self.name,
"tool_call_id": getattr(context, "tool_call_id", None),
}
try:
maybe_result = on_stream(payload)
if inspect.isawaitable(maybe_result):
await maybe_result
Comment on lines +463 to +465
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this can be kinda bad since on_stream will block you from going to the next event. fine for now, but we should consider a queue based pattern where we write to a queue from here and the consumer reads from the queue, and we aren't blocking

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

good call; actually i made these executions async in the TS SDK. so will make this more efficient and consistent.

except Exception:
logger.exception(
"Error while handling on_stream event for agent tool %s.",
self.name,
)
else:
run_result = await Runner.run(
starting_agent=self,
input=input,
context=context.context,
run_config=run_config,
max_turns=resolved_max_turns,
hooks=hooks,
previous_response_id=previous_response_id,
conversation_id=conversation_id,
session=session,
)
if custom_output_extractor:
return await custom_output_extractor(output)
return await custom_output_extractor(run_result)

return output.final_output
return run_result.final_output

return run_agent

Expand Down
Loading