Merged
1 change: 1 addition & 0 deletions examples/financial_research_agent/agents/search_agent.py
@@ -12,6 +12,7 @@
 
 search_agent = Agent(
     name="FinancialSearchAgent",
+    model="gpt-4.1",
     instructions=INSTRUCTIONS,
     tools=[WebSearchTool()],
     model_settings=ModelSettings(tool_choice="required"),
Member Author: this option (tool_choice="required") is not compatible with gpt-5, so gpt-4.1 is set explicitly for this example.

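For anyone who wants to keep this example on gpt-5 instead of pinning gpt-4.1, a minimal sketch of the alternative (an assumption based on the note in examples/research_bot/agents/search_agent.py below; the INSTRUCTIONS string here is a placeholder for the module's own constant):

from agents import Agent, ModelSettings, WebSearchTool

# Hypothetical gpt-5 variant: tool_choice="required" is not supported by
# gpt-5, so fall back to "auto" and let the model decide when to search.
INSTRUCTIONS = "You are a financial research search agent."  # placeholder

search_agent_gpt5 = Agent(
    name="FinancialSearchAgent",
    model="gpt-5",
    instructions=INSTRUCTIONS,
    tools=[WebSearchTool()],
    model_settings=ModelSettings(tool_choice="auto"),
)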
10 changes: 10 additions & 0 deletions examples/handoffs/message_filter.py
@@ -5,6 +5,7 @@
 
 from agents import Agent, HandoffInputData, Runner, function_tool, handoff, trace
 from agents.extensions import handoff_filters
+from agents.models import is_gpt_5_default
 
 
 @function_tool
@@ -14,6 +15,15 @@ def random_number_tool(max: int) -> int:
 
 
 def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:
+    if is_gpt_5_default():
+        print("gpt-5 is enabled, so we're not filtering the input history")
+        # when using gpt-5, removing some of the items could break things, so we do this filtering only for other models
+        return HandoffInputData(
+            input_history=handoff_message_data.input_history,
+            pre_handoff_items=tuple(handoff_message_data.pre_handoff_items),
+            new_items=tuple(handoff_message_data.new_items),
+        )
+
     # First, we'll remove any tool-related messages from the message history
     handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)

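For context, the filter above only takes effect once it is attached to a handoff; a rough sketch of the wiring (agent names and instructions here are assumptions, not the example's exact values):

from agents import Agent, handoff

spanish_agent = Agent(
    name="Spanish Assistant",  # assumed name
    instructions="You only speak Spanish.",
)

triage_agent = Agent(
    name="Triage Assistant",  # assumed name
    instructions="Hand off to the Spanish assistant if the user speaks Spanish.",
    # input_filter runs on the conversation history before the target agent sees it
    handoffs=[handoff(spanish_agent, input_filter=spanish_handoff_message_filter)],
)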
10 changes: 10 additions & 0 deletions examples/handoffs/message_filter_streaming.py
@@ -5,6 +5,7 @@
 
 from agents import Agent, HandoffInputData, Runner, function_tool, handoff, trace
 from agents.extensions import handoff_filters
+from agents.models import is_gpt_5_default
 
 
 @function_tool
@@ -14,6 +15,15 @@ def random_number_tool(max: int) -> int:
 
 
 def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:
+    if is_gpt_5_default():
+        print("gpt-5 is enabled, so we're not filtering the input history")
+        # when using gpt-5, removing some of the items could break things, so we do this filtering only for other models
+        return HandoffInputData(
+            input_history=handoff_message_data.input_history,
+            pre_handoff_items=tuple(handoff_message_data.pre_handoff_items),
+            new_items=tuple(handoff_message_data.new_items),
+        )
+
     # First, we'll remove any tool-related messages from the message history
     handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)

5 changes: 4 additions & 1 deletion examples/hosted_mcp/approvals.py
@@ -44,7 +44,10 @@ async def main(verbose: bool, stream: bool):
             print(f"Got event of type {event.item.__class__.__name__}")
         print(f"Done streaming; final result: {result.final_output}")
     else:
-        res = await Runner.run(agent, "Which language is this repo written in?")
+        res = await Runner.run(
+            agent,
+            "Which language is this repo written in? Your MCP server should know what the repo is.",
+        )

Member Author: gpt-5 models could be confused by this MCP server's rule (the repo information is available in the MCP server URL), so I updated the instructions to be clearer.
         print(res.final_output)
 
     if verbose:
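For reference, the agent in these hosted MCP examples is constructed with a HostedMCPTool whose server URL identifies the repo; a hedged sketch of that shape (server_label and server_url are placeholders, not the example's actual values):

from agents import Agent, HostedMCPTool

agent = Agent(
    name="Assistant",
    tools=[
        HostedMCPTool(
            tool_config={
                "type": "mcp",
                "server_label": "example_repo",  # placeholder
                "server_url": "https://example.com/mcp",  # placeholder
                "require_approval": "never",
            }
        )
    ],
)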
5 changes: 4 additions & 1 deletion examples/hosted_mcp/simple.py
@@ -29,7 +29,10 @@ async def main(verbose: bool, stream: bool):
             print(f"Got event of type {event.item.__class__.__name__}")
         print(f"Done streaming; final result: {result.final_output}")
     else:
-        res = await Runner.run(agent, "Which language is this repo written in?")
+        res = await Runner.run(
+            agent,
+            "Which language is this repo written in? Your MCP server should know what the repo is.",
+        )
         print(res.final_output)
         # The repository is primarily written in multiple languages, including Rust and TypeScript...

36 changes: 17 additions & 19 deletions examples/reasoning_content/main.py
@@ -1,25 +1,26 @@
 """
 Example demonstrating how to use the reasoning content feature with models that support it.
 
-Some models, like deepseek-reasoner, provide a reasoning_content field in addition to the regular content.
+Some models, like gpt-5, provide a reasoning_content field in addition to the regular content.
 This example shows how to access and use this reasoning content from both streaming and non-streaming responses.
 
 To run this example, you need to:
 1. Set your OPENAI_API_KEY environment variable
-2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
+2. Use a model that supports reasoning content (e.g., gpt-5)
 """
 
 import asyncio
 import os
 from typing import Any, cast
 
 from openai.types.responses import ResponseOutputRefusal, ResponseOutputText
+from openai.types.shared.reasoning import Reasoning
 
 from agents import ModelSettings
 from agents.models.interface import ModelTracing
 from agents.models.openai_provider import OpenAIProvider
 
-MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "deepseek-reasoner"
+MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "gpt-5"
 
 
 async def stream_with_reasoning_content():
@@ -36,10 +37,11 @@ async def stream_with_reasoning_content():
     reasoning_content = ""
     regular_content = ""
 
+    output_text_already_started = False
     async for event in model.stream_response(
         system_instructions="You are a helpful assistant that writes creative content.",
         input="Write a haiku about recursion in programming",
-        model_settings=ModelSettings(),
+        model_settings=ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed")),
Member Author: without these settings, reasoning summary items won't show up with OpenAI models, so I've updated this example.

         tools=[],
         output_schema=None,
         handoffs=[],
@@ -48,18 +50,16 @@
         prompt=None,
     ):
         if event.type == "response.reasoning_summary_text.delta":
-            print(
-                f"\033[33m{event.delta}\033[0m", end="", flush=True
-            )  # Yellow for reasoning content
+            # Yellow for reasoning content
+            print(f"\033[33m{event.delta}\033[0m", end="", flush=True)
             reasoning_content += event.delta
         elif event.type == "response.output_text.delta":
-            print(f"\033[32m{event.delta}\033[0m", end="", flush=True)  # Green for regular content
+            if not output_text_already_started:
+                print("\n")
Member Author: improved the readability of the output.

+            output_text_already_started = True
+            # Green for regular content
+            print(f"\033[32m{event.delta}\033[0m", end="", flush=True)
             regular_content += event.delta
 
-    print("\n\nReasoning Content:")
-    print(reasoning_content)
-    print("\nRegular Content:")
-    print(regular_content)
+    print("\n")


@@ -77,7 +77,7 @@ async def get_response_with_reasoning_content():
     response = await model.get_response(
         system_instructions="You are a helpful assistant that explains technical concepts clearly.",
         input="Explain the concept of recursion in programming",
-        model_settings=ModelSettings(),
+        model_settings=ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed")),
         tools=[],
         output_schema=None,
         handoffs=[],
@@ -102,12 +102,10 @@ async def get_response_with_reasoning_content():
             refusal_item = cast(Any, content_item)
             regular_content = refusal_item.refusal
 
-    print("\nReasoning Content:")
+    print("\n\n### Reasoning Content:")
     print(reasoning_content or "No reasoning content provided")
 
-    print("\nRegular Content:")
+    print("\n\n### Regular Content:")
     print(regular_content or "No regular content provided")
 
-    print("\n")


@@ -118,7 +116,7 @@ async def main():
     except Exception as e:
         print(f"Error: {e}")
         print("\nNote: This example requires a model that supports reasoning content.")
-        print("You may need to use a specific model like deepseek-reasoner or similar.")
+        print("You may need to use a specific model like gpt-5 or similar.")
 
 
 if __name__ == "__main__":
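As the comment above notes, OpenAI models only emit reasoning summary items when reasoning is explicitly configured. A minimal sketch of the settings this PR applies everywhere (the accepted value sets in the comments are my reading of the openai types, so treat them as assumptions):

from openai.types.shared.reasoning import Reasoning

from agents import ModelSettings

# effort controls how much the model reasons (gpt-5 also accepts "minimal",
# though some tools don't support it, per the code interpreter note below).
# summary controls whether reasoning summaries are emitted
# ("auto" | "concise" | "detailed").
settings = ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed"))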
63 changes: 23 additions & 40 deletions examples/reasoning_content/runner_example.py
@@ -6,17 +6,18 @@
 
 To run this example, you need to:
 1. Set your OPENAI_API_KEY environment variable
-2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
+2. Use a model that supports reasoning content (e.g., gpt-5)
 """
 
 import asyncio
 import os
-from typing import Any
 
-from agents import Agent, Runner, trace
+from openai.types.shared.reasoning import Reasoning
+
+from agents import Agent, ModelSettings, Runner, trace
 from agents.items import ReasoningItem
 
-MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "deepseek-reasoner"
+MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "gpt-5"
 
 
 async def main():
@@ -27,6 +28,7 @@ async def main():
         name="Reasoning Agent",
         instructions="You are a helpful assistant that explains your reasoning step by step.",
         model=MODEL_NAME,
+        model_settings=ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed")),
     )
 
     # Example 1: Non-streaming response
@@ -35,53 +37,34 @@ async def main():
         result = await Runner.run(
             agent, "What is the square root of 841? Please explain your reasoning."
         )
 
         # Extract reasoning content from the result items
         reasoning_content = None
-        # RunResult has 'response' attribute which has 'output' attribute
Member Author: this does not work with OpenAI models, so I revised this example.

-        for item in result.response.output:  # type: ignore
-            if isinstance(item, ReasoningItem):
-                reasoning_content = item.summary[0].text  # type: ignore
+        for item in result.new_items:
+            if isinstance(item, ReasoningItem) and len(item.raw_item.summary) > 0:
+                reasoning_content = item.raw_item.summary[0].text
                 break
 
-        print("\nReasoning Content:")
+        print("\n### Reasoning Content:")
         print(reasoning_content or "No reasoning content provided")
 
-        print("\nFinal Output:")
+        print("\n### Final Output:")
         print(result.final_output)

     # Example 2: Streaming response
     with trace("Reasoning Content - Streaming"):
         print("\n=== Example 2: Streaming response ===")
-        print("\nStreaming response:")
 
-        # Buffers to collect reasoning and regular content
-        reasoning_buffer = ""
-        content_buffer = ""
-
-        # RunResultStreaming is async iterable
         stream = Runner.run_streamed(agent, "What is 15 x 27? Please explain your reasoning.")
 
-        async for event in stream:  # type: ignore
-            if isinstance(event, ReasoningItem):
-                # This is reasoning content
-                reasoning_item: Any = event
-                reasoning_buffer += reasoning_item.summary[0].text
-                print(
-                    f"\033[33m{reasoning_item.summary[0].text}\033[0m", end="", flush=True
-                )  # Yellow for reasoning
-            elif hasattr(event, "text"):
-                # This is regular content
-                content_buffer += event.text
-                print(
-                    f"\033[32m{event.text}\033[0m", end="", flush=True
-                )  # Green for regular content
-
-        print("\n\nCollected Reasoning Content:")
-        print(reasoning_buffer)
-
-        print("\nCollected Final Answer:")
-        print(content_buffer)
+        output_text_already_started = False
+        async for event in stream.stream_events():
Member Author: same as above.

+            if event.type == "raw_response_event":
+                if event.data.type == "response.reasoning_summary_text.delta":
+                    print(f"\033[33m{event.data.delta}\033[0m", end="", flush=True)
+                elif event.data.type == "response.output_text.delta":
+                    if not output_text_already_started:
+                        print("\n")
+                    output_text_already_started = True
+                    print(f"\033[32m{event.data.delta}\033[0m", end="", flush=True)
 
+        print("\n")
 
 
 if __name__ == "__main__":
6 changes: 4 additions & 2 deletions examples/research_bot/agents/planner_agent.py
@@ -1,6 +1,7 @@
+from openai.types.shared.reasoning import Reasoning
 from pydantic import BaseModel
 
-from agents import Agent
+from agents import Agent, ModelSettings
 
 PROMPT = (
     "You are a helpful research assistant. Given a query, come up with a set of web searches "
@@ -24,6 +25,7 @@ class WebSearchPlan(BaseModel):
 planner_agent = Agent(
     name="PlannerAgent",
     instructions=PROMPT,
-    model="gpt-4o",
+    model="gpt-5",
+    model_settings=ModelSettings(reasoning=Reasoning(effort="medium")),
     output_type=WebSearchPlan,
 )
3 changes: 3 additions & 0 deletions examples/research_bot/agents/search_agent.py
@@ -12,7 +12,10 @@
 
 search_agent = Agent(
     name="Search agent",
+    model="gpt-4.1",
     instructions=INSTRUCTIONS,
     tools=[WebSearchTool()],
+    # Note that the gpt-5 model does not support tool_choice="required",
+    # so if you want to migrate to gpt-5, you'll need to use "auto" instead.
     model_settings=ModelSettings(tool_choice="required"),
 )
6 changes: 4 additions & 2 deletions examples/research_bot/agents/writer_agent.py
@@ -1,7 +1,8 @@
 # Agent used to synthesize a final report from the individual summaries.
+from openai.types.shared.reasoning import Reasoning
 from pydantic import BaseModel
 
-from agents import Agent
+from agents import Agent, ModelSettings
 
 PROMPT = (
     "You are a senior researcher tasked with writing a cohesive report for a research query. "
@@ -28,6 +29,7 @@ class ReportData(BaseModel):
 writer_agent = Agent(
     name="WriterAgent",
     instructions=PROMPT,
-    model="o3-mini",
+    model="gpt-5-mini",
+    model_settings=ModelSettings(reasoning=Reasoning(effort="medium")),
     output_type=ReportData,
 )
3 changes: 3 additions & 0 deletions examples/tools/code_interpreter.py
@@ -6,6 +6,9 @@
 async def main():
     agent = Agent(
         name="Code interpreter",
+        # Note that using a gpt-5 model with streaming for this tool requires org verification.
+        # Also, the code interpreter tool does not support gpt-5's minimal reasoning effort.
+        model="gpt-4.1",
         instructions="You love doing math.",
         tools=[
             CodeInterpreterTool(
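If you do want to experiment with gpt-5 here despite those constraints, a hedged sketch (assumes a verified org for streaming and a non-minimal reasoning effort; the tool_config shape is assumed from the SDK's code interpreter example):

from openai.types.shared.reasoning import Reasoning

from agents import Agent, CodeInterpreterTool, ModelSettings

agent_gpt5 = Agent(
    name="Code interpreter",
    model="gpt-5",
    # "minimal" reasoning effort is not supported with this tool, so use "low" or higher.
    model_settings=ModelSettings(reasoning=Reasoning(effort="low")),
    instructions="You love doing math.",
    tools=[
        CodeInterpreterTool(
            # container config assumed from the SDK's code interpreter example
            tool_config={"type": "code_interpreter", "container": {"type": "auto"}},
        )
    ],
)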