diff --git a/examples/financial_research_agent/agents/search_agent.py b/examples/financial_research_agent/agents/search_agent.py
index 4ef2522da..6e7c0b054 100644
--- a/examples/financial_research_agent/agents/search_agent.py
+++ b/examples/financial_research_agent/agents/search_agent.py
@@ -12,6 +12,7 @@
 
 search_agent = Agent(
     name="FinancialSearchAgent",
+    model="gpt-4.1",
     instructions=INSTRUCTIONS,
     tools=[WebSearchTool()],
     model_settings=ModelSettings(tool_choice="required"),
diff --git a/examples/handoffs/message_filter.py b/examples/handoffs/message_filter.py
index 96f74ec9c..20460d3ac 100644
--- a/examples/handoffs/message_filter.py
+++ b/examples/handoffs/message_filter.py
@@ -5,6 +5,7 @@
 
 from agents import Agent, HandoffInputData, Runner, function_tool, handoff, trace
 from agents.extensions import handoff_filters
+from agents.models import is_gpt_5_default
 
 
 @function_tool
@@ -14,6 +15,15 @@ def random_number_tool(max: int) -> int:
 
 
 def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:
+    if is_gpt_5_default():
+        print("gpt-5 is enabled, so we're not filtering the input history")
+        # When using gpt-5, removing some of the items could break things, so we do this filtering only for other models.
+        return HandoffInputData(
+            input_history=handoff_message_data.input_history,
+            pre_handoff_items=tuple(handoff_message_data.pre_handoff_items),
+            new_items=tuple(handoff_message_data.new_items),
+        )
+
     # First, we'll remove any tool-related messages from the message history
     handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)
 
diff --git a/examples/handoffs/message_filter_streaming.py b/examples/handoffs/message_filter_streaming.py
index 35a2984f4..604c5d1d6 100644
--- a/examples/handoffs/message_filter_streaming.py
+++ b/examples/handoffs/message_filter_streaming.py
@@ -5,6 +5,7 @@
 
 from agents import Agent, HandoffInputData, Runner, function_tool, handoff, trace
 from agents.extensions import handoff_filters
+from agents.models import is_gpt_5_default
 
 
 @function_tool
@@ -14,6 +15,15 @@ def random_number_tool(max: int) -> int:
 
 
 def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:
+    if is_gpt_5_default():
+        print("gpt-5 is enabled, so we're not filtering the input history")
+        # When using gpt-5, removing some of the items could break things, so we do this filtering only for other models.
+        return HandoffInputData(
+            input_history=handoff_message_data.input_history,
+            pre_handoff_items=tuple(handoff_message_data.pre_handoff_items),
+            new_items=tuple(handoff_message_data.new_items),
+        )
+
     # First, we'll remove any tool-related messages from the message history
     handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)
 
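Reviewer note: the guard added in both filters above turns the filter into a pass-through whenever a gpt-5 model is the default. For context, a minimal sketch of how such a filter is wired up, assuming the `handoff(..., input_filter=...)` API these examples use (the agent here is hypothetical):

```python
from agents import Agent, handoff

# Hypothetical agent for illustration; the examples define their own.
spanish_agent = Agent(name="Spanish Assistant", instructions="Respond in Spanish.")

# The filter defined in the diff above runs on every handoff to this agent;
# with a gpt-5 default it now returns the history unchanged.
to_spanish = handoff(spanish_agent, input_filter=spanish_handoff_message_filter)
```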
diff --git a/examples/hosted_mcp/approvals.py b/examples/hosted_mcp/approvals.py
index 3080a1d63..c3de0db44 100644
--- a/examples/hosted_mcp/approvals.py
+++ b/examples/hosted_mcp/approvals.py
@@ -44,7 +44,10 @@ async def main(verbose: bool, stream: bool):
             print(f"Got event of type {event.item.__class__.__name__}")
         print(f"Done streaming; final result: {result.final_output}")
     else:
-        res = await Runner.run(agent, "Which language is this repo written in?")
+        res = await Runner.run(
+            agent,
+            "Which language is this repo written in? Your MCP server should know what the repo is.",
+        )
         print(res.final_output)
 
     if verbose:
diff --git a/examples/hosted_mcp/simple.py b/examples/hosted_mcp/simple.py
index 895fdfbe0..5de78648c 100644
--- a/examples/hosted_mcp/simple.py
+++ b/examples/hosted_mcp/simple.py
@@ -29,7 +29,10 @@ async def main(verbose: bool, stream: bool):
             print(f"Got event of type {event.item.__class__.__name__}")
         print(f"Done streaming; final result: {result.final_output}")
     else:
-        res = await Runner.run(agent, "Which language is this repo written in?")
+        res = await Runner.run(
+            agent,
+            "Which language is this repo written in? Your MCP server should know what the repo is.",
+        )
         print(res.final_output)
         # The repository is primarily written in multiple languages, including Rust and TypeScript...
diff --git a/examples/reasoning_content/main.py b/examples/reasoning_content/main.py
index a250aa9ca..9da2a5690 100644
--- a/examples/reasoning_content/main.py
+++ b/examples/reasoning_content/main.py
@@ -1,12 +1,12 @@
 """
 Example demonstrating how to use the reasoning content feature with models that support it.
-Some models, like deepseek-reasoner, provide a reasoning_content field in addition to the regular content.
+Some models, like gpt-5, provide a reasoning_content field in addition to the regular content.
 This example shows how to access and use this reasoning content from both streaming and non-streaming responses.
 
 To run this example, you need to:
 1. Set your OPENAI_API_KEY environment variable
-2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
+2. Use a model that supports reasoning content (e.g., gpt-5)
 """
 
 import asyncio
@@ -14,12 +14,13 @@
 from typing import Any, cast
 
 from openai.types.responses import ResponseOutputRefusal, ResponseOutputText
+from openai.types.shared.reasoning import Reasoning
 
 from agents import ModelSettings
 from agents.models.interface import ModelTracing
 from agents.models.openai_provider import OpenAIProvider
 
-MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "deepseek-reasoner"
+MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "gpt-5"
 
 
 async def stream_with_reasoning_content():
@@ -36,10 +37,11 @@ async def stream_with_reasoning_content():
 
     reasoning_content = ""
     regular_content = ""
+    output_text_already_started = False
     async for event in model.stream_response(
         system_instructions="You are a helpful assistant that writes creative content.",
         input="Write a haiku about recursion in programming",
-        model_settings=ModelSettings(),
+        model_settings=ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed")),
         tools=[],
         output_schema=None,
         handoffs=[],
@@ -48,18 +50,16 @@ async def stream_with_reasoning_content():
         prompt=None,
     ):
         if event.type == "response.reasoning_summary_text.delta":
-            print(
-                f"\033[33m{event.delta}\033[0m", end="", flush=True
-            )  # Yellow for reasoning content
+            # Yellow for reasoning content
+            print(f"\033[33m{event.delta}\033[0m", end="", flush=True)
             reasoning_content += event.delta
         elif event.type == "response.output_text.delta":
-            print(f"\033[32m{event.delta}\033[0m", end="", flush=True)  # Green for regular content
+            if not output_text_already_started:
+                print("\n")
+                output_text_already_started = True
+            # Green for regular content
+            print(f"\033[32m{event.delta}\033[0m", end="", flush=True)
             regular_content += event.delta
-
-    print("\n\nReasoning Content:")
-    print(reasoning_content)
-    print("\nRegular Content:")
-    print(regular_content)
     print("\n")
 
 
@@ -77,7 +77,7 @@ async def get_response_with_reasoning_content():
     response = await model.get_response(
         system_instructions="You are a helpful assistant that explains technical concepts clearly.",
         input="Explain the concept of recursion in programming",
-        model_settings=ModelSettings(),
+        model_settings=ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed")),
         tools=[],
         output_schema=None,
         handoffs=[],
@@ -102,12 +102,10 @@ async def get_response_with_reasoning_content():
             refusal_item = cast(Any, content_item)
             regular_content = refusal_item.refusal
 
-    print("\nReasoning Content:")
+    print("\n\n### Reasoning Content:")
     print(reasoning_content or "No reasoning content provided")
-
-    print("\nRegular Content:")
+    print("\n\n### Regular Content:")
     print(regular_content or "No regular content provided")
-    print("\n")
 
 
@@ -118,7 +116,7 @@ async def main():
     except Exception as e:
         print(f"Error: {e}")
         print("\nNote: This example requires a model that supports reasoning content.")
-        print("You may need to use a specific model like deepseek-reasoner or similar.")
+        print("You may need to use a specific model like gpt-5 or similar.")
 
 
 if __name__ == "__main__":
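The substantive change in this file is the `ModelSettings` update: reasoning summaries are only streamed back when explicitly requested. A minimal sketch of the settings in isolation (assuming only the types already imported in the hunks above):

```python
from openai.types.shared.reasoning import Reasoning

from agents import ModelSettings

# summary="detailed" is what makes the model emit
# response.reasoning_summary_text.delta events for the handlers above;
# effort="medium" controls how much reasoning happens before the answer.
settings = ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed"))
```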
diff --git a/examples/reasoning_content/runner_example.py b/examples/reasoning_content/runner_example.py
index e51f85799..579e7e1e6 100644
--- a/examples/reasoning_content/runner_example.py
+++ b/examples/reasoning_content/runner_example.py
@@ -6,17 +6,18 @@
 
 To run this example, you need to:
 1. Set your OPENAI_API_KEY environment variable
-2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
+2. Use a model that supports reasoning content (e.g., gpt-5)
 """
 
 import asyncio
 import os
-from typing import Any
 
-from agents import Agent, Runner, trace
+from openai.types.shared.reasoning import Reasoning
+
+from agents import Agent, ModelSettings, Runner, trace
 from agents.items import ReasoningItem
 
-MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "deepseek-reasoner"
+MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "gpt-5"
 
 
 async def main():
@@ -27,6 +28,7 @@ async def main():
         name="Reasoning Agent",
         instructions="You are a helpful assistant that explains your reasoning step by step.",
         model=MODEL_NAME,
+        model_settings=ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed")),
     )
 
     # Example 1: Non-streaming response
@@ -35,53 +37,34 @@ async def main():
         result = await Runner.run(
             agent, "What is the square root of 841? Please explain your reasoning."
         )
-        # Extract reasoning content from the result items
         reasoning_content = None
-        # RunResult has 'response' attribute which has 'output' attribute
-        for item in result.response.output:  # type: ignore
-            if isinstance(item, ReasoningItem):
-                reasoning_content = item.summary[0].text  # type: ignore
+        for item in result.new_items:
+            if isinstance(item, ReasoningItem) and len(item.raw_item.summary) > 0:
+                reasoning_content = item.raw_item.summary[0].text
                 break
 
-        print("\nReasoning Content:")
+        print("\n### Reasoning Content:")
         print(reasoning_content or "No reasoning content provided")
-
-        print("\nFinal Output:")
+        print("\n### Final Output:")
         print(result.final_output)
 
     # Example 2: Streaming response
     with trace("Reasoning Content - Streaming"):
         print("\n=== Example 2: Streaming response ===")
-        print("\nStreaming response:")
-
-        # Buffers to collect reasoning and regular content
-        reasoning_buffer = ""
-        content_buffer = ""
-
-        # RunResultStreaming is async iterable
         stream = Runner.run_streamed(agent, "What is 15 x 27? Please explain your reasoning.")
-
-        async for event in stream:  # type: ignore
-            if isinstance(event, ReasoningItem):
-                # This is reasoning content
-                reasoning_item: Any = event
-                reasoning_buffer += reasoning_item.summary[0].text
-                print(
-                    f"\033[33m{reasoning_item.summary[0].text}\033[0m", end="", flush=True
-                )  # Yellow for reasoning
-            elif hasattr(event, "text"):
-                # This is regular content
-                content_buffer += event.text
-                print(
-                    f"\033[32m{event.text}\033[0m", end="", flush=True
-                )  # Green for regular content
-
-        print("\n\nCollected Reasoning Content:")
-        print(reasoning_buffer)
-
-        print("\nCollected Final Answer:")
-        print(content_buffer)
+        output_text_already_started = False
+        async for event in stream.stream_events():
+            if event.type == "raw_response_event":
+                if event.data.type == "response.reasoning_summary_text.delta":
+                    print(f"\033[33m{event.data.delta}\033[0m", end="", flush=True)
+                elif event.data.type == "response.output_text.delta":
+                    if not output_text_already_started:
+                        print("\n")
+                        output_text_already_started = True
+                    print(f"\033[32m{event.data.delta}\033[0m", end="", flush=True)
+
+        print("\n")
 
 
 if __name__ == "__main__":
diff --git a/examples/research_bot/agents/planner_agent.py b/examples/research_bot/agents/planner_agent.py
index e80a8e656..cf8fe91cb 100644
--- a/examples/research_bot/agents/planner_agent.py
+++ b/examples/research_bot/agents/planner_agent.py
@@ -1,6 +1,7 @@
+from openai.types.shared.reasoning import Reasoning
 from pydantic import BaseModel
 
-from agents import Agent
+from agents import Agent, ModelSettings
 
 PROMPT = (
     "You are a helpful research assistant. Given a query, come up with a set of web searches "
@@ -24,6 +25,7 @@ class WebSearchPlan(BaseModel):
 planner_agent = Agent(
     name="PlannerAgent",
     instructions=PROMPT,
-    model="gpt-4o",
+    model="gpt-5",
+    model_settings=ModelSettings(reasoning=Reasoning(effort="medium")),
     output_type=WebSearchPlan,
 )
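Both files above hard-code `Reasoning(effort="medium")`. A hedged alternative, not part of this diff: derive the settings from the model name via the new `get_default_model_settings(model)` helper, so the examples degrade gracefully when `EXAMPLE_MODEL_NAME` is not a gpt-5 model (gpt-5 variants get effort `"low"`; anything else gets plain defaults):

```python
from agents.models import get_default_model_settings

# MODEL_NAME is the module-level variable from runner_example.py above.
model_settings = get_default_model_settings(MODEL_NAME)
```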
" @@ -28,6 +29,7 @@ class ReportData(BaseModel): writer_agent = Agent( name="WriterAgent", instructions=PROMPT, - model="o3-mini", + model="gpt-5-mini", + model_settings=ModelSettings(reasoning=Reasoning(effort="medium")), output_type=ReportData, ) diff --git a/examples/tools/code_interpreter.py b/examples/tools/code_interpreter.py index a5843ce3f..406e570e7 100644 --- a/examples/tools/code_interpreter.py +++ b/examples/tools/code_interpreter.py @@ -6,6 +6,9 @@ async def main(): agent = Agent( name="Code interpreter", + # Note that using gpt-5 model with streaming for this tool requires org verification + # Also, code interpreter tool does not support gpt-5's minimal reasoning effort + model="gpt-4.1", instructions="You love doing math.", tools=[ CodeInterpreterTool( diff --git a/examples/tools/file_search.py b/examples/tools/file_search.py index 2a3d4cf12..cd5332718 100644 --- a/examples/tools/file_search.py +++ b/examples/tools/file_search.py @@ -1,16 +1,42 @@ import asyncio +from openai import OpenAI + from agents import Agent, FileSearchTool, Runner, trace async def main(): + vector_store_id: str | None = None + + if vector_store_id is None: + print("### Preparing vector store:\n") + # Create a new vector store and index a file + client = OpenAI() + text = "Arrakis, the desert planet in Frank Herbert's 'Dune,' was inspired by the scarcity of water as a metaphor for oil and other finite resources." + file_upload = client.files.create( + file=("example.txt", text.encode("utf-8")), + purpose="assistants", + ) + print(f"File uploaded: {file_upload.to_dict()}") + + vector_store = client.vector_stores.create(name="example-vector-store") + print(f"Vector store created: {vector_store.to_dict()}") + + indexed = client.vector_stores.files.create_and_poll( + vector_store_id=vector_store.id, + file_id=file_upload.id, + ) + print(f"Stored files in vector store: {indexed.to_dict()}") + vector_store_id = vector_store.id + + # Create an agent that can search the vector store agent = Agent( name="File searcher", - instructions="You are a helpful agent.", + instructions="You are a helpful agent. You answer only based on the information in the vector store.", tools=[ FileSearchTool( max_num_results=3, - vector_store_ids=["vs_67bf88953f748191be42b462090e53e7"], + vector_store_ids=[vector_store_id], include_search_results=True, ) ], @@ -20,13 +46,16 @@ async def main(): result = await Runner.run( agent, "Be concise, and tell me 1 sentence about Arrakis I might not know." ) + + print("\n### Final output:\n") print(result.final_output) """ Arrakis, the desert planet in Frank Herbert's "Dune," was inspired by the scarcity of water as a metaphor for oil and other finite resources. """ - print("\n".join([str(out) for out in result.new_items])) + print("\n### Output items:\n") + print("\n".join([str(out.raw_item) + "\n" for out in result.new_items])) """ {"id":"...", "queries":["Arrakis"], "results":[...]} """ diff --git a/examples/voice/static/main.py b/examples/voice/static/main.py index 1b9e20243..69297e3e8 100644 --- a/examples/voice/static/main.py +++ b/examples/voice/static/main.py @@ -44,7 +44,7 @@ def get_weather(city: str) -> str: instructions=prompt_with_handoff_instructions( "You're speaking to a human, so be polite and concise. Speak in Spanish.", ), - model="gpt-4o-mini", + model="gpt-5-mini", ) agent = Agent( @@ -52,7 +52,7 @@ def get_weather(city: str) -> str: instructions=prompt_with_handoff_instructions( "You're speaking to a human, so be polite and concise. 
diff --git a/src/agents/agent.py b/src/agents/agent.py
index 2be7595b5..b64a6ea1d 100644
--- a/src/agents/agent.py
+++ b/src/agents/agent.py
@@ -17,6 +17,11 @@
 from .logger import logger
 from .mcp import MCPUtil
 from .model_settings import ModelSettings
+from .models.default_models import (
+    get_default_model_settings,
+    gpt_5_reasoning_settings_required,
+    is_gpt_5_default,
+)
 from .models.interface import Model
 from .prompts import DynamicPromptFunction, Prompt, PromptUtil
 from .run_context import RunContextWrapper, TContext
@@ -168,10 +173,10 @@ class Agent(AgentBase, Generic[TContext]):
     """The model implementation to use when invoking the LLM.
 
     By default, if not set, the agent will use the default model configured in
-    `openai_provider.DEFAULT_MODEL` (currently "gpt-4o").
+    `agents.models.get_default_model()` (currently "gpt-4.1").
     """
 
-    model_settings: ModelSettings = field(default_factory=ModelSettings)
+    model_settings: ModelSettings = field(default_factory=get_default_model_settings)
     """Configures model-specific tuning parameters (e.g. temperature, top_p).
     """
 
@@ -286,6 +291,26 @@ def __post_init__(self):
                 f"got {type(self.model_settings).__name__}"
             )
 
+        if (
+            # The user sets a non-default model
+            self.model is not None
+            and (
+                # The default model is gpt-5
+                is_gpt_5_default() is True
+                # However, the specified model is not a gpt-5 model
+                and (
+                    isinstance(self.model, str) is False
+                    or gpt_5_reasoning_settings_required(self.model) is False  # type: ignore
+                )
+                # The model settings are not customized for the specified model
+                and self.model_settings == get_default_model_settings()
+            )
+        ):
+            # In this scenario, we should use generic model settings,
+            # because non-gpt-5 models are not compatible with the default gpt-5 model settings.
+            # This is a best-effort attempt to make the agent work with non-gpt-5 models.
+            self.model_settings = ModelSettings()
+
         if not isinstance(self.input_guardrails, list):
             raise TypeError(
                 f"Agent input_guardrails must be a list, got {type(self.input_guardrails).__name__}"
diff --git a/src/agents/extensions/models/litellm_provider.py b/src/agents/extensions/models/litellm_provider.py
index 5a2dc1666..b046d4080 100644
--- a/src/agents/extensions/models/litellm_provider.py
+++ b/src/agents/extensions/models/litellm_provider.py
@@ -1,6 +1,8 @@
+from ...models.default_models import get_default_model
 from ...models.interface import Model, ModelProvider
 from .litellm_model import LitellmModel
 
+# This is kept for backward compatibility, but using the get_default_model() function is recommended.
 DEFAULT_MODEL: str = "gpt-4.1"
@@ -18,4 +20,4 @@ class LitellmProvider(ModelProvider):
     """
 
     def get_model(self, model_name: str | None) -> Model:
-        return LitellmModel(model_name or DEFAULT_MODEL)
+        return LitellmModel(model_name or get_default_model())
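The `__post_init__` fallback above is easiest to see end to end. A behavior sketch under the assumption that this PR is installed (the env variable must be set before the agent is constructed, since the default factory runs at construction time):

```python
import os

os.environ["OPENAI_DEFAULT_MODEL"] = "gpt-5"

from agents import Agent, ModelSettings

# This agent pins a non-gpt-5 model, so the gpt-5 reasoning/verbosity defaults
# would be invalid for it; __post_init__ resets them to generic settings.
agent = Agent(name="Legacy", instructions="Be helpful.", model="gpt-4o")
assert agent.model_settings == ModelSettings()
```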
diff --git a/src/agents/models/__init__.py b/src/agents/models/__init__.py
index e69de29bb..82998ac57 100644
--- a/src/agents/models/__init__.py
+++ b/src/agents/models/__init__.py
@@ -0,0 +1,13 @@
+from .default_models import (
+    get_default_model,
+    get_default_model_settings,
+    gpt_5_reasoning_settings_required,
+    is_gpt_5_default,
+)
+
+__all__ = [
+    "get_default_model",
+    "get_default_model_settings",
+    "gpt_5_reasoning_settings_required",
+    "is_gpt_5_default",
+]
diff --git a/src/agents/models/default_models.py b/src/agents/models/default_models.py
new file mode 100644
index 000000000..0259534ac
--- /dev/null
+++ b/src/agents/models/default_models.py
@@ -0,0 +1,58 @@
+import copy
+import os
+from typing import Optional
+
+from openai.types.shared.reasoning import Reasoning
+
+from agents.model_settings import ModelSettings
+
+OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME = "OPENAI_DEFAULT_MODEL"
+
+# Discourage directly accessing this constant;
+# use the get_default_model() and get_default_model_settings() functions instead.
+_GPT_5_DEFAULT_MODEL_SETTINGS: ModelSettings = ModelSettings(
+    # We chose "low" instead of "minimal" because some of the built-in tools
+    # (e.g., file search, image generation, etc.) do not support "minimal".
+    # If you want to use "minimal" reasoning effort, you can pass your own model settings.
+    reasoning=Reasoning(effort="low"),
+    verbosity="low",
+)
+
+
+def gpt_5_reasoning_settings_required(model_name: str) -> bool:
+    """
+    Returns True if the model name is a GPT-5 model and reasoning settings are required.
+    """
+    if model_name.startswith("gpt-5-chat"):
+        # gpt-5-chat-latest does not require reasoning settings
+        return False
+    # matches any of the gpt-5 models
+    return model_name.startswith("gpt-5")
+
+
+def is_gpt_5_default() -> bool:
+    """
+    Returns True if the default model is a GPT-5 model.
+    This is used to determine whether the default model settings are compatible with GPT-5 models.
+    If the default model is not a GPT-5 model, generic model settings are used instead.
+    """
+    return gpt_5_reasoning_settings_required(get_default_model())
+
+
+def get_default_model() -> str:
+    """
+    Returns the default model name.
+    """
+    return os.getenv(OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME, "gpt-4.1").lower()
+
+
+def get_default_model_settings(model: Optional[str] = None) -> ModelSettings:
+    """
+    Returns the default model settings.
+    If the default model is a GPT-5 model, returns the GPT-5 default model settings.
+    Otherwise, returns the legacy default model settings.
+    """
+    _model = model if model is not None else get_default_model()
+    if gpt_5_reasoning_settings_required(_model):
+        return copy.deepcopy(_GPT_5_DEFAULT_MODEL_SETTINGS)
+    return ModelSettings()
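A minimal usage sketch of the new module (the env variable is read on every call, so it can be changed at runtime; printed values follow from the code above):

```python
import os

os.environ["OPENAI_DEFAULT_MODEL"] = "gpt-5-mini"

from agents.models import (
    get_default_model,
    get_default_model_settings,
    is_gpt_5_default,
)

print(get_default_model())                     # gpt-5-mini
print(is_gpt_5_default())                      # True
print(get_default_model_settings().reasoning)  # Reasoning(effort='low', ...)
```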
diff --git a/src/agents/models/openai_provider.py b/src/agents/models/openai_provider.py
index e7e922ab4..91f2366bc 100644
--- a/src/agents/models/openai_provider.py
+++ b/src/agents/models/openai_provider.py
@@ -4,10 +4,12 @@
 from openai import AsyncOpenAI, DefaultAsyncHttpxClient
 
 from . import _openai_shared
+from .default_models import get_default_model
 from .interface import Model, ModelProvider
 from .openai_chatcompletions import OpenAIChatCompletionsModel
 from .openai_responses import OpenAIResponsesModel
 
+# This is kept for backward compatibility, but using the get_default_model() function is recommended.
 DEFAULT_MODEL: str = "gpt-4o"
@@ -80,7 +82,7 @@ def _get_client(self) -> AsyncOpenAI:
 
     def get_model(self, model_name: str | None) -> Model:
         if model_name is None:
-            model_name = DEFAULT_MODEL
+            model_name = get_default_model()
 
         client = self._get_client()
diff --git a/tests/models/test_default_models.py b/tests/models/test_default_models.py
new file mode 100644
index 000000000..f797a91d9
--- /dev/null
+++ b/tests/models/test_default_models.py
@@ -0,0 +1,56 @@
+import os
+from unittest.mock import patch
+
+from agents.models import (
+    get_default_model,
+    get_default_model_settings,
+    gpt_5_reasoning_settings_required,
+    is_gpt_5_default,
+)
+
+
+def test_default_model_is_gpt_4_1():
+    assert get_default_model() == "gpt-4.1"
+    assert is_gpt_5_default() is False
+    assert gpt_5_reasoning_settings_required(get_default_model()) is False
+    assert get_default_model_settings().reasoning is None
+
+
+@patch.dict(os.environ, {"OPENAI_DEFAULT_MODEL": "gpt-5"})
+def test_default_model_env_gpt_5():
+    assert get_default_model() == "gpt-5"
+    assert is_gpt_5_default() is True
+    assert gpt_5_reasoning_settings_required(get_default_model()) is True
+    assert get_default_model_settings().reasoning.effort == "low"  # type: ignore [union-attr]
+
+
+@patch.dict(os.environ, {"OPENAI_DEFAULT_MODEL": "gpt-5-mini"})
+def test_default_model_env_gpt_5_mini():
+    assert get_default_model() == "gpt-5-mini"
+    assert is_gpt_5_default() is True
+    assert gpt_5_reasoning_settings_required(get_default_model()) is True
+    assert get_default_model_settings().reasoning.effort == "low"  # type: ignore [union-attr]
+
+
+@patch.dict(os.environ, {"OPENAI_DEFAULT_MODEL": "gpt-5-nano"})
+def test_default_model_env_gpt_5_nano():
+    assert get_default_model() == "gpt-5-nano"
+    assert is_gpt_5_default() is True
+    assert gpt_5_reasoning_settings_required(get_default_model()) is True
+    assert get_default_model_settings().reasoning.effort == "low"  # type: ignore [union-attr]
+
+
+@patch.dict(os.environ, {"OPENAI_DEFAULT_MODEL": "gpt-5-chat-latest"})
+def test_default_model_env_gpt_5_chat_latest():
+    assert get_default_model() == "gpt-5-chat-latest"
+    assert is_gpt_5_default() is False
+    assert gpt_5_reasoning_settings_required(get_default_model()) is False
+    assert get_default_model_settings().reasoning is None
+
+
+@patch.dict(os.environ, {"OPENAI_DEFAULT_MODEL": "gpt-4o"})
+def test_default_model_env_gpt_4o():
+    assert get_default_model() == "gpt-4o"
+    assert is_gpt_5_default() is False
+    assert gpt_5_reasoning_settings_required(get_default_model()) is False
+    assert get_default_model_settings().reasoning is None
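One case the tests above leave uncovered, sketched here as a possible addition (grounded in the `.lower()` call inside `get_default_model()`): mixed-case env values should still be recognized as gpt-5.

```python
@patch.dict(os.environ, {"OPENAI_DEFAULT_MODEL": "GPT-5-Mini"})
def test_default_model_env_mixed_case():
    # get_default_model() lowercases the env value before matching
    assert get_default_model() == "gpt-5-mini"
    assert is_gpt_5_default() is True
```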