From e89861b7ea5a4d804b6aec4d332f34552144ad0f Mon Sep 17 00:00:00 2001 From: Kazuhiro Sera Date: Thu, 21 Aug 2025 23:26:55 +0900 Subject: [PATCH 1/3] Add a quick opt-in option to switch to gpt-5 and fix issues around gpt-5 --- examples/basic/hello-world-gpt-5.ts | 3 +- examples/basic/hello-world.ts | 5 +- examples/basic/reasoning.ts | 5 +- examples/financial-research-agent/agents.ts | 5 +- packages/agents-core/src/agent.ts | 30 +++++++++-- packages/agents-core/src/defaultModel.ts | 56 ++++++++++++++++++++ packages/agents-core/src/index.ts | 7 +++ packages/agents-core/src/run.ts | 50 +++++++++++++++++ packages/agents-openai/src/openaiProvider.ts | 5 +- 9 files changed, 155 insertions(+), 11 deletions(-) create mode 100644 packages/agents-core/src/defaultModel.ts diff --git a/examples/basic/hello-world-gpt-5.ts b/examples/basic/hello-world-gpt-5.ts index fff60640..487f0d29 100644 --- a/examples/basic/hello-world-gpt-5.ts +++ b/examples/basic/hello-world-gpt-5.ts @@ -21,7 +21,8 @@ async function main() { outputType: output, }); - const prompt = 'Tell me about recursion in programming.'; + const prompt = + 'Tell me about recursion in programming. Quickly responding with a single answer is fine.'; const result = await run(agent, prompt); console.log(result.finalOutput); diff --git a/examples/basic/hello-world.ts b/examples/basic/hello-world.ts index 81c2ddca..9deeec8d 100644 --- a/examples/basic/hello-world.ts +++ b/examples/basic/hello-world.ts @@ -6,7 +6,10 @@ async function main() { instructions: 'You only respond in haikus.', }); - const result = await run(agent, 'Tell me about recursion in programming.'); + const result = await run( + agent, + 'Tell me about recursion in programming. 
Quickly responding with a single answer is fine.', + ); console.log(result.finalOutput); // Example output: // Function calls itself, diff --git a/examples/basic/reasoning.ts b/examples/basic/reasoning.ts index 21ed49e8..2cee2df0 100644 --- a/examples/basic/reasoning.ts +++ b/examples/basic/reasoning.ts @@ -7,13 +7,16 @@ const THINKING_PREFIX = styleText(['bgGray', 'black'], 'Thought'); async function main() { const agent = new Agent({ name: 'Agent', - model: 'o3', + model: 'gpt-5', modelSettings: { providerData: { reasoning: { effort: 'high', summary: 'auto', }, + text: { + verbosity: 'high', + }, }, }, }); diff --git a/examples/financial-research-agent/agents.ts b/examples/financial-research-agent/agents.ts index bb5b1b55..6ff02136 100644 --- a/examples/financial-research-agent/agents.ts +++ b/examples/financial-research-agent/agents.ts @@ -46,7 +46,7 @@ export type FinancialSearchPlan = z.infer; export const plannerAgent = new Agent({ name: 'FinancialPlannerAgent', instructions: plannerPrompt, - model: 'o3-mini', + model: 'gpt-5-mini', outputType: FinancialSearchPlan, }); @@ -69,6 +69,7 @@ Focus on key numbers, events, or quotes that will be useful to a financial analy export const searchAgent = new Agent({ name: 'FinancialSearchAgent', instructions: searchAgentPrompt, + model: 'gpt-4.1', tools: [webSearchTool()], modelSettings: { toolChoice: 'required' }, }); @@ -92,7 +93,7 @@ export type VerificationResult = z.infer; export const verifierAgent = new Agent({ name: 'VerificationAgent', instructions: verifierPrompt, - model: 'gpt-4o', + model: 'gpt-4.1', outputType: VerificationResult, }); diff --git a/packages/agents-core/src/agent.ts b/packages/agents-core/src/agent.ts index 1bc09332..cd6cbf87 100644 --- a/packages/agents-core/src/agent.ts +++ b/packages/agents-core/src/agent.ts @@ -4,6 +4,11 @@ import type { InputGuardrail, OutputGuardrail } from './guardrail'; import { AgentHooks } from './lifecycle'; import { getAllMcpTools, type MCPServer } from './mcp'; 
import type { Model, ModelSettings, Prompt } from './model'; +import { + getDefaultModelSettings, + gpt5ReasoningSettingsRequired, + isGpt5Default, +} from './defaultModel'; import type { RunContext } from './runContext'; import { type FunctionTool, @@ -165,8 +170,10 @@ export interface AgentConfiguration< handoffOutputTypeWarningEnabled?: boolean; /** - * The model implementation to use when invoking the LLM. By default, if not set, the agent will - * use the default model configured in modelSettings.defaultModel + * The model implementation to use when invoking the LLM. + * + * By default, if not set, the agent will use the default model returned by + * getDefaultModel (currently "gpt-4.1"). */ model: string | Model; @@ -348,7 +355,7 @@ export class Agent< this.handoffDescription = config.handoffDescription ?? ''; this.handoffs = config.handoffs ?? []; this.model = config.model ?? ''; - this.modelSettings = config.modelSettings ?? {}; + this.modelSettings = config.modelSettings ?? getDefaultModelSettings(); this.tools = config.tools ?? []; this.mcpServers = config.mcpServers ?? []; this.inputGuardrails = config.inputGuardrails ?? []; @@ -359,6 +366,23 @@ export class Agent< this.toolUseBehavior = config.toolUseBehavior ?? 'run_llm_again'; this.resetToolChoice = config.resetToolChoice ?? true; + if ( + // The user sets a non-default model + config.model !== undefined && + // The default model is gpt-5 + isGpt5Default() && + // However, the specified model is not a gpt-5 model + (typeof config.model !== 'string' || + !gpt5ReasoningSettingsRequired(config.model)) && + // The model settings are not customized for the specified model + config.modelSettings === undefined + ) { + // In this scenario, we should use a generic model settings + // because non-gpt-5 models are not compatible with the default gpt-5 model settings. + // This is a best-effort attempt to make the agent work with non-gpt-5 models. 
+ this.modelSettings = {}; + } + // --- Runtime warning for handoff output type compatibility --- if ( config.handoffOutputTypeWarningEnabled === undefined || diff --git a/packages/agents-core/src/defaultModel.ts b/packages/agents-core/src/defaultModel.ts new file mode 100644 index 00000000..210a1fbc --- /dev/null +++ b/packages/agents-core/src/defaultModel.ts @@ -0,0 +1,56 @@ +import { loadEnv } from './config'; +import { ModelSettings } from './model'; + +export const OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME = 'OPENAI_DEFAULT_MODEL'; + +/** + * Returns True if the model name is a GPT-5 model and reasoning settings are required. + */ +export function gpt5ReasoningSettingsRequired(modelName: string): boolean { + if (modelName.startsWith('gpt-5-chat')) { + // gpt-5-chat-latest does not require reasoning settings + return false; + } + // matches any of gpt-5 models + return modelName.startsWith('gpt-5'); +} + +/** + * Returns True if the default model is a GPT-5 model. + * This is used to determine if the default model settings are compatible with GPT-5 models. + * If the default model is not a GPT-5 model, the model settings are compatible with other models. + */ +export function isGpt5Default(): boolean { + return gpt5ReasoningSettingsRequired(getDefaultModel()); +} + +/** + * Returns the default model name. + */ +export function getDefaultModel(): string { + const env = loadEnv(); + return ( + env[OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME]?.toLowerCase() ?? 'gpt-4.1' + ); +} + +/** + * Returns the default model settings. + * If the default model is a GPT-5 model, returns the GPT-5 default model settings. + * Otherwise, returns the legacy default model settings. + */ +export function getDefaultModelSettings(model?: string): ModelSettings { + const _model = model ?? 
getDefaultModel(); + if (gpt5ReasoningSettingsRequired(_model)) { + return { + providerData: { + // We chose "low" instead of "minimal" because some of the built-in tools + // (e.g., file search, image generation, etc.) do not support "minimal" + // If you want to use "minimal" reasoning effort, you can pass your own model settings + reasoning: { effort: 'low' }, + text: { verbosity: 'low' }, + }, + }; + } + return {}; +} diff --git a/packages/agents-core/src/index.ts b/packages/agents-core/src/index.ts index 6f3fd125..fc5266ca 100644 --- a/packages/agents-core/src/index.ts +++ b/packages/agents-core/src/index.ts @@ -94,6 +94,13 @@ export { SerializedTool, SerializedOutputType, } from './model'; +export { + OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME, + gpt5ReasoningSettingsRequired, + getDefaultModel, + getDefaultModelSettings, + isGpt5Default, +} from './defaultModel'; export { setDefaultModelProvider } from './providers'; export { RunResult, StreamedRunResult } from './result'; export { diff --git a/packages/agents-core/src/run.ts b/packages/agents-core/src/run.ts index 982624a3..5b2b4f87 100644 --- a/packages/agents-core/src/run.ts +++ b/packages/agents-core/src/run.ts @@ -54,6 +54,7 @@ import { RunAgentUpdatedStreamEvent, RunRawModelStreamEvent } from './events'; import { RunState } from './runState'; import { StreamEventResponseCompleted } from './types/protocol'; import { convertAgentOutputTypeToSerializable } from './utils/tools'; +import { gpt5ReasoningSettingsRequired, isGpt5Default } from './defaultModel'; const DEFAULT_MAX_TURNS = 10; @@ -369,6 +370,14 @@ export class Runner extends RunHooks> { ...this.config.modelSettings, ...state._currentAgent.modelSettings, }; + const agentModel = state._currentAgent.model; + const agentModelSettings = state._currentAgent.modelSettings; + modelSettings = sanitizeModelSettingsForNonGpt5Runner( + agentModel, + agentModelSettings, + model, + modelSettings, + ); modelSettings = maybeResetToolChoice( state._currentAgent, 
state._toolUseTracker, @@ -709,6 +718,14 @@ export class Runner extends RunHooks> { ...this.config.modelSettings, ...currentAgent.modelSettings, }; + const agentModel = currentAgent.model; + const agentModelSettings = currentAgent.modelSettings; + modelSettings = sanitizeModelSettingsForNonGpt5Runner( + agentModel, + agentModelSettings, + model, + modelSettings, + ); modelSettings = maybeResetToolChoice( currentAgent, result.state._toolUseTracker, @@ -1029,3 +1046,36 @@ export async function run, TContext = undefined>( return await runner.run(agent, input, options); } } + +/** + * When the default model is a GPT-5 variant, agents may carry GPT-5-specific providerData + * (e.g., reasoning effort, text verbosity). If a run resolves to a non-GPT-5 model and the + * agent relied on the default model (i.e., no explicit model set), these GPT-5-only settings + * are incompatible and should be stripped to avoid runtime errors. + */ +function sanitizeModelSettingsForNonGpt5Runner( + agentModel: string | Model, + agentModelSettings: ModelSettings, + runnerModel: string | Model, + modelSettings: ModelSettings, +): ModelSettings { + if ( + // gpt-5 is enabled for the default model for agents + isGpt5Default() && + // no explicitly set model for the agent + typeof agentModel === 'string' && + agentModel === Agent.DEFAULT_MODEL_PLACEHOLDER && + // this runner uses a non-gpt-5 model + (typeof runnerModel !== 'string' || + !gpt5ReasoningSettingsRequired(runnerModel)) && + (agentModelSettings.providerData?.reasoning || + agentModelSettings.providerData?.text?.verbosity || + (agentModelSettings.providerData as any)?.reasoning_effort) + ) { + // the incompatible parameters should be removed to avoid runtime errors + delete modelSettings.providerData?.reasoning; + delete (modelSettings.providerData as any)?.text?.verbosity; + delete (modelSettings.providerData as any)?.reasoning_effort; + } + return modelSettings; +} diff --git a/packages/agents-openai/src/openaiProvider.ts 
b/packages/agents-openai/src/openaiProvider.ts index 396b7d64..69995565 100644 --- a/packages/agents-openai/src/openaiProvider.ts +++ b/packages/agents-openai/src/openaiProvider.ts @@ -1,7 +1,6 @@ -import { Model, ModelProvider } from '@openai/agents-core'; +import { Model, ModelProvider, getDefaultModel } from '@openai/agents-core'; import OpenAI from 'openai'; import { - DEFAULT_OPENAI_MODEL, getDefaultOpenAIClient, getDefaultOpenAIKey, shouldUseResponsesByDefault, @@ -65,7 +64,7 @@ export class OpenAIProvider implements ModelProvider { } async getModel(modelName?: string | undefined): Promise { - const model = modelName || DEFAULT_OPENAI_MODEL; + const model = modelName || getDefaultModel(); const useResponses = this.#useResponses ?? shouldUseResponsesByDefault(); if (useResponses) { From ac97264997bbc4f664929518019d9e4842991ac7 Mon Sep 17 00:00:00 2001 From: Kazuhiro Sera Date: Thu, 21 Aug 2025 23:29:27 +0900 Subject: [PATCH 2/3] add changeset --- .changeset/two-squids-smell.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .changeset/two-squids-smell.md diff --git a/.changeset/two-squids-smell.md b/.changeset/two-squids-smell.md new file mode 100644 index 00000000..75d4f2d2 --- /dev/null +++ b/.changeset/two-squids-smell.md @@ -0,0 +1,6 @@ +--- +'@openai/agents-openai': patch +'@openai/agents-core': patch +--- + +Add a quick opt-in option to switch to gpt-5 From 04a1b3010b5e466fc5bda4636e3312c8e6873652 Mon Sep 17 00:00:00 2001 From: Kazuhiro Sera Date: Mon, 25 Aug 2025 20:53:43 +0900 Subject: [PATCH 3/3] GPT-5 full support --- examples/handoffs/index.ts | 7 +++ examples/mcp/hosted-mcp-human-in-the-loop.ts | 3 +- examples/mcp/hosted-mcp-on-approval.ts | 3 +- examples/mcp/hosted-mcp-simple.ts | 3 +- .../streamable-http-custom-fetch-example.ts | 49 ------------------- examples/mcp/streamable-http-example.ts | 2 +- .../model-providers/custom-example-agent.ts | 3 +- examples/tools/file-search.ts | 23 ++++++++- examples/tools/image-generation.ts | 2 
+- packages/agents-core/src/run.ts | 26 +++++----- packages/agents-core/src/runImplementation.ts | 2 +- .../test/runImplementation.test.ts | 43 ++++++++++++++++ 12 files changed, 98 insertions(+), 68 deletions(-) delete mode 100644 examples/mcp/streamable-http-custom-fetch-example.ts diff --git a/examples/handoffs/index.ts b/examples/handoffs/index.ts index 20509dcc..6ca808d4 100644 --- a/examples/handoffs/index.ts +++ b/examples/handoffs/index.ts @@ -5,6 +5,7 @@ import { HandoffInputData, handoff, withTrace, + isGpt5Default, } from '@openai/agents'; import { removeAllTools } from '@openai/agents-core/extensions'; import { z } from 'zod'; @@ -23,6 +24,12 @@ const randomNumberTool = tool({ // Message filter for handoff (removes tool messages and first two history items) function spanishHandoffMessageFilter(handoffMessageData: HandoffInputData) { + if (isGpt5Default()) { + console.log( + 'GPT-5 models do not work if you remove the tool call results, so this filter does nothing.', + ); + return handoffMessageData; + } // Remove all tool-related messages return removeAllTools(handoffMessageData); } diff --git a/examples/mcp/hosted-mcp-human-in-the-loop.ts b/examples/mcp/hosted-mcp-human-in-the-loop.ts index 7fbf186b..f43ec77e 100644 --- a/examples/mcp/hosted-mcp-human-in-the-loop.ts +++ b/examples/mcp/hosted-mcp-human-in-the-loop.ts @@ -21,7 +21,8 @@ async function main(verbose: boolean, stream: boolean): Promise { }; const agent = new Agent({ name: 'MCP Assistant', - instructions: 'You must always use the MCP tools to answer questions.', + instructions: + 'You must always use the MCP tools to answer questions. 
The mcp server knows which repo to investigate, so you do not need to ask the user about it.', tools: [ hostedMcpTool({ serverLabel: 'gitmcp', diff --git a/examples/mcp/hosted-mcp-on-approval.ts b/examples/mcp/hosted-mcp-on-approval.ts index bb4a9e59..3deb40af 100644 --- a/examples/mcp/hosted-mcp-on-approval.ts +++ b/examples/mcp/hosted-mcp-on-approval.ts @@ -25,7 +25,8 @@ async function main(verbose: boolean, stream: boolean): Promise { }; const agent = new Agent({ name: 'MCP Assistant', - instructions: 'You must always use the MCP tools to answer questions.', + instructions: + 'You must always use the MCP tools to answer questions. The mcp server knows which repo to investigate, so you do not need to ask the user about it.', tools: [ hostedMcpTool({ serverLabel: 'gitmcp', diff --git a/examples/mcp/hosted-mcp-simple.ts b/examples/mcp/hosted-mcp-simple.ts index afb407cd..a917beae 100644 --- a/examples/mcp/hosted-mcp-simple.ts +++ b/examples/mcp/hosted-mcp-simple.ts @@ -4,7 +4,8 @@ async function main(verbose: boolean, stream: boolean): Promise { withTrace('Hosted MCP Example', async () => { const agent = new Agent({ name: 'MCP Assistant', - instructions: 'You must always use the MCP tools to answer questions.', + instructions: + 'You must always use the MCP tools to answer questions. 
The mcp server knows which repo to investigate, so you do not need to ask the user about it.', tools: [ hostedMcpTool({ serverLabel: 'gitmcp', diff --git a/examples/mcp/streamable-http-custom-fetch-example.ts b/examples/mcp/streamable-http-custom-fetch-example.ts deleted file mode 100644 index 7bf9235a..00000000 --- a/examples/mcp/streamable-http-custom-fetch-example.ts +++ /dev/null @@ -1,49 +0,0 @@ -import { Agent, run, MCPServerStreamableHttp, withTrace } from '@openai/agents'; - -async function main() { - // Example of using a custom fetch implementation - const customFetch = async (url: string | URL, init?: RequestInit) => { - console.log(`Custom fetch called for URL: ${url}`); - // You could add custom headers, logging, retries, etc. here - const response = await fetch(url, { - ...init, - headers: { - ...init?.headers, - 'User-Agent': 'MyCustomAgent/1.0', - 'X-Custom-Header': 'custom-value', - }, - }); - console.log(`Response status: ${response.status}`); - return response; - }; - - const mcpServer = new MCPServerStreamableHttp({ - url: 'https://gitmcp.io/openai/codex', - name: 'GitMCP Documentation Server', - fetch: customFetch, // Pass custom fetch implementation - }); - - const agent = new Agent({ - name: 'GitMCP Assistant', - instructions: 'Use the tools to respond to user requests.', - mcpServers: [mcpServer], - }); - - try { - await withTrace('GitMCP Documentation Server Example with Custom Fetch', async () => { - await mcpServer.connect(); - const result = await run( - agent, - 'Which language is this repo written in?', - ); - console.log(result.finalOutput); - }); - } finally { - await mcpServer.close(); - } -} - -main().catch((err) => { - console.error(err); - process.exit(1); -}); diff --git a/examples/mcp/streamable-http-example.ts b/examples/mcp/streamable-http-example.ts index 5a685337..a651907e 100644 --- a/examples/mcp/streamable-http-example.ts +++ b/examples/mcp/streamable-http-example.ts @@ -16,7 +16,7 @@ async function main() { await 
mcpServer.connect(); const result = await run( agent, - 'Which language is this repo written in?', + 'Which language is this repo written in? The MCP server knows which repo to investigate.', ); console.log(result.finalOutput); }); diff --git a/examples/model-providers/custom-example-agent.ts b/examples/model-providers/custom-example-agent.ts index c7556be1..3d55a4a1 100644 --- a/examples/model-providers/custom-example-agent.ts +++ b/examples/model-providers/custom-example-agent.ts @@ -13,6 +13,7 @@ const getWeatherTool = tool({ description: 'Get the weather for a given city', parameters: z.object({ city: z.string() }), execute: async (input) => { + console.log(`[debug] Getting weather for ${input.city}\n`); return `The weather in ${input.city} is sunny`; }, }); @@ -28,7 +29,7 @@ const agent = new Agent({ async function main() { await withTrace('ChatCompletions Assistant Example', async () => { const result = await run(agent, "What's the weather in Tokyo?"); - console.log(`\n\nFinal response:\n${result.finalOutput}`); + console.log(`[Final response]\n${result.finalOutput}`); }); } diff --git a/examples/tools/file-search.ts b/examples/tools/file-search.ts index 0c5021a2..592c7ea5 100644 --- a/examples/tools/file-search.ts +++ b/examples/tools/file-search.ts @@ -1,11 +1,32 @@ import { Agent, run, fileSearchTool, withTrace } from '@openai/agents'; +import OpenAI, { toFile } from 'openai'; async function main() { + const client = new OpenAI({ + apiKey: process.env.OPENAI_API_KEY, + }); + + const text = `Arrakis, the desert planet in Frank Herbert's "Dune," was inspired by the scarcity of water + as a metaphor for oil and other finite resources.`; + const upload = await client.files.create({ + file: await toFile(Buffer.from(text, 'utf-8'), 'cafe.txt'), + purpose: 'assistants', + }); + const vectorStore = await client.vectorStores.create({ + name: 'Arrakis', + }); + console.log(vectorStore); + const indexed = await client.vectorStores.files.createAndPoll( + 
vectorStore.id, + { file_id: upload.id }, + ); + console.log(indexed); + const agent = new Agent({ name: 'File searcher', instructions: 'You are a helpful agent.', tools: [ - fileSearchTool(['vs_67bf88953f748191be42b462090e53e7'], { + fileSearchTool([vectorStore.id], { maxNumResults: 3, includeSearchResults: true, }), diff --git a/examples/tools/image-generation.ts b/examples/tools/image-generation.ts index 13b4e0f3..a92dc23a 100644 --- a/examples/tools/image-generation.ts +++ b/examples/tools/image-generation.ts @@ -25,7 +25,7 @@ async function main() { console.log('Generating image, this may take a while...'); const result = await run( agent, - 'Create an image of a frog eating a pizza, comic book style.', + 'Create an image of a frog eating a pizza, comic book style. Return a text description of the image as a message too.', ); console.log(result.finalOutput); diff --git a/packages/agents-core/src/run.ts b/packages/agents-core/src/run.ts index 5b2b4f87..987fded3 100644 --- a/packages/agents-core/src/run.ts +++ b/packages/agents-core/src/run.ts @@ -254,6 +254,10 @@ export class Runner extends RunHooks> { try { while (true) { + const explictlyModelSet = + (state._currentAgent.model !== undefined && + state._currentAgent.model !== '') || + (this.config.model !== undefined && this.config.model !== ''); let model = selectModel(state._currentAgent.model, this.config.model); if (typeof model === 'string') { @@ -370,10 +374,9 @@ export class Runner extends RunHooks> { ...this.config.modelSettings, ...state._currentAgent.modelSettings, }; - const agentModel = state._currentAgent.model; const agentModelSettings = state._currentAgent.modelSettings; - modelSettings = sanitizeModelSettingsForNonGpt5Runner( - agentModel, + modelSettings = adjustModelSettingsForNonGPT5RunnerModel( + explictlyModelSet, agentModelSettings, model, modelSettings, @@ -704,6 +707,9 @@ export class Runner extends RunHooks> { `Running agent ${currentAgent.name} (turn ${result.state._currentTurn})`, ); 
+ const explictlyModelSet = + (currentAgent.model !== undefined && currentAgent.model !== '') || + (this.config.model !== undefined && this.config.model !== ''); let model = selectModel(currentAgent.model, this.config.model); if (typeof model === 'string') { @@ -718,10 +724,9 @@ export class Runner extends RunHooks> { ...this.config.modelSettings, ...currentAgent.modelSettings, }; - const agentModel = currentAgent.model; const agentModelSettings = currentAgent.modelSettings; - modelSettings = sanitizeModelSettingsForNonGpt5Runner( - agentModel, + modelSettings = adjustModelSettingsForNonGPT5RunnerModel( + explictlyModelSet, agentModelSettings, model, modelSettings, @@ -1053,8 +1058,8 @@ export async function run, TContext = undefined>( * agent relied on the default model (i.e., no explicit model set), these GPT-5-only settings * are incompatible and should be stripped to avoid runtime errors. */ -function sanitizeModelSettingsForNonGpt5Runner( - agentModel: string | Model, +function adjustModelSettingsForNonGPT5RunnerModel( + explictlyModelSet: boolean, agentModelSettings: ModelSettings, runnerModel: string | Model, modelSettings: ModelSettings, @@ -1062,9 +1067,8 @@ function sanitizeModelSettingsForNonGpt5Runner( if ( // gpt-5 is enabled for the default model for agents isGpt5Default() && - // no explicitly set model for the agent - typeof agentModel === 'string' && - agentModel === Agent.DEFAULT_MODEL_PLACEHOLDER && + // explicitly set model for the agent + explictlyModelSet && // this runner uses a non-gpt-5 model (typeof runnerModel !== 'string' || !gpt5ReasoningSettingsRequired(runnerModel)) && diff --git a/packages/agents-core/src/runImplementation.ts b/packages/agents-core/src/runImplementation.ts index 78460d2b..3acff6d9 100644 --- a/packages/agents-core/src/runImplementation.ts +++ b/packages/agents-core/src/runImplementation.ts @@ -573,7 +573,7 @@ export async function executeToolsAndSideEffects( : undefined; // if there is no output we just run again - 
if (!potentialFinalOutput) { + if (typeof potentialFinalOutput === 'undefined') { return new SingleStepResult( originalInput, newResponse, diff --git a/packages/agents-core/test/runImplementation.test.ts b/packages/agents-core/test/runImplementation.test.ts index 4f6e9d57..cb243c2d 100644 --- a/packages/agents-core/test/runImplementation.test.ts +++ b/packages/agents-core/test/runImplementation.test.ts @@ -930,4 +930,47 @@ describe('executeToolsAndSideEffects', () => { expect(result.nextStep.output).toBe('Hello World'); } }); + + it('returns final output when final message text is empty', async () => { + const textAgent = new Agent({ name: 'TextAgent', outputType: 'text' }); + const imageCall: protocol.HostedToolCallItem = { + type: 'hosted_tool_call', + id: 'img1', + name: 'image_generation_call', + status: 'completed', + output: 'iVBORw0KGgoAAAANSUhEUgAABAAAAAYACAIAAABn4K39AAHH1....', // base64 encoded image + providerData: { type: 'image_generation_call' }, + }; + const emptyMessage: protocol.AssistantMessageItem = { + id: 'msg1', + type: 'message', + role: 'assistant', + status: 'completed', + content: [{ type: 'output_text', text: '' }], + }; + const response: ModelResponse = { + output: [imageCall, emptyMessage], + usage: new Usage(), + } as any; + const processedResponse = processModelResponse(response, textAgent, [], []); + + expect(processedResponse.hasToolsOrApprovalsToRun()).toBe(false); + + const result = await withTrace('test', () => + executeToolsAndSideEffects( + textAgent, + 'test input', + [], + response, + processedResponse, + runner, + state, + ), + ); + + expect(result.nextStep.type).toBe('next_step_final_output'); + if (result.nextStep.type === 'next_step_final_output') { + expect(result.nextStep.output).toBe(''); + } + }); });