Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .changeset/plain-breads-enjoy.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
'@openai/agents-openai': patch
'@openai/agents-core': patch
---

fix: Omit the tools parameter when a prompt ID is set but the agent does not define any tools
6 changes: 6 additions & 0 deletions packages/agents-core/src/agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -382,6 +382,7 @@ export class Agent<
outputType: TOutput = 'text' as TOutput;
toolUseBehavior: ToolUseBehavior;
resetToolChoice: boolean;
private readonly _toolsExplicitlyConfigured: boolean;

constructor(config: AgentOptions<TContext, TOutput>) {
super();
Expand All @@ -396,6 +397,7 @@ export class Agent<
this.model = config.model ?? '';
this.modelSettings = config.modelSettings ?? getDefaultModelSettings();
this.tools = config.tools ?? [];
this._toolsExplicitlyConfigured = config.tools !== undefined;
this.mcpServers = config.mcpServers ?? [];
this.inputGuardrails = config.inputGuardrails ?? [];
this.outputGuardrails = config.outputGuardrails ?? [];
Expand Down Expand Up @@ -679,6 +681,10 @@ export class Agent<
return [...mcpTools, ...enabledTools];
}

/**
 * Whether the caller explicitly passed a `tools` array in the agent config,
 * even an empty one. Model providers use this to decide if an empty tools
 * list was intentional (send `tools: []`) or merely a default (omit `tools`
 * so prompt-defined tools are not overwritten).
 */
hasExplicitToolConfig(): boolean {
return this._toolsExplicitlyConfigured;
}

/**
* Returns the handoffs that should be exposed to the model for the current run.
*
Expand Down
7 changes: 7 additions & 0 deletions packages/agents-core/src/model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -263,6 +263,13 @@ export type ModelRequest = {
*/
tools: SerializedTool[];

/**
* When true, the caller explicitly configured the tools list (even if empty).
* Providers can use this to avoid overwriting prompt-defined tools when an agent
* does not specify its own tools.
*/
toolsExplicitlyProvided?: boolean;

/**
* The type of the output to use for the model.
*/
Expand Down
4 changes: 4 additions & 0 deletions packages/agents-core/src/run.ts
Original file line number Diff line number Diff line change
Expand Up @@ -779,6 +779,7 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
conversationId: preparedCall.conversationId,
modelSettings: preparedCall.modelSettings,
tools: preparedCall.serializedTools,
toolsExplicitlyProvided: preparedCall.toolsExplicitlyProvided,
outputType: convertAgentOutputTypeToSerializable(
state._currentAgent.outputType,
),
Expand Down Expand Up @@ -1052,6 +1053,7 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
conversationId: preparedCall.conversationId,
modelSettings: preparedCall.modelSettings,
tools: preparedCall.serializedTools,
toolsExplicitlyProvided: preparedCall.toolsExplicitlyProvided,
handoffs: preparedCall.serializedHandoffs,
outputType: convertAgentOutputTypeToSerializable(
currentAgent.outputType,
Expand Down Expand Up @@ -1946,6 +1948,7 @@ type AgentArtifacts<TContext = unknown> = {
tools: Tool<TContext>[];
serializedHandoffs: SerializedHandoff[];
serializedTools: SerializedTool[];
toolsExplicitlyProvided: boolean;
};

/**
Expand Down Expand Up @@ -1980,6 +1983,7 @@ async function prepareAgentArtifacts<
tools,
serializedHandoffs: handoffs.map((handoff) => serializeHandoff(handoff)),
serializedTools: tools.map((tool) => serializeTool(tool)),
toolsExplicitlyProvided: state._currentAgent.hasExplicitToolConfig(),
};
}

Expand Down
7 changes: 6 additions & 1 deletion packages/agents-openai/src/openaiResponsesModel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1599,12 +1599,17 @@ export class OpenAIResponsesModel implements Model {
const shouldSendModel =
!request.prompt || request.overridePromptModel === true;

const shouldSendTools =
tools.length > 0 ||
request.toolsExplicitlyProvided === true ||
!request.prompt;

const requestData = {
...(shouldSendModel ? { model: this.#model } : {}),
instructions: normalizeInstructions(request.systemInstructions),
input,
include,
tools,
...(shouldSendTools ? { tools } : {}),
previous_response_id: request.previousResponseId,
conversation: request.conversationId,
prompt,
Expand Down
92 changes: 92 additions & 0 deletions packages/agents-openai/test/openaiResponsesModel.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,36 @@ describe('OpenAIResponsesModel', () => {
});
});

it('still sends an empty tools array when no prompt is provided', async () => {
await withTrace('test', async () => {
const fakeResponse = { id: 'res-no-prompt', usage: {}, output: [] };
const createMock = vi.fn().mockResolvedValue(fakeResponse);
const fakeClient = {
responses: { create: createMock },
} as unknown as OpenAI;
const model = new OpenAIResponsesModel(fakeClient, 'gpt-default');

const request = {
systemInstructions: undefined,
input: 'hello',
modelSettings: {},
tools: [],
toolsExplicitlyProvided: false,
outputType: 'text',
handoffs: [],
tracing: false,
signal: undefined,
};

await model.getResponse(request as any);

expect(createMock).toHaveBeenCalledTimes(1);
const [args] = createMock.mock.calls[0];
expect(args.tools).toEqual([]);
expect(args.prompt).toBeUndefined();
});
});

it('omits model when a prompt is provided', async () => {
await withTrace('test', async () => {
const fakeResponse = { id: 'res-prompt', usage: {}, output: [] };
Expand Down Expand Up @@ -135,6 +165,68 @@ describe('OpenAIResponsesModel', () => {
});
});

it('omits tools when agent did not configure any and prompt should supply them', async () => {
await withTrace('test', async () => {
const fakeResponse = { id: 'res-no-tools', usage: {}, output: [] };
const createMock = vi.fn().mockResolvedValue(fakeResponse);
const fakeClient = {
responses: { create: createMock },
} as unknown as OpenAI;
const model = new OpenAIResponsesModel(fakeClient, 'gpt-default');

const request = {
systemInstructions: undefined,
prompt: { promptId: 'pmpt_789' },
input: 'hello',
modelSettings: {},
tools: [],
toolsExplicitlyProvided: false,
outputType: 'text',
handoffs: [],
tracing: false,
signal: undefined,
};

await model.getResponse(request as any);

expect(createMock).toHaveBeenCalledTimes(1);
const [args] = createMock.mock.calls[0];
expect('tools' in args).toBe(false);
expect(args.prompt).toMatchObject({ id: 'pmpt_789' });
});
});

it('sends an explicit empty tools array when the agent intentionally disabled tools', async () => {
await withTrace('test', async () => {
const fakeResponse = { id: 'res-empty-tools', usage: {}, output: [] };
const createMock = vi.fn().mockResolvedValue(fakeResponse);
const fakeClient = {
responses: { create: createMock },
} as unknown as OpenAI;
const model = new OpenAIResponsesModel(fakeClient, 'gpt-default');

const request = {
systemInstructions: undefined,
prompt: { promptId: 'pmpt_999' },
input: 'hello',
modelSettings: {},
tools: [],
toolsExplicitlyProvided: true,
outputType: 'text',
handoffs: [],
tracing: false,
signal: undefined,
};

await model.getResponse(request as any);

expect(createMock).toHaveBeenCalledTimes(1);
const [args] = createMock.mock.calls[0];
expect(args.tools).toEqual([]);
expect(args.prompt).toMatchObject({ id: 'pmpt_999' });
});
});

it('normalizes systemInstructions so empty strings are omitted', async () => {
await withTrace('test', async () => {
const fakeResponse = {
Expand Down