diff --git a/.changeset/eleven-bananas-grow.md b/.changeset/eleven-bananas-grow.md new file mode 100644 index 00000000..23864f26 --- /dev/null +++ b/.changeset/eleven-bananas-grow.md @@ -0,0 +1,5 @@ +--- +'@openai/agents-openai': patch +--- + +fix: #558 prompt parameter does not work when being passed via an Agent diff --git a/examples/basic/package.json b/examples/basic/package.json index 5530b7f7..57fbc667 100644 --- a/examples/basic/package.json +++ b/examples/basic/package.json @@ -24,6 +24,7 @@ "start:stream-text": "tsx stream-text.ts", "start:json-schema-output-type": "tsx json-schema-output-type.ts", "start:tool-use-behavior": "tsx tool-use-behavior.ts", + "start:prompt-id": "tsx prompt-id.ts", "start:tools": "tsx tools.ts", "start:reasoning": "tsx reasoning.ts", "start:local-file": "tsx local-file.ts", diff --git a/examples/basic/prompt-id.ts b/examples/basic/prompt-id.ts index 5bb419be..94c77281 100644 --- a/examples/basic/prompt-id.ts +++ b/examples/basic/prompt-id.ts @@ -4,15 +4,13 @@ async function main() { const agent = new Agent({ name: 'Assistant', prompt: { - promptId: 'pmpt_684b3b772e648193b92404d7d0101d8a07f7a7903e519946', + promptId: 'pmpt_68d50b26524c81958c1425070180b5e10ab840669e470fc7', version: '1', - variables: { - poem_style: 'limerick', - }, + variables: { name: 'Kaz' }, }, }); - const result = await run(agent, 'Write about unrequited love.'); + const result = await run(agent, 'What is your name?'); console.log(result.finalOutput); } diff --git a/packages/agents-openai/src/openaiResponsesModel.ts b/packages/agents-openai/src/openaiResponsesModel.ts index bf3b0594..772a74d7 100644 --- a/packages/agents-openai/src/openaiResponsesModel.ts +++ b/packages/agents-openai/src/openaiResponsesModel.ts @@ -878,8 +878,8 @@ export class OpenAIResponsesModel implements Model { } const requestData = { - instructions: request.systemInstructions, model: this.#model, + instructions: normalizeInstructions(request.systemInstructions), input, include, tools, 
@@ -1051,3 +1051,21 @@ export class OpenAIResponsesModel implements Model { } } } + +/** + * Sending an empty string for instructions can override the prompt parameter. + * Thus, this function checks whether the instructions are an empty string and returns undefined if so. + * @param instructions - The instructions to normalize. + * @returns The normalized instructions. + */ +function normalizeInstructions( + instructions: string | undefined, +): string | undefined { + if (typeof instructions === 'string') { + if (instructions.trim() === '') { + return undefined; + } + return instructions; + } + return undefined; +} diff --git a/packages/agents-openai/test/openaiResponsesModel.test.ts b/packages/agents-openai/test/openaiResponsesModel.test.ts index f45bdbd4..5d33d76b 100644 --- a/packages/agents-openai/test/openaiResponsesModel.test.ts +++ b/packages/agents-openai/test/openaiResponsesModel.test.ts @@ -13,7 +13,7 @@ describe('OpenAIResponsesModel', () => { setTracingDisabled(true); }); it('getResponse returns correct ModelResponse and calls client with right parameters', async () => { - withTrace('test', async () => { + await withTrace('test', async () => { const fakeResponse = { id: 'res1', usage: { @@ -74,8 +74,67 @@ describe('OpenAIResponsesModel', () => { }); }); + it('normalizes systemInstructions so empty strings are omitted', async () => { + await withTrace('test', async () => { + const fakeResponse = { + id: 'res-empty-instructions', + usage: { + input_tokens: 0, + output_tokens: 0, + total_tokens: 0, + }, + output: [], + }; + for (const systemInstructions of ['', ' ']) { + const request = { + systemInstructions, + input: 'hello', + modelSettings: {}, + tools: [], + outputType: 'text', + handoffs: [], + tracing: false, + signal: undefined, + }; + const createMock = vi.fn().mockResolvedValue(fakeResponse); + await new OpenAIResponsesModel( + { responses: { create: createMock } } as unknown as OpenAI, + 'gpt-test', + ).getResponse(request as any); + + 
expect(createMock).toHaveBeenCalledTimes(1); + const [args] = createMock.mock.calls[0]; + expect('instructions' in args).toBe(true); + expect(args.instructions).toBeUndefined(); + } + + for (const systemInstructions of [' a ', 'foo']) { + const request = { + systemInstructions, + input: 'hello', + modelSettings: {}, + tools: [], + outputType: 'text', + handoffs: [], + tracing: false, + signal: undefined, + }; + const createMock = vi.fn().mockResolvedValue(fakeResponse); + await new OpenAIResponsesModel( + { responses: { create: createMock } } as unknown as OpenAI, + 'gpt-test', + ).getResponse(request as any); + + expect(createMock).toHaveBeenCalledTimes(1); + const [args] = createMock.mock.calls[0]; + expect('instructions' in args).toBe(true); + expect(args.instructions).toBe(systemInstructions); + } + }); + }); + it('merges top-level reasoning and text settings into provider data for Responses API', async () => { - withTrace('test', async () => { + await withTrace('test', async () => { const fakeResponse = { id: 'res-settings', usage: { @@ -134,7 +193,7 @@ describe('OpenAIResponsesModel', () => { }); it('getStreamedResponse yields events and calls client with stream flag', async () => { - withTrace('test', async () => { + await withTrace('test', async () => { const fakeResponse = { id: 'res2', usage: {}, output: [] }; const events: ResponseStreamEvent[] = [ { type: 'response.created', response: fakeResponse as any },