Skip to content

Commit 5f28b2c

Browse files
arvinxx and claude authored
🐛 fix: filter out reasoning fields from messages in ChatCompletion API (#10203)
* fix max tokens issue

* 🐛 fix: filter out reasoning fields from messages in ChatCompletion API

Explicitly map only valid ChatCompletionMessageParam fields and exclude reasoning/reasoning_content to prevent JSON unmarshaling errors when conversation history contains reasoning objects.

Fixes #10193

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

---------

Co-authored-by: Claude <noreply@anthropic.com>
1 parent 428f05a commit 5f28b2c

File tree

4 files changed

+129
-12
lines changed

4 files changed

+129
-12
lines changed

packages/model-runtime/src/core/contextBuilders/openai.test.ts

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,64 @@ describe('convertOpenAIMessages', () => {
150150

151151
expect(Promise.all).toHaveBeenCalledTimes(2); // 一次用于消息数组,一次用于内容数组
152152
});
153+
154+
it('should filter out reasoning field from messages', async () => {
155+
const messages = [
156+
{
157+
role: 'assistant',
158+
content: 'Hello',
159+
reasoning: { content: 'some reasoning', duration: 100 },
160+
},
161+
{ role: 'user', content: 'Hi' },
162+
] as any;
163+
164+
const result = await convertOpenAIMessages(messages);
165+
166+
expect(result).toEqual([
167+
{ role: 'assistant', content: 'Hello' },
168+
{ role: 'user', content: 'Hi' },
169+
]);
170+
// Ensure reasoning field is removed
171+
expect((result[0] as any).reasoning).toBeUndefined();
172+
});
173+
174+
it('should filter out reasoning_content field from messages', async () => {
175+
const messages = [
176+
{
177+
role: 'assistant',
178+
content: 'Hello',
179+
reasoning_content: 'some reasoning content',
180+
},
181+
{ role: 'user', content: 'Hi' },
182+
] as any;
183+
184+
const result = await convertOpenAIMessages(messages);
185+
186+
expect(result).toEqual([
187+
{ role: 'assistant', content: 'Hello' },
188+
{ role: 'user', content: 'Hi' },
189+
]);
190+
// Ensure reasoning_content field is removed
191+
expect((result[0] as any).reasoning_content).toBeUndefined();
192+
});
193+
194+
it('should filter out both reasoning and reasoning_content fields from messages', async () => {
195+
const messages = [
196+
{
197+
role: 'assistant',
198+
content: 'Hello',
199+
reasoning: { content: 'some reasoning', duration: 100 },
200+
reasoning_content: 'some reasoning content',
201+
},
202+
] as any;
203+
204+
const result = await convertOpenAIMessages(messages);
205+
206+
expect(result).toEqual([{ role: 'assistant', content: 'Hello' }]);
207+
// Ensure both fields are removed
208+
expect((result[0] as any).reasoning).toBeUndefined();
209+
expect((result[0] as any).reasoning_content).toBeUndefined();
210+
});
153211
});
154212

155213
describe('convertOpenAIResponseInputs', () => {

packages/model-runtime/src/core/contextBuilders/openai.ts

Lines changed: 24 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -26,17 +26,31 @@ export const convertMessageContent = async (
2626

2727
export const convertOpenAIMessages = async (messages: OpenAI.ChatCompletionMessageParam[]) => {
2828
return (await Promise.all(
29-
messages.map(async (message) => ({
30-
...message,
31-
content:
32-
typeof message.content === 'string'
33-
? message.content
34-
: await Promise.all(
35-
(message.content || []).map((c) =>
36-
convertMessageContent(c as OpenAI.ChatCompletionContentPart),
29+
messages.map(async (message) => {
30+
const msg = message as any;
31+
32+
// Explicitly map only valid ChatCompletionMessageParam fields
33+
// Exclude reasoning and reasoning_content fields as they should not be sent in requests
34+
const result: any = {
35+
content:
36+
typeof message.content === 'string'
37+
? message.content
38+
: await Promise.all(
39+
(message.content || []).map((c) =>
40+
convertMessageContent(c as OpenAI.ChatCompletionContentPart),
41+
),
3742
),
38-
),
39-
})),
43+
role: msg.role,
44+
};
45+
46+
// Add optional fields if they exist
47+
if (msg.name !== undefined) result.name = msg.name;
48+
if (msg.tool_calls !== undefined) result.tool_calls = msg.tool_calls;
49+
if (msg.tool_call_id !== undefined) result.tool_call_id = msg.tool_call_id;
50+
if (msg.function_call !== undefined) result.function_call = msg.function_call;
51+
52+
return result;
53+
}),
4054
)) as OpenAI.ChatCompletionMessageParam[];
4155
};
4256

packages/model-runtime/src/core/openaiCompatibleFactory/index.ts

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -766,12 +766,12 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
766766

767767
const inputStartAt = Date.now();
768768

769-
const { messages, reasoning_effort, tools, reasoning, responseMode, ...res } =
769+
const { messages, reasoning_effort, tools, reasoning, responseMode, max_tokens, ...res } =
770770
responses?.handlePayload
771771
? (responses?.handlePayload(payload, this._options) as ChatStreamPayload)
772772
: payload;
773773

774-
// remove penalty params
774+
// remove penalty params and chat completion specific params
775775
delete res.apiMode;
776776
delete res.frequency_penalty;
777777
delete res.presence_penalty;
@@ -797,6 +797,7 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
797797
}
798798
: {}),
799799
input,
800+
...(max_tokens && { max_output_tokens: max_tokens }),
800801
store: false,
801802
stream: !isStreaming ? undefined : isStreaming,
802803
tools: tools?.map((tool) => this.convertChatCompletionToolToResponseTool(tool)),

packages/model-runtime/src/providers/openai/index.test.ts

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -409,6 +409,50 @@ describe('LobeOpenAI', () => {
409409
const createCall = (instance['client'].responses.create as Mock).mock.calls[0][0];
410410
expect(createCall.reasoning).toEqual({ effort: 'high', summary: 'auto' });
411411
});
412+
413+
it('should convert max_tokens to max_output_tokens for responses API', async () => {
414+
const payload = {
415+
max_tokens: 2048,
416+
messages: [{ content: 'Hello', role: 'user' as const }],
417+
model: 'o1-pro',
418+
temperature: 0.7,
419+
};
420+
421+
await instance.chat(payload);
422+
423+
const createCall = (instance['client'].responses.create as Mock).mock.calls[0][0];
424+
expect(createCall.max_output_tokens).toBe(2048);
425+
expect(createCall.max_tokens).toBeUndefined();
426+
});
427+
428+
it('should not include max_output_tokens when max_tokens is undefined', async () => {
429+
const payload = {
430+
messages: [{ content: 'Hello', role: 'user' as const }],
431+
model: 'o1-pro',
432+
temperature: 0.7,
433+
};
434+
435+
await instance.chat(payload);
436+
437+
const createCall = (instance['client'].responses.create as Mock).mock.calls[0][0];
438+
expect(createCall.max_output_tokens).toBeUndefined();
439+
});
440+
441+
it('should convert max_tokens to max_output_tokens for search-enabled models', async () => {
442+
const payload = {
443+
enabledSearch: true,
444+
max_tokens: 4096,
445+
messages: [{ content: 'Hello', role: 'user' as const }],
446+
model: 'gpt-4o',
447+
temperature: 0.7,
448+
};
449+
450+
await instance.chat(payload);
451+
452+
const createCall = (instance['client'].responses.create as Mock).mock.calls[0][0];
453+
expect(createCall.max_output_tokens).toBe(4096);
454+
expect(createCall.max_tokens).toBeUndefined();
455+
});
412456
});
413457

414458
describe('supportsFlexTier', () => {

0 commit comments

Comments (0)