Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,10 @@ function getOpenAIMetadata(message: { providerOptions?: SharedV3ProviderOptions
return message?.providerOptions?.copilot ?? {}
}

// Extract provider-specific metadata stored under the generic
// "openaiCompatible" key of a message's providerOptions. Always returns an
// object so callers can read optional fields without null checks.
function getOpenAICompatibleMetadata(message: { providerOptions?: SharedV3ProviderOptions }) {
  const metadata = message?.providerOptions?.openaiCompatible
  return metadata ?? {}
}

export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV3Prompt): OpenAICompatibleChatPrompt {
const messages: OpenAICompatibleChatPrompt = []
for (const { role, content, ...message } of prompt) {
Expand Down Expand Up @@ -74,6 +78,10 @@ export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV3Pro
let text = ""
let reasoningText: string | undefined
let reasoningOpaque: string | undefined
const openAICompatibleMetadata = getOpenAICompatibleMetadata({ ...message }) as {
reasoning_content?: string
reasoning_details?: string
}
const toolCalls: Array<{
id: string
type: "function"
Expand Down Expand Up @@ -117,6 +125,14 @@ export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV3Pro
role: "assistant",
content: text || null,
tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
reasoning_content:
typeof openAICompatibleMetadata.reasoning_content === "string"
? openAICompatibleMetadata.reasoning_content
: undefined,
reasoning_details:
typeof openAICompatibleMetadata.reasoning_details === "string"
? openAICompatibleMetadata.reasoning_details
: undefined,
reasoning_text: reasoningOpaque ? reasoningText : undefined,
reasoning_opaque: reasoningOpaque,
...metadata,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,9 @@ export interface OpenAICompatibleAssistantMessage extends JsonRecord<OpenAICompa
role: "assistant"
content?: string | null
tool_calls?: Array<OpenAICompatibleMessageToolCall>
// Generic interleaved reasoning fields used by OpenAI-compatible providers.
reasoning_content?: string
reasoning_details?: string
// Copilot-specific reasoning fields
reasoning_text?: string
reasoning_opaque?: string
Expand Down
9 changes: 9 additions & 0 deletions packages/opencode/src/provider/transform.ts
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,15 @@ function mimeToModality(mime: string): Modality | undefined {
export namespace ProviderTransform {
export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000

/**
 * Pick the toolChoice value for a request based on the requested output
 * format.
 *
 * - Plain "text" requests get no explicit tool choice (undefined).
 * - "json_schema" requests normally force tool use ("required") so the
 *   structured output is produced via the tool call.
 * - Kimi-family models are the exception and get "auto" instead
 *   (presumably forced tool use misbehaves on them — confirm with provider).
 */
export function toolChoice(
  model: Provider.Model,
  format: { type: "text" | "json_schema" },
): "auto" | "required" | undefined {
  if (format.type !== "json_schema") return undefined
  const isKimiFamily = model.family?.startsWith("kimi") === true
  return isKimiFamily ? "auto" : "required"
}

// Maps npm package to the key the AI SDK expects for providerOptions
function sdkKey(npm: string): string | undefined {
switch (npm) {
Expand Down
2 changes: 1 addition & 1 deletion packages/opencode/src/session/prompt.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1484,7 +1484,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
messages: [...modelMsgs, ...(isLastStep ? [{ role: "assistant" as const, content: MAX_STEPS }] : [])],
tools,
model,
toolChoice: format.type === "json_schema" ? "required" : undefined,
toolChoice: ProviderTransform.toolChoice(model, format),
})

if (structured !== undefined) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -475,6 +475,74 @@ describe("reasoning (copilot-specific)", () => {
},
])
})

// Verifies that a string reasoning_content supplied via the generic
// openaiCompatible providerOptions key is copied onto the converted
// assistant message, while the copilot-specific fields stay undefined.
// NOTE(review): calls convertToCopilotMessages — presumably an import alias
// for convertToOpenAICompatibleChatMessages; confirm against the file's
// import list (not visible in this diff).
test("should include generic reasoning_content from openaiCompatible providerOptions", () => {
const result = convertToCopilotMessages([
{
role: "assistant",
providerOptions: {
openaiCompatible: {
reasoning_content: "Let me think before I call the tool.",
},
},
// Tool-call-only content: converter is expected to emit content: null.
content: [
{
type: "tool-call",
toolCallId: "call1",
toolName: "calculator",
input: { a: 1, b: 2 },
},
],
},
])

expect(result).toEqual([
{
role: "assistant",
content: null,
tool_calls: [
{
id: "call1",
type: "function",
function: {
name: "calculator",
// Converter serializes tool input the same way: JSON.stringify.
arguments: JSON.stringify({ a: 1, b: 2 }),
},
},
],
reasoning_content: "Let me think before I call the tool.",
reasoning_details: undefined,
reasoning_text: undefined,
reasoning_opaque: undefined,
},
])
})

// Mirror of the reasoning_content test for the reasoning_details field:
// a string value under providerOptions.openaiCompatible must be forwarded
// onto the converted assistant message; all other reasoning fields stay
// undefined and plain text content is preserved.
test("should include generic reasoning_details from openaiCompatible providerOptions", () => {
const result = convertToCopilotMessages([
{
role: "assistant",
providerOptions: {
openaiCompatible: {
reasoning_details: "Step-by-step replay payload",
},
},
content: [{ type: "text", text: "Done!" }],
},
])

expect(result).toEqual([
{
role: "assistant",
content: "Done!",
// No tool calls in the input, so tool_calls is left undefined.
tool_calls: undefined,
reasoning_content: undefined,
reasoning_details: "Step-by-step replay payload",
reasoning_text: undefined,
reasoning_opaque: undefined,
},
])
})
})

describe("full conversation", () => {
Expand Down
90 changes: 90 additions & 0 deletions packages/opencode/test/session/prompt.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,10 @@ import { ModelID, ProviderID } from "../../src/provider/schema"
import { Session } from "../../src/session"
import { MessageV2 } from "../../src/session/message-v2"
import { SessionPrompt } from "../../src/session/prompt"
import { ProviderTransform } from "../../src/provider/transform"
import { Log } from "../../src/util/log"
import { tmpdir } from "../fixture/fixture"
import type { Provider } from "../../src/provider/provider"

Log.init({ print: false })

Expand Down Expand Up @@ -520,3 +522,91 @@ describe("session.agent-resolution", () => {
})
}, 30000)
})

// Unit tests for ProviderTransform.toolChoice: Kimi-family models get
// "auto" for json_schema requests, everything else gets "required", and
// text requests get undefined. The fixtures are fully-populated
// Provider.Model literals — only `family` and the format type actually
// drive the branch under test; the rest satisfies the type.
describe("session.prompt structured output tool choice", () => {
test("uses auto for Kimi json_schema requests", () => {
// family: "kimi" is the discriminating field for this test.
const model: Provider.Model = {
id: ModelID.make("kimi-k2.5"),
providerID: ProviderID.make("opencode"),
api: {
id: "kimi-k2.5",
url: "https://example.com",
npm: "@ai-sdk/openai-compatible",
},
name: "Kimi K2.5",
family: "kimi",
capabilities: {
temperature: true,
reasoning: true,
attachment: false,
toolcall: true,
input: {
text: true,
audio: false,
image: false,
video: false,
pdf: false,
},
output: {
text: true,
audio: false,
image: false,
video: false,
pdf: false,
},
interleaved: { field: "reasoning_content" },
},
cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
limit: { context: 100000, output: 10000 },
status: "active",
headers: {},
release_date: "2026-01-01",
options: {},
}

expect(ProviderTransform.toolChoice(model, { type: "json_schema" })).toBe("auto")
})

test("keeps required for non-Kimi json_schema requests", () => {
// No `family` field here, so the kimi branch must not trigger.
const model: Provider.Model = {
id: ModelID.make("gpt-5.2"),
providerID: ProviderID.make("openai"),
api: {
id: "gpt-5.2",
url: "https://example.com",
npm: "@ai-sdk/openai",
},
name: "GPT-5.2",
capabilities: {
temperature: true,
reasoning: true,
attachment: false,
toolcall: true,
input: {
text: true,
audio: false,
image: false,
video: false,
pdf: false,
},
output: {
text: true,
audio: false,
image: false,
video: false,
pdf: false,
},
interleaved: false,
},
cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
limit: { context: 100000, output: 10000 },
status: "active",
headers: {},
release_date: "2026-01-01",
options: {},
}

expect(ProviderTransform.toolChoice(model, { type: "json_schema" })).toBe("required")
// text format never forces a tool choice, regardless of model.
expect(ProviderTransform.toolChoice(model, { type: "text" })).toBeUndefined()
})
})
Loading