diff --git a/bun.lock b/bun.lock
index 77ab24240bb9..64b32feac4eb 100644
--- a/bun.lock
+++ b/bun.lock
@@ -688,7 +688,7 @@
"@tailwindcss/vite": "4.1.11",
"@tsconfig/bun": "1.0.9",
"@tsconfig/node22": "22.0.2",
- "@types/bun": "1.3.11",
+ "@types/bun": "1.3.12",
"@types/cross-spawn": "6.0.6",
"@types/luxon": "3.7.1",
"@types/node": "22.13.9",
@@ -2302,7 +2302,7 @@
"@types/braces": ["@types/braces@3.0.5", "", {}, "sha512-SQFof9H+LXeWNz8wDe7oN5zu7ket0qwMu5vZubW4GCJ8Kkeh6nBWUz87+KTz/G3Kqsrp0j/W253XJb3KMEeg3w=="],
- "@types/bun": ["@types/bun@1.3.11", "", { "dependencies": { "bun-types": "1.3.11" } }, "sha512-5vPne5QvtpjGpsGYXiFyycfpDF2ECyPcTSsFBMa0fraoxiQyMJ3SmuQIGhzPg2WJuWxVBoxWJ2kClYTcw/4fAg=="],
+ "@types/bun": ["@types/bun@1.3.12", "", { "dependencies": { "bun-types": "1.3.12" } }, "sha512-DBv81elK+/VSwXHDlnH3Qduw+KxkTIWi7TXkAeh24zpi5l0B2kUg9Ga3tb4nJaPcOFswflgi/yAvMVBPrxMB+A=="],
"@types/cacache": ["@types/cacache@20.0.1", "", { "dependencies": { "@types/node": "*", "minipass": "*" } }, "sha512-QlKW3AFoFr/hvPHwFHMIVUH/ZCYeetBNou3PCmxu5LaNDvrtBlPJtIA6uhmU9JRt9oxj7IYoqoLcpxtzpPiTcw=="],
@@ -2720,7 +2720,7 @@
"bun-pty": ["bun-pty@0.4.8", "", {}, "sha512-rO70Mrbr13+jxHHHu2YBkk2pNqrJE5cJn29WE++PUr+GFA0hq/VgtQPZANJ8dJo6d7XImvBk37Innt8GM7O28w=="],
- "bun-types": ["bun-types@1.3.11", "", { "dependencies": { "@types/node": "*" } }, "sha512-1KGPpoxQWl9f6wcZh57LvrPIInQMn2TQ7jsgxqpRzg+l0QPOFvJVH7HmvHo/AiPgwXy+/Thf6Ov3EdVn1vOabg=="],
+ "bun-types": ["bun-types@1.3.12", "", { "dependencies": { "@types/node": "*" } }, "sha512-HqOLj5PoFajAQciOMRiIZGNoKxDJSr6qigAttOX40vJuSp6DN/CxWp9s3C1Xwm4oH7ybueITwiaOcWXoYVoRkA=="],
"bun-webgpu": ["bun-webgpu@0.1.5", "", { "dependencies": { "@webgpu/types": "^0.1.60" }, "optionalDependencies": { "bun-webgpu-darwin-arm64": "^0.1.5", "bun-webgpu-darwin-x64": "^0.1.5", "bun-webgpu-linux-x64": "^0.1.5", "bun-webgpu-win32-x64": "^0.1.5" } }, "sha512-91/K6S5whZKX7CWAm9AylhyKrLGRz6BUiiPiM/kXadSnD4rffljCD/q9cNFftm5YXhx4MvLqw33yEilxogJvwA=="],
diff --git a/flake.lock b/flake.lock
index 805be8739bb3..1c8e62bd825d 100644
--- a/flake.lock
+++ b/flake.lock
@@ -2,11 +2,11 @@
"nodes": {
"nixpkgs": {
"locked": {
- "lastModified": 1773909469,
- "narHash": "sha256-vglVrLfHjFIzIdV9A27Ugul6rh3I1qHbbitGW7dk420=",
+ "lastModified": 1776683584,
+ "narHash": "sha256-NuTLMrr10Tng72hurYG8jYQ4XKK8wnpJmOGcPiis96g=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "7149c06513f335be57f26fcbbbe34afda923882b",
+ "rev": "9dd5558b06dbdacbf635a3dd36dce1b1a7ee3a89",
"type": "github"
},
"original": {
diff --git a/nix/hashes.json b/nix/hashes.json
index 21279a327d0a..c09604610638 100644
--- a/nix/hashes.json
+++ b/nix/hashes.json
@@ -1,8 +1,8 @@
{
"nodeModules": {
- "x86_64-linux": "sha256-NczRp8MPppkqP8PQfWMUWJ/Wofvf2YVy5m4i22Pi3jg=",
- "aarch64-linux": "sha256-QIxGOu8Fj+sWgc9hKvm1BLiIErxEtd17SPlwZGac9sQ=",
- "aarch64-darwin": "sha256-Rb9qbMM+ARn0iBCaZurwcoUBCplbMXEZwrXVKextp3I=",
- "x86_64-darwin": "sha256-KVxOKkaVV7W+K4reEk14MTLgmtoqwCYDqDNXNeS6ync="
+ "x86_64-linux": "sha256-AgHhYsiygxbsBo3JN4HqHXKAwh8n1qeuSCe2qqxlxW4=",
+ "aarch64-linux": "sha256-h2lpWRQ5EDYnjpqZXtUAp1mxKLQxJ4m8MspgSY8Ev78=",
+ "aarch64-darwin": "sha256-xnd91+WyeAqn06run2ajsekxJvTMiLsnqNPe/rR8VTM=",
+ "x86_64-darwin": "sha256-rXpz45IOjGEk73xhP9VY86eOj2CZBg2l1vzwzTIOOOQ="
}
}
diff --git a/package.json b/package.json
index 06bf9c91aef0..f918bcd025f5 100644
--- a/package.json
+++ b/package.json
@@ -4,7 +4,7 @@
"description": "AI-powered development tool",
"private": true,
"type": "module",
- "packageManager": "bun@1.3.11",
+ "packageManager": "bun@1.3.13",
"scripts": {
"dev": "bun run --cwd packages/opencode --conditions=browser src/index.ts",
"dev:desktop": "bun --cwd packages/desktop-electron dev",
@@ -30,7 +30,7 @@
"@effect/opentelemetry": "4.0.0-beta.48",
"@effect/platform-node": "4.0.0-beta.48",
"@npmcli/arborist": "9.4.0",
- "@types/bun": "1.3.11",
+ "@types/bun": "1.3.12",
"@types/cross-spawn": "6.0.6",
"@octokit/rest": "22.0.0",
"@hono/zod-validator": "0.4.2",
diff --git a/packages/app/src/components/dialog-edit-project.tsx b/packages/app/src/components/dialog-edit-project.tsx
index ea5d70065adc..8eb12daf52e5 100644
--- a/packages/app/src/components/dialog-edit-project.tsx
+++ b/packages/app/src/components/dialog-edit-project.tsx
@@ -12,6 +12,7 @@ import { type LocalProject, getAvatarColors } from "@/context/layout"
import { getFilename } from "@opencode-ai/shared/util/path"
import { Avatar } from "@opencode-ai/ui/avatar"
import { useLanguage } from "@/context/language"
+import { getProjectAvatarSource } from "@/pages/layout/sidebar-items"
const AVATAR_COLOR_KEYS = ["pink", "mint", "orange", "purple", "cyan", "lime"] as const
@@ -26,8 +27,8 @@ export function DialogEditProject(props: { project: LocalProject }) {
const [store, setStore] = createStore({
name: defaultName(),
- color: props.project.icon?.color || "pink",
- iconUrl: props.project.icon?.override || "",
+ color: props.project.icon?.color,
+ iconOverride: props.project.icon?.override,
startup: props.project.commands?.start ?? "",
dragOver: false,
iconHover: false,
@@ -39,7 +40,7 @@ export function DialogEditProject(props: { project: LocalProject }) {
if (!file.type.startsWith("image/")) return
const reader = new FileReader()
reader.onload = (e) => {
- setStore("iconUrl", e.target?.result as string)
+ setStore("iconOverride", e.target?.result as string)
setStore("iconHover", false)
}
reader.readAsDataURL(file)
@@ -68,7 +69,7 @@ export function DialogEditProject(props: { project: LocalProject }) {
}
function clearIcon() {
- setStore("iconUrl", "")
+ setStore("iconOverride", "")
}
const saveMutation = useMutation(() => ({
@@ -81,17 +82,17 @@ export function DialogEditProject(props: { project: LocalProject }) {
projectID: props.project.id,
directory: props.project.worktree,
name,
- icon: { color: store.color, override: store.iconUrl },
+ icon: { color: store.color || "", override: store.iconOverride || "" },
commands: { start },
})
- globalSync.project.icon(props.project.worktree, store.iconUrl || undefined)
+ globalSync.project.icon(props.project.worktree, store.iconOverride || undefined)
dialog.close()
return
}
globalSync.project.meta(props.project.worktree, {
name,
- icon: { color: store.color, override: store.iconUrl || undefined },
+ icon: { color: store.color || undefined, override: store.iconOverride || undefined },
commands: { start: start || undefined },
})
dialog.close()
@@ -130,13 +131,13 @@ export function DialogEditProject(props: { project: LocalProject }) {
classList={{
"border-text-interactive-base bg-surface-info-base/20": store.dragOver,
"border-border-base hover:border-border-strong": !store.dragOver,
- "overflow-hidden": !!store.iconUrl,
+ "overflow-hidden": !!store.iconOverride,
}}
onDrop={handleDrop}
onDragOver={handleDragOver}
onDragLeave={handleDragLeave}
onClick={() => {
- if (store.iconUrl && store.iconHover) {
+ if (store.iconOverride && store.iconHover) {
clearIcon()
} else {
iconInput?.click()
@@ -144,7 +145,11 @@ export function DialogEditProject(props: { project: LocalProject }) {
}}
>
}
>
-
+ {(src) => (
+
+ )}
@@ -174,8 +181,8 @@ export function DialogEditProject(props: { project: LocalProject }) {
@@ -198,7 +205,7 @@ export function DialogEditProject(props: { project: LocalProject }) {
-
+
@@ -215,7 +222,10 @@ export function DialogEditProject(props: { project: LocalProject }) {
"bg-transparent border border-transparent hover:bg-surface-base-hover hover:border-border-weak-base":
store.color !== color,
}}
- onClick={() => setStore("color", color)}
+ onClick={() => {
+ if (store.color === color && !props.project.icon?.url) return
+ setStore("color", store.color === color ? undefined : color)
+ }}
>
{
const globalSync = useGlobalSync()
const notification = useNotification()
@@ -42,11 +50,7 @@ export const ProjectIcon = (props: { project: LocalProject; class?: string; noti
Kimi K2.6
GLM-5
GLM-5.1
- Mimo-V2-Pro
- Mimo-V2-Omni
+ MiMo-V2-Pro
+ MiMo-V2-Omni
+ MiMo-V2.5-Pro
+ MiMo-V2.5
MiniMax M2.5
MiniMax M2.7
Qwen3.5 Plus
diff --git a/packages/containers/bun-node/Dockerfile b/packages/containers/bun-node/Dockerfile
index 485375dd9f61..d6f4729bf51e 100644
--- a/packages/containers/bun-node/Dockerfile
+++ b/packages/containers/bun-node/Dockerfile
@@ -4,7 +4,7 @@ FROM ${REGISTRY}/build/base:24.04
SHELL ["/bin/bash", "-lc"]
ARG NODE_VERSION=24.4.0
-ARG BUN_VERSION=1.3.11
+ARG BUN_VERSION=1.3.13
ENV BUN_INSTALL=/opt/bun
ENV PATH=/opt/bun/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
diff --git a/packages/opencode/src/agent/prompt/compaction.txt b/packages/opencode/src/agent/prompt/compaction.txt
index c5831bb30eda..c7cb838bbaa0 100644
--- a/packages/opencode/src/agent/prompt/compaction.txt
+++ b/packages/opencode/src/agent/prompt/compaction.txt
@@ -1,16 +1,9 @@
-You are a helpful AI assistant tasked with summarizing conversations.
+You are an anchored context summarization assistant for coding sessions.
-When asked to summarize, provide a detailed but concise summary of the older conversation history.
-The most recent turns may be preserved verbatim outside your summary, so focus on information that would still be needed to continue the work with that recent context available.
-Focus on information that would be helpful for continuing the conversation, including:
-- What was done
-- What is currently being worked on
-- Which files are being modified
-- What needs to be done next
-- Key user requests, constraints, or preferences that should persist
-- Important technical decisions and why they were made
+Summarize only the conversation history you are given. The newest turns may be kept verbatim outside your summary, so focus on the older context that still matters for continuing the work.
-Your summary should be comprehensive enough to provide context but concise enough to be quickly understood.
+If the prompt includes a prior summary block, treat it as the current anchored summary. Update it with the new history by preserving still-true details, removing stale details, and merging in new facts.
-Do not respond to any questions in the conversation, only output the summary.
-Respond in the same language the user used in the conversation.
+Always follow the exact output structure requested by the user prompt. Keep every section, preserve exact file paths and identifiers when known, and prefer terse bullets over paragraphs.
+
+Do not answer the conversation itself. Do not mention that you are summarizing, compacting, or merging context. Respond in the same language as the conversation.
diff --git a/packages/opencode/src/cli/cmd/debug/lsp.ts b/packages/opencode/src/cli/cmd/debug/lsp.ts
index 185cab9c7587..47db6358b6e7 100644
--- a/packages/opencode/src/cli/cmd/debug/lsp.ts
+++ b/packages/opencode/src/cli/cmd/debug/lsp.ts
@@ -23,8 +23,7 @@ const DiagnosticsCommand = cmd({
const out = await AppRuntime.runPromise(
LSP.Service.use((lsp) =>
Effect.gen(function* () {
- yield* lsp.touchFile(args.file, true)
- yield* Effect.sleep(1000)
+ yield* lsp.touchFile(args.file, "full")
return yield* lsp.diagnostics()
}),
),
diff --git a/packages/opencode/src/cli/cmd/tui/attach.ts b/packages/opencode/src/cli/cmd/tui/attach.ts
index 9a93f3f57a63..cb6b95a56cb6 100644
--- a/packages/opencode/src/cli/cmd/tui/attach.ts
+++ b/packages/opencode/src/cli/cmd/tui/attach.ts
@@ -3,6 +3,8 @@ import { UI } from "@/cli/ui"
import { tui } from "./app"
import { win32DisableProcessedInput, win32InstallCtrlCGuard } from "./win32"
import { TuiConfig } from "@/cli/cmd/tui/config/tui"
+import { errorMessage } from "@/util/error"
+import { validateSession } from "./validate-session"
export const AttachCommand = cmd({
command: "attach ",
@@ -65,6 +67,20 @@ export const AttachCommand = cmd({
return { Authorization: auth }
})()
const config = await TuiConfig.get()
+
+ try {
+ await validateSession({
+ url: args.url,
+ sessionID: args.session,
+ directory,
+ headers,
+ })
+ } catch (error) {
+ UI.error(errorMessage(error))
+ process.exitCode = 1
+ return
+ }
+
await tui({
url: args.url,
config,
diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
index 06be5dfbefbf..2f5da1d23154 100644
--- a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
+++ b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
@@ -68,6 +68,7 @@ import { Flag } from "@/flag/flag"
import { LANGUAGE_EXTENSIONS } from "@/lsp/language"
import parsers from "../../../../../../parsers-config.ts"
import * as Clipboard from "../../util/clipboard"
+import { errorMessage } from "@/util/error"
import { Toast, useToast } from "../../ui/toast"
import { useKV } from "../../context/kv.tsx"
import * as Editor from "../../util/editor"
@@ -180,31 +181,43 @@ export function Session() {
const toast = useToast()
const sdk = useSDK()
- createEffect(async () => {
- const previousWorkspace = project.workspace.current()
- const result = await sdk.client.session.get({ sessionID: route.sessionID }, { throwOnError: true })
- if (!result.data) {
+ createEffect(() => {
+ const sessionID = route.sessionID
+ void (async () => {
+ const previousWorkspace = project.workspace.current()
+ const result = await sdk.client.session.get({ sessionID }, { throwOnError: true })
+ if (!result.data) {
+ toast.show({
+ message: `Session not found: ${sessionID}`,
+ variant: "error",
+ duration: 5000,
+ })
+ navigate({ type: "home" })
+ return
+ }
+
+ if (result.data.workspaceID !== previousWorkspace) {
+ project.workspace.set(result.data.workspaceID)
+
+ // Sync all the data for this workspace. Note that this
+ // workspace may not exist anymore which is why this is not
+ // fatal. If it doesn't we still want to show the session
+ // (which will be non-interactive)
+ try {
+ await sync.bootstrap({ fatal: false })
+ } catch {}
+ }
+ await sync.session.sync(sessionID)
+ if (route.sessionID === sessionID && scroll) scroll.scrollBy(100_000)
+ })().catch((error) => {
+ if (route.sessionID !== sessionID) return
toast.show({
- message: `Session not found: ${route.sessionID}`,
+ message: errorMessage(error),
variant: "error",
+ duration: 5000,
})
navigate({ type: "home" })
- return
- }
-
- if (result.data.workspaceID !== previousWorkspace) {
- project.workspace.set(result.data.workspaceID)
-
- // Sync all the data for this workspace. Note that this
- // workspace may not exist anymore which is why this is not
- // fatal. If it doesn't we still want to show the session
- // (which will be non-interactive)
- try {
- await sync.bootstrap({ fatal: false })
- } catch (e) {}
- }
- await sync.session.sync(route.sessionID)
- if (scroll) scroll.scrollBy(100_000)
+ })
})
let lastSwitch: string | undefined = undefined
diff --git a/packages/opencode/src/cli/cmd/tui/thread.ts b/packages/opencode/src/cli/cmd/tui/thread.ts
index e3e9eb811779..a2a53ecafa0d 100644
--- a/packages/opencode/src/cli/cmd/tui/thread.ts
+++ b/packages/opencode/src/cli/cmd/tui/thread.ts
@@ -16,6 +16,7 @@ import { win32DisableProcessedInput, win32InstallCtrlCGuard } from "./win32"
import { writeHeapSnapshot } from "v8"
import { TuiConfig } from "./config/tui"
import { OPENCODE_PROCESS_ROLE, OPENCODE_RUN_ID, ensureRunID, sanitizedProcessEnv } from "@/util/opencode-process"
+import { validateSession } from "./validate-session"
declare global {
const OPENCODE_WORKER_PATH: string
@@ -202,6 +203,19 @@ export const TuiThreadCommand = cmd({
events: createEventSource(client),
}
+ try {
+ await validateSession({
+ url: transport.url,
+ sessionID: args.session,
+ directory: cwd,
+ fetch: transport.fetch,
+ })
+ } catch (error) {
+ UI.error(errorMessage(error))
+ process.exitCode = 1
+ return
+ }
+
setTimeout(() => {
client.call("checkUpgrade", { directory: cwd }).catch(() => {})
}, 1000).unref?.()
diff --git a/packages/opencode/src/cli/cmd/tui/validate-session.ts b/packages/opencode/src/cli/cmd/tui/validate-session.ts
new file mode 100644
index 000000000000..e2a21d51e14c
--- /dev/null
+++ b/packages/opencode/src/cli/cmd/tui/validate-session.ts
@@ -0,0 +1,24 @@
+import { createOpencodeClient } from "@opencode-ai/sdk/v2"
+import { SessionID } from "@/session/schema"
+
+export async function validateSession(input: {
+ url: string
+ sessionID?: string
+ directory?: string
+ fetch?: typeof fetch
+ headers?: RequestInit["headers"]
+}) {
+ if (!input.sessionID) return
+
+ const result = SessionID.zod.safeParse(input.sessionID)
+ if (!result.success) {
+ throw new Error(`Invalid session ID: ${result.error.issues.at(0)?.message ?? "unknown error"}`)
+ }
+
+ await createOpencodeClient({
+ baseUrl: input.url,
+ directory: input.directory,
+ fetch: input.fetch,
+ headers: input.headers,
+ }).session.get({ sessionID: result.data }, { throwOnError: true })
+}
diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts
index 5423ba3baf5f..dd4ab24f68c6 100644
--- a/packages/opencode/src/config/config.ts
+++ b/packages/opencode/src/config/config.ts
@@ -430,6 +430,14 @@ export const layer = Layer.effect(
})
const ensureGitignore = Effect.fn("Config.ensureGitignore")(function* (dir: string) {
+ // Some config dirs may be read-only or not yet exist.
+ // Writing .gitignore there will fail; skip in that case.
+ const writable = yield* fs.isWritable(dir)
+ if (!writable) {
+ log.debug("config dir is not writable, skipping .gitignore", { dir })
+ return
+ }
+
const gitignore = path.join(dir, ".gitignore")
const hasIgnore = yield* fs.existsSafe(gitignore)
if (!hasIgnore) {
diff --git a/packages/opencode/src/format/index.ts b/packages/opencode/src/format/index.ts
index 85934ce9c9a3..53a2c10119b1 100644
--- a/packages/opencode/src/format/index.ts
+++ b/packages/opencode/src/format/index.ts
@@ -25,7 +25,7 @@ export type Status = z.infer<typeof Status>
export interface Interface {
  readonly init: () => Effect.Effect<void>
  readonly status: () => Effect.Effect<Status>
-  readonly file: (filepath: string) => Effect.Effect<void>
+  readonly file: (filepath: string) => Effect.Effect<boolean>
}
export class Service extends Context.Service()("@opencode/Format") {}
@@ -70,16 +70,19 @@ export const layer = Layer.effect(
}
}),
)
- return checks.filter((x) => x.cmd).map((x) => ({ item: x.item, cmd: x.cmd! }))
+ return checks
+ .filter((x): x is { item: Formatter.Info; cmd: string[] } => x.cmd !== false)
+ .map((x) => ({ item: x.item, cmd: x.cmd }))
}
function formatFile(filepath: string) {
return Effect.gen(function* () {
log.info("formatting", { file: filepath })
- const ext = path.extname(filepath)
+ const formatters = yield* Effect.promise(() => getFormatter(path.extname(filepath)))
- for (const { item, cmd } of yield* Effect.promise(() => getFormatter(ext))) {
- if (cmd === false) continue
+ if (!formatters.length) return false
+
+ for (const { item, cmd } of formatters) {
log.info("running", { command: cmd })
const replaced = cmd.map((x) => x.replace("$FILE", filepath))
const dir = yield* InstanceState.directory
@@ -113,6 +116,8 @@ export const layer = Layer.effect(
})
}
}
+
+ return true
})
}
@@ -188,7 +193,7 @@ export const layer = Layer.effect(
const file = Effect.fn("Format.file")(function* (filepath: string) {
const { formatFile } = yield* InstanceState.get(state)
- yield* formatFile(filepath)
+ return yield* formatFile(filepath)
})
return Service.of({ init, status, file })
diff --git a/packages/opencode/src/lsp/client.ts b/packages/opencode/src/lsp/client.ts
index b20e8ae7f00c..f6d5110a6c49 100644
--- a/packages/opencode/src/lsp/client.ts
+++ b/packages/opencode/src/lsp/client.ts
@@ -14,6 +14,16 @@ import { withTimeout } from "../util/timeout"
import { Filesystem } from "../util"
const DIAGNOSTICS_DEBOUNCE_MS = 150
+const DIAGNOSTICS_DOCUMENT_WAIT_TIMEOUT_MS = 5_000
+const DIAGNOSTICS_FULL_WAIT_TIMEOUT_MS = 10_000
+const DIAGNOSTICS_REQUEST_TIMEOUT_MS = 3_000
+
+const INITIALIZE_TIMEOUT_MS = 45_000
+
+// LSP spec constants
+const FILE_CHANGE_CREATED = 1
+const FILE_CHANGE_CHANGED = 2
+const TEXT_DOCUMENT_SYNC_INCREMENTAL = 2
const log = Log.create({ service: "lsp.client" })
@@ -38,48 +48,194 @@ export const Event = {
),
}
+type DocumentDiagnosticReport = {
+ items?: Diagnostic[]
+  relatedDocuments?: Record<string, { items?: Diagnostic[] }>
+}
+
+type WorkspaceDiagnosticReport = {
+ items?: {
+ uri?: string
+ items?: Diagnostic[]
+ }[]
+}
+
+type DiagnosticRequestResult = {
+ handled: boolean
+ matched: boolean
+  byFile: Map<string, Diagnostic[]>
+}
+
+type CapabilityRegistration = {
+ id: string
+ method: string
+ registerOptions?: {
+ identifier?: string
+ workspaceDiagnostics?: boolean
+ }
+}
+
+type ServerCapabilities = {
+ textDocumentSync?:
+ | number
+ | {
+ change?: number
+ }
+ diagnosticProvider?: unknown
+ [key: string]: unknown
+}
+
+function getFilePath(uri: string) {
+ if (!uri.startsWith("file://")) return
+ return Filesystem.normalizePath(fileURLToPath(uri))
+}
+
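+// Per the LSP spec, textDocumentSync can be a bare TextDocumentSyncKind number
+// or an options object; normalize to the numeric change kind, or undefined when
+// the server never declared one.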
+function getSyncKind(capabilities?: ServerCapabilities) {
+ if (!capabilities) return
+ const sync = capabilities.textDocumentSync
+ if (typeof sync === "number") return sync
+ return sync?.change
+}
+
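+// Position just past the last character of the given text, used as the end of
+// a whole-document range when sending an incremental didChange.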
+function endPosition(text: string) {
+ const lines = text.split(/\r\n|\r|\n/)
+ return {
+ line: lines.length - 1,
+ character: lines.at(-1)?.length ?? 0,
+ }
+}
+
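+// Push and pull can surface the same diagnostic twice; drop entries whose
+// identifying fields (code, severity, message, source, range) all match.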
+function dedupeDiagnostics(items: Diagnostic[]) {
+ const seen = new Set()
+ return items.filter((item) => {
+ const key = JSON.stringify({
+ code: item.code,
+ severity: item.severity,
+ message: item.message,
+ source: item.source,
+ range: item.range,
+ })
+ if (seen.has(key)) return false
+ seen.add(key)
+ return true
+ })
+}
+
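+// Resolve a dotted workspace/configuration section (e.g. "python.analysis")
+// against the server's initialization options, returning null for missing
+// sections as the spec expects.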
+function configurationValue(settings: unknown, section?: string) {
+ if (!section) return settings ?? null
+ const result = section.split(".").reduce((acc, key) => {
+ if (!acc || typeof acc !== "object" || !(key in acc)) return undefined
+    return (acc as Record<string, unknown>)[key]
+ }, settings)
+ return result ?? null
+}
+
+// TypeScript's built-in LSP pushes diagnostics aggressively on first open.
+// We seed the push cache on the very first publish so waitForFreshPush can
+// resolve immediately instead of waiting for a second debounced push.
+function shouldSeedDiagnosticsOnFirstPush(serverID: string) {
+ return serverID === "typescript"
+}
+
export async function create(input: { serverID: string; server: LSPServer.Handle; root: string; directory: string }) {
- const l = log.clone().tag("serverID", input.serverID)
- l.info("starting client")
+ const logger = log.clone().tag("serverID", input.serverID)
+ logger.info("starting client")
const connection = createMessageConnection(
new StreamMessageReader(input.server.process.stdout as any),
new StreamMessageWriter(input.server.process.stdin as any),
)
+  // Server stderr can mix real errors with routine informational logs; many
+  // tools use stderr for ordinary logging. Keep the raw stream at debug so
+  // users can opt in with --print-logs --log-level DEBUG without polluting
+  // normal logs.
+ input.server.process.stderr?.on("data", (data: Buffer) => {
+ const text = data.toString().trim()
+ if (text) logger.debug("server stderr", { text: text.slice(0, 1000) })
+ })
+
+ // --- Connection state ---
+
+  const pushDiagnostics = new Map<string, Diagnostic[]>()
+  const pullDiagnostics = new Map<string, Diagnostic[]>()
+  const published = new Map<string, { at: number; version?: number }>()
+  const diagnosticRegistrations = new Map<string, CapabilityRegistration>()
+ const registrationListeners = new Set<() => void>()
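+  // Diagnostics arrive via two channels: pushed publishDiagnostics and pulled
+  // textDocument/diagnostic reports. Track them separately and merge on read.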
+ const mergedDiagnostics = (filePath: string) =>
+ dedupeDiagnostics([...(pushDiagnostics.get(filePath) ?? []), ...(pullDiagnostics.get(filePath) ?? [])])
+ const updatePushDiagnostics = (filePath: string, next: Diagnostic[]) => {
+ pushDiagnostics.set(filePath, next)
+ Bus.publish(Event.Diagnostics, { path: filePath, serverID: input.serverID })
+ }
+ const updatePullDiagnostics = (filePath: string, next: Diagnostic[]) => {
+ pullDiagnostics.set(filePath, next)
+ }
+ const emitRegistrationChange = () => {
+ for (const listener of [...registrationListeners]) listener()
+ }
+
+ // --- LSP connection handlers ---
- const diagnostics = new Map()
connection.onNotification("textDocument/publishDiagnostics", (params) => {
- const filePath = Filesystem.normalizePath(fileURLToPath(params.uri))
- l.info("textDocument/publishDiagnostics", {
+ const filePath = getFilePath(params.uri)
+ if (!filePath) return
+ logger.info("textDocument/publishDiagnostics", {
path: filePath,
count: params.diagnostics.length,
+ version: params.version,
})
- const exists = diagnostics.has(filePath)
- diagnostics.set(filePath, params.diagnostics)
- if (!exists && input.serverID === "typescript") return
- Bus.publish(Event.Diagnostics, { path: filePath, serverID: input.serverID })
+ published.set(filePath, {
+ at: Date.now(),
+ version: typeof params.version === "number" ? params.version : undefined,
+ })
+ if (shouldSeedDiagnosticsOnFirstPush(input.serverID) && !pushDiagnostics.has(filePath)) {
+ pushDiagnostics.set(filePath, params.diagnostics)
+ return
+ }
+ updatePushDiagnostics(filePath, params.diagnostics)
})
connection.onRequest("window/workDoneProgress/create", (params) => {
- l.info("window/workDoneProgress/create", params)
+ logger.info("window/workDoneProgress/create", params)
return null
})
- connection.onRequest("workspace/configuration", async () => {
- // Return server initialization options
- return [input.server.initialization ?? {}]
+ connection.onRequest("workspace/configuration", async (params) => {
+ const items = (params as { items?: { section?: string }[] }).items ?? []
+ return items.map((item) => configurationValue(input.server.initialization, item.section))
+ })
+ connection.onRequest("client/registerCapability", async (params) => {
+ const registrations = (params as { registrations?: CapabilityRegistration[] }).registrations ?? []
+ let changed = false
+ for (const registration of registrations) {
+ if (registration.method !== "textDocument/diagnostic") continue
+ diagnosticRegistrations.set(registration.id, registration)
+ changed = true
+ }
+ if (changed) emitRegistrationChange()
+ })
+ connection.onRequest("client/unregisterCapability", async (params) => {
+ const registrations = (params as { unregisterations?: { id: string; method: string }[] }).unregisterations ?? []
+ let changed = false
+ for (const registration of registrations) {
+ if (registration.method !== "textDocument/diagnostic") continue
+ diagnosticRegistrations.delete(registration.id)
+ changed = true
+ }
+ if (changed) emitRegistrationChange()
})
- connection.onRequest("client/registerCapability", async () => {})
- connection.onRequest("client/unregisterCapability", async () => {})
connection.onRequest("workspace/workspaceFolders", async () => [
{
name: "workspace",
uri: pathToFileURL(input.root).href,
},
])
+ connection.onRequest("workspace/diagnostic/refresh", async () => null)
connection.listen()
- l.info("sending initialize")
- await withTimeout(
- connection.sendRequest("initialize", {
+ // --- Initialize handshake ---
+
+ logger.info("sending initialize")
+ const initialized = await withTimeout(
+ connection.sendRequest<{ capabilities?: ServerCapabilities }>("initialize", {
rootUri: pathToFileURL(input.root).href,
processId: input.server.process.pid,
workspaceFolders: [
@@ -100,21 +256,28 @@ export async function create(input: { serverID: string; server: LSPServer.Handle
didChangeWatchedFiles: {
dynamicRegistration: true,
},
+ diagnostics: {
+ refreshSupport: false,
+ },
},
textDocument: {
synchronization: {
didOpen: true,
didChange: true,
},
+ diagnostic: {
+ dynamicRegistration: true,
+ relatedDocumentSupport: true,
+ },
publishDiagnostics: {
- versionSupport: true,
+ versionSupport: false,
},
},
},
}),
- 45_000,
+ INITIALIZE_TIMEOUT_MS,
).catch((err) => {
- l.error("initialize error", { error: err })
+ logger.error("initialize error", { error: err })
throw new InitializeError(
{ serverID: input.serverID },
{
@@ -123,6 +286,9 @@ export async function create(input: { serverID: string; server: LSPServer.Handle
)
})
+ const syncKind = getSyncKind(initialized.capabilities)
+ const hasStaticPullDiagnostics = Boolean(initialized.capabilities?.diagnosticProvider)
+
await connection.sendNotification("initialized", {})
if (input.server.initialization) {
@@ -131,9 +297,280 @@ export async function create(input: { serverID: string; server: LSPServer.Handle
})
}
- const files: {
- [path: string]: number
- } = {}
+  const files: Record<string, { version: number; text: string }> = {}
+
+ // --- Diagnostic helpers ---
+
+ const mergeResults = (filePath: string, results: DiagnosticRequestResult[]) => {
+ const handled = results.some((result) => result.handled)
+ const matched = results.some((result) => result.matched)
+ if (!handled) return { handled: false, matched: false }
+
+    const merged = new Map<string, Diagnostic[]>()
+ for (const result of results) {
+ for (const [target, items] of result.byFile.entries()) {
+ const existing = merged.get(target) ?? []
+ merged.set(target, existing.concat(items))
+ }
+ }
+
+ if (matched && !merged.has(filePath)) merged.set(filePath, [])
+ for (const [target, items] of merged.entries()) {
+ updatePullDiagnostics(target, dedupeDiagnostics(items))
+ }
+
+ return { handled, matched }
+ }
+
+  async function requestDiagnosticReport(filePath: string, identifier?: string): Promise<DiagnosticRequestResult> {
+    const report = await withTimeout(
+      connection.sendRequest<DocumentDiagnosticReport>("textDocument/diagnostic", {
+ ...(identifier ? { identifier } : {}),
+ textDocument: {
+ uri: pathToFileURL(filePath).href,
+ },
+ }),
+ DIAGNOSTICS_REQUEST_TIMEOUT_MS,
+ ).catch(() => null)
+ if (!report) return { handled: false, matched: false, byFile: new Map() }
+
+    const byFile = new Map<string, Diagnostic[]>()
+ const push = (target: string, items: Diagnostic[]) => {
+ const existing = byFile.get(target) ?? []
+ byFile.set(target, existing.concat(items))
+ }
+
+ let handled = false
+ let matched = false
+ if (Array.isArray(report.items)) {
+ push(filePath, report.items)
+ handled = true
+ matched = true
+ }
+ for (const [uri, related] of Object.entries(report.relatedDocuments ?? {})) {
+ const relatedPath = getFilePath(uri)
+ if (!relatedPath || !Array.isArray(related.items)) continue
+ push(relatedPath, related.items)
+ handled = true
+ matched = matched || relatedPath === filePath
+ }
+
+ return { handled, matched, byFile }
+ }
+
+ async function requestWorkspaceDiagnosticReport(
+ filePath: string,
+ identifier?: string,
+  ): Promise<DiagnosticRequestResult> {
+    const report = await withTimeout(
+      connection.sendRequest<WorkspaceDiagnosticReport>("workspace/diagnostic", {
+ ...(identifier ? { identifier } : {}),
+ previousResultIds: [],
+ }),
+ DIAGNOSTICS_REQUEST_TIMEOUT_MS,
+ ).catch(() => null)
+ if (!report) return { handled: false, matched: false, byFile: new Map() }
+
+    const byFile = new Map<string, Diagnostic[]>()
+ let matched = false
+ for (const item of report.items ?? []) {
+ const relatedPath = item.uri ? getFilePath(item.uri) : undefined
+ if (!relatedPath || !Array.isArray(item.items)) continue
+ const existing = byFile.get(relatedPath) ?? []
+ byFile.set(relatedPath, existing.concat(item.items))
+ matched = matched || relatedPath === filePath
+ }
+
+ return { handled: true, matched, byFile }
+ }
+
+ function documentPullState() {
+ const documentRegistrations = [...diagnosticRegistrations.values()].filter(
+ (registration) => registration.registerOptions?.workspaceDiagnostics !== true,
+ )
+ return {
+ documentIdentifiers: [
+ ...new Set(documentRegistrations.flatMap((registration) => registration.registerOptions?.identifier ?? [])),
+ ],
+ supported: hasStaticPullDiagnostics || documentRegistrations.length > 0,
+ }
+ }
+
+ function workspacePullState() {
+ const workspaceRegistrations = [...diagnosticRegistrations.values()].filter(
+ (registration) => registration.registerOptions?.workspaceDiagnostics === true,
+ )
+ return {
+ workspaceIdentifiers: [
+ ...new Set(workspaceRegistrations.flatMap((registration) => registration.registerOptions?.identifier ?? [])),
+ ],
+ supported: workspaceRegistrations.length > 0,
+ }
+ }
+
+ const hasCurrentFileDiagnostics = (filePath: string, results: DiagnosticRequestResult[]) =>
+ results.some((result) => (result.byFile.get(filePath)?.length ?? 0) > 0)
+
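+  // Fires all pulls in parallel and resolves as soon as `done` is satisfied by
+  // the results collected so far (or once every request settles); stragglers
+  // still merge into pull state via mergeResults above.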
+ async function requestDiagnostics(
+ filePath: string,
+    requests: Promise<DiagnosticRequestResult>[],
+ done: (results: DiagnosticRequestResult[]) => boolean,
+ ) {
+ if (!requests.length) return { handled: false, matched: false }
+
+ const results: DiagnosticRequestResult[] = []
+ return new Promise<{ handled: boolean; matched: boolean }>((resolve) => {
+ let pending = requests.length
+ let resolved = false
+ const finish = (merged: { handled: boolean; matched: boolean }, force = false) => {
+ if (resolved) return
+ if (!force && !done(results)) return
+ resolved = true
+ resolve(merged)
+ }
+
+ for (const request of requests) {
+ request.then((result) => {
+ results.push(result)
+ pending -= 1
+ const merged = mergeResults(filePath, results)
+ finish(merged)
+ if (pending === 0) finish(merged, true)
+ })
+ }
+ })
+ }
+
+ // LATENCY-CRITICAL: dispatch identifier pulls in parallel and unblock once one
+ // batch already produced diagnostics for the current file. Let slower pulls keep
+ // merging in the background; do not sequence identifier-by-identifier, and do
+ // not add a post-match settle/debounce delay. See PR #23771.
+ async function requestDocumentDiagnostics(filePath: string) {
+ const state = documentPullState()
+ if (!state.supported) return { handled: false, matched: false }
+ return requestDiagnostics(
+ filePath,
+ [
+ requestDiagnosticReport(filePath),
+ ...state.documentIdentifiers.map((identifier) => requestDiagnosticReport(filePath, identifier)),
+ ],
+ (results) => hasCurrentFileDiagnostics(filePath, results),
+ )
+ }
+
+ async function requestFullDiagnostics(filePath: string) {
+ const documentState = documentPullState()
+ const workspaceState = workspacePullState()
+ if (!documentState.supported && !workspaceState.supported) return { handled: false, matched: false }
+ return mergeResults(
+ filePath,
+ await Promise.all([
+ ...(documentState.supported ? [requestDiagnosticReport(filePath)] : []),
+ ...documentState.documentIdentifiers.map((identifier) => requestDiagnosticReport(filePath, identifier)),
+ ...(workspaceState.supported ? [requestWorkspaceDiagnosticReport(filePath)] : []),
+ ...workspaceState.workspaceIdentifiers.map((identifier) =>
+ requestWorkspaceDiagnosticReport(filePath, identifier),
+ ),
+ ]),
+ )
+ }
+
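+  // Resolves true if the server (un)registers a diagnostic capability within
+  // the timeout, signalling that a pull is worth retrying; false otherwise.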
+ function waitForRegistrationChange(timeout: number) {
+ if (timeout <= 0) return Promise.resolve(false)
+    return new Promise<boolean>((resolve) => {
+      let finished = false
+      let timer: ReturnType<typeof setTimeout> | undefined
+ const finish = (result: boolean) => {
+ if (finished) return
+ finished = true
+ if (timer) clearTimeout(timer)
+ registrationListeners.delete(listener)
+ resolve(result)
+ }
+ const listener = () => finish(true)
+ registrationListeners.add(listener)
+ timer = setTimeout(() => finish(false), timeout)
+ })
+ }
+
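+  // Resolves true once a publishDiagnostics for this path arrives matching the
+  // expected document version (or landing after `after`), debounced so rapid
+  // follow-up pushes settle first; resolves false on timeout.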
+ function waitForFreshPush(request: { path: string; version: number; after: number; timeout: number }) {
+ if (request.timeout <= 0) return Promise.resolve(false)
+    return new Promise<boolean>((resolve) => {
+      let finished = false
+      let debounceTimer: ReturnType<typeof setTimeout> | undefined
+      let timeoutTimer: ReturnType<typeof setTimeout> | undefined
+ let unsub: (() => void) | undefined
+ const finish = (result: boolean) => {
+ if (finished) return
+ finished = true
+ if (debounceTimer) clearTimeout(debounceTimer)
+ if (timeoutTimer) clearTimeout(timeoutTimer)
+ unsub?.()
+ resolve(result)
+ }
+ const schedule = () => {
+ const hit = published.get(request.path)
+ if (!hit) return
+ if (typeof hit.version === "number" && hit.version !== request.version) return
+ if (hit.at < request.after && hit.version !== request.version) return
+ if (debounceTimer) clearTimeout(debounceTimer)
+ debounceTimer = setTimeout(() => finish(true), Math.max(0, DIAGNOSTICS_DEBOUNCE_MS - (Date.now() - hit.at)))
+ }
+
+ timeoutTimer = setTimeout(() => finish(false), request.timeout)
+ unsub = Bus.subscribe(Event.Diagnostics, (event) => {
+ if (event.properties.path !== request.path || event.properties.serverID !== input.serverID) return
+ schedule()
+ })
+ schedule()
+ })
+ }
+
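+  // Pull document diagnostics until a report covers this file, re-pulling only
+  // when the server's diagnostic registrations change; a fresh push or the
+  // overall timeout ends the wait instead.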
+ async function waitForDocumentDiagnostics(request: { path: string; version: number; after?: number }) {
+ const startedAt = request.after ?? Date.now()
+ const pushWait = waitForFreshPush({
+ path: request.path,
+ version: request.version,
+ after: startedAt,
+ timeout: DIAGNOSTICS_DOCUMENT_WAIT_TIMEOUT_MS,
+ })
+
+ while (Date.now() - startedAt < DIAGNOSTICS_DOCUMENT_WAIT_TIMEOUT_MS) {
+ const result = await requestDocumentDiagnostics(request.path)
+ if (result.matched) return
+ const remaining = DIAGNOSTICS_DOCUMENT_WAIT_TIMEOUT_MS - (Date.now() - startedAt)
+ if (remaining <= 0) return
+ const next = await Promise.race([
+ pushWait.then((ready) => (ready ? "push" : ("timeout" as const))),
+ waitForRegistrationChange(remaining).then((changed) => (changed ? "registration" : ("timeout" as const))),
+ ])
+ if (next !== "registration") return
+ }
+ }
+
+ async function waitForFullDiagnostics(request: { path: string; version: number; after?: number }) {
+ const startedAt = request.after ?? Date.now()
+ const pushWait = waitForFreshPush({
+ path: request.path,
+ version: request.version,
+ after: startedAt,
+ timeout: DIAGNOSTICS_FULL_WAIT_TIMEOUT_MS,
+ })
+
+ while (Date.now() - startedAt < DIAGNOSTICS_FULL_WAIT_TIMEOUT_MS) {
+ const result = await requestFullDiagnostics(request.path)
+ if (result.handled || result.matched) return
+ const remaining = DIAGNOSTICS_FULL_WAIT_TIMEOUT_MS - (Date.now() - startedAt)
+ if (remaining <= 0) return
+ const next = await Promise.race([
+ pushWait.then((ready) => (ready ? "push" : ("timeout" as const))),
+ waitForRegistrationChange(remaining).then((changed) => (changed ? "registration" : ("timeout" as const))),
+ ])
+ if (next !== "registration") return
+ }
+ }
+
+ // --- Public API ---
const result = {
root: input.root,
@@ -145,26 +582,32 @@ export async function create(input: { serverID: string; server: LSPServer.Handle
},
notify: {
async open(request: { path: string }) {
- request.path = path.isAbsolute(request.path) ? request.path : path.resolve(input.directory, request.path)
+ request.path = Filesystem.normalizePath(
+ path.isAbsolute(request.path) ? request.path : path.resolve(input.directory, request.path),
+ )
const text = await Filesystem.readText(request.path)
const extension = path.extname(request.path)
const languageId = LANGUAGE_EXTENSIONS[extension] ?? "plaintext"
- const version = files[request.path]
- if (version !== undefined) {
- log.info("workspace/didChangeWatchedFiles", request)
+ const document = files[request.path]
+ if (document !== undefined) {
+ // Do not wipe diagnostics on didChange. Some servers (e.g. clangd) only
+ // re-emit diagnostics when the content actually changes, so clearing
+ // here would lose errors for no-op touchFile calls. Let the server's
+ // next push/pull overwrite naturally.
+ logger.info("workspace/didChangeWatchedFiles", request)
await connection.sendNotification("workspace/didChangeWatchedFiles", {
changes: [
{
uri: pathToFileURL(request.path).href,
- type: 2, // Changed
+ type: FILE_CHANGE_CHANGED,
},
],
})
- const next = version + 1
- files[request.path] = next
- log.info("textDocument/didChange", {
+ const next = document.version + 1
+ files[request.path] = { version: next, text }
+ logger.info("textDocument/didChange", {
path: request.path,
version: next,
})
@@ -173,23 +616,35 @@ export async function create(input: { serverID: string; server: LSPServer.Handle
uri: pathToFileURL(request.path).href,
version: next,
},
- contentChanges: [{ text }],
+ contentChanges:
+ syncKind === TEXT_DOCUMENT_SYNC_INCREMENTAL
+ ? [
+ {
+ range: {
+ start: { line: 0, character: 0 },
+ end: endPosition(document.text),
+ },
+ text,
+ },
+ ]
+ : [{ text }],
})
- return
+ return next
}
- log.info("workspace/didChangeWatchedFiles", request)
+ logger.info("workspace/didChangeWatchedFiles", request)
await connection.sendNotification("workspace/didChangeWatchedFiles", {
changes: [
{
uri: pathToFileURL(request.path).href,
- type: 1, // Created
+ type: FILE_CHANGE_CREATED,
},
],
})
- log.info("textDocument/didOpen", request)
- diagnostics.delete(request.path)
+ logger.info("textDocument/didOpen", request)
+ pushDiagnostics.delete(request.path)
+ pullDiagnostics.delete(request.path)
await connection.sendNotification("textDocument/didOpen", {
textDocument: {
uri: pathToFileURL(request.path).href,
@@ -198,52 +653,42 @@ export async function create(input: { serverID: string; server: LSPServer.Handle
text,
},
})
- files[request.path] = 0
- return
+ files[request.path] = { version: 0, text }
+ return 0
},
},
get diagnostics() {
- return diagnostics
+      const result = new Map<string, Diagnostic[]>()
+ for (const key of new Set([...pushDiagnostics.keys(), ...pullDiagnostics.keys()])) {
+ result.set(key, mergedDiagnostics(key))
+ }
+ return result
},
- async waitForDiagnostics(request: { path: string }) {
+ async waitForDiagnostics(request: { path: string; version: number; mode?: "document" | "full"; after?: number }) {
const normalizedPath = Filesystem.normalizePath(
path.isAbsolute(request.path) ? request.path : path.resolve(input.directory, request.path),
)
- log.info("waiting for diagnostics", { path: normalizedPath })
- let unsub: () => void
- let debounceTimer: ReturnType | undefined
- return await withTimeout(
- new Promise((resolve) => {
- unsub = Bus.subscribe(Event.Diagnostics, (event) => {
- if (event.properties.path === normalizedPath && event.properties.serverID === result.serverID) {
- // Debounce to allow LSP to send follow-up diagnostics (e.g., semantic after syntax)
- if (debounceTimer) clearTimeout(debounceTimer)
- debounceTimer = setTimeout(() => {
- log.info("got diagnostics", { path: normalizedPath })
- unsub?.()
- resolve()
- }, DIAGNOSTICS_DEBOUNCE_MS)
- }
- })
- }),
- 3000,
- )
- .catch(() => {})
- .finally(() => {
- if (debounceTimer) clearTimeout(debounceTimer)
- unsub?.()
- })
+ logger.info("waiting for diagnostics", {
+ path: normalizedPath,
+ mode: request.mode ?? "full",
+ version: request.version,
+ })
+ if (request.mode === "document") {
+ await waitForDocumentDiagnostics({ path: normalizedPath, version: request.version, after: request.after })
+ return
+ }
+ await waitForFullDiagnostics({ path: normalizedPath, version: request.version, after: request.after })
},
async shutdown() {
- l.info("shutting down")
+ logger.info("shutting down")
connection.end()
connection.dispose()
await Process.stop(input.server.process)
- l.info("shutdown")
+ logger.info("shutdown")
},
}
- l.info("initialized")
+ logger.info("initialized")
return result
}
diff --git a/packages/opencode/src/lsp/lsp.ts b/packages/opencode/src/lsp/lsp.ts
index 833285e7b562..4c46cd9aa776 100644
--- a/packages/opencode/src/lsp/lsp.ts
+++ b/packages/opencode/src/lsp/lsp.ts
@@ -136,7 +136,7 @@ export interface Interface {
readonly init: () => Effect.Effect
readonly status: () => Effect.Effect
readonly hasClients: (file: string) => Effect.Effect
- readonly touchFile: (input: string, waitForDiagnostics?: boolean) => Effect.Effect
+ readonly touchFile: (input: string, diagnostics?: "document" | "full") => Effect.Effect
  readonly diagnostics: () => Effect.Effect<Record<string, Diagnostic[]>>
readonly hover: (input: LocInput) => Effect.Effect
readonly definition: (input: LocInput) => Effect.Effect
@@ -358,15 +358,21 @@ export const layer = Layer.effect(
})
})
- const touchFile = Effect.fn("LSP.touchFile")(function* (input: string, waitForDiagnostics?: boolean) {
+ const touchFile = Effect.fn("LSP.touchFile")(function* (input: string, diagnostics?: "document" | "full") {
log.info("touching file", { file: input })
const clients = yield* getClients(input)
yield* Effect.promise(() =>
Promise.all(
clients.map(async (client) => {
- const wait = waitForDiagnostics ? client.waitForDiagnostics({ path: input }) : Promise.resolve()
- await client.notify.open({ path: input })
- return wait
+ const after = Date.now()
+ const version = await client.notify.open({ path: input })
+ if (!diagnostics) return
+ return client.waitForDiagnostics({
+ path: input,
+ version,
+ mode: diagnostics,
+ after,
+ })
}),
).catch((err) => {
log.error("failed to touch file", { err, file: input })
diff --git a/packages/opencode/src/lsp/server.ts b/packages/opencode/src/lsp/server.ts
index 8bb70a51166e..a0cb8fe3881f 100644
--- a/packages/opencode/src/lsp/server.ts
+++ b/packages/opencode/src/lsp/server.ts
@@ -490,7 +490,7 @@ export const Pyright: Info = {
const args = []
if (!binary) {
if (Flag.OPENCODE_DISABLE_LSP_DOWNLOAD) return
- const resolved = await Npm.which("pyright")
+ const resolved = await Npm.which("pyright", "pyright-langserver")
if (!resolved) return
binary = resolved
}
diff --git a/packages/opencode/src/npm/index.ts b/packages/opencode/src/npm/index.ts
index 477e99e06afe..fc8497d20b8c 100644
--- a/packages/opencode/src/npm/index.ts
+++ b/packages/opencode/src/npm/index.ts
@@ -34,7 +34,7 @@ export interface Interface {
},
) => Effect.Effect
readonly outdated: (pkg: string, cachedVersion: string) => Effect.Effect
-  readonly which: (pkg: string) => Effect.Effect<Option.Option<string>>
+  readonly which: (pkg: string, bin?: string) => Effect.Effect<Option.Option<string>>
}
export class Service extends Context.Service()("@opencode/Npm") {}
@@ -207,7 +207,7 @@ export const layer = Layer.effect(
return
}, Effect.scoped)
- const which = Effect.fn("Npm.which")(function* (pkg: string) {
+ const which = Effect.fn("Npm.which")(function* (pkg: string, bin?: string) {
const dir = directory(pkg)
const binDir = path.join(dir, "node_modules", ".bin")
@@ -215,6 +215,9 @@ export const layer = Layer.effect(
const files = yield* fs.readDirectory(binDir).pipe(Effect.catch(() => Effect.succeed([] as string[])))
if (files.length === 0) return Option.none()
+ // Caller picked a specific bin (e.g. pyright exposes both `pyright` and
+ // `pyright-langserver`); trust the hint if the package provides it.
+ if (bin) return files.includes(bin) ? Option.some(bin) : Option.none()
if (files.length === 1) return Option.some(files[0])
const pkgJson = yield* afs.readJson(path.join(dir, "node_modules", pkg, "package.json")).pipe(Effect.option)
@@ -223,11 +226,11 @@ export const layer = Layer.effect(
      const parsed = pkgJson.value as { bin?: string | Record<string, string> }
if (parsed?.bin) {
const unscoped = pkg.startsWith("@") ? pkg.split("/")[1] : pkg
- const bin = parsed.bin
- if (typeof bin === "string") return Option.some(unscoped)
- const keys = Object.keys(bin)
+ const parsedBin = parsed.bin
+ if (typeof parsedBin === "string") return Option.some(unscoped)
+ const keys = Object.keys(parsedBin)
if (keys.length === 1) return Option.some(keys[0])
- return bin[unscoped] ? Option.some(unscoped) : Option.some(keys[0])
+ return parsedBin[unscoped] ? Option.some(unscoped) : Option.some(keys[0])
}
}
diff --git a/packages/opencode/src/patch/index.ts b/packages/opencode/src/patch/index.ts
index 19e1d7555bb0..3662f9e908ae 100644
--- a/packages/opencode/src/patch/index.ts
+++ b/packages/opencode/src/patch/index.ts
@@ -3,6 +3,7 @@ import * as path from "path"
import * as fs from "fs/promises"
import { readFileSync } from "fs"
import { Log } from "../util"
+import * as Bom from "../util/bom"
const log = Log.create({ service: "patch" })
@@ -305,18 +306,19 @@ export function maybeParseApplyPatch(
interface ApplyPatchFileUpdate {
unified_diff: string
content: string
+ bom: boolean
}
export function deriveNewContentsFromChunks(filePath: string, chunks: UpdateFileChunk[]): ApplyPatchFileUpdate {
// Read original file content
- let originalContent: string
+  let originalContent: ReturnType<typeof Bom.split>
try {
- originalContent = readFileSync(filePath, "utf-8")
+ originalContent = Bom.split(readFileSync(filePath, "utf-8"))
} catch (error) {
throw new Error(`Failed to read file ${filePath}: ${error}`, { cause: error })
}
- let originalLines = originalContent.split("\n")
+ let originalLines = originalContent.text.split("\n")
// Drop trailing empty element for consistent line counting
if (originalLines.length > 0 && originalLines[originalLines.length - 1] === "") {
@@ -331,14 +333,16 @@ export function deriveNewContentsFromChunks(filePath: string, chunks: UpdateFile
newLines.push("")
}
- const newContent = newLines.join("\n")
+ const next = Bom.split(newLines.join("\n"))
+ const newContent = next.text
// Generate unified diff
- const unifiedDiff = generateUnifiedDiff(originalContent, newContent)
+ const unifiedDiff = generateUnifiedDiff(originalContent.text, newContent)
return {
unified_diff: unifiedDiff,
content: newContent,
+ bom: originalContent.bom || next.bom,
}
}
@@ -553,13 +557,13 @@ export async function applyHunksToFiles(hunks: Hunk[]): Promise {
await fs.mkdir(moveDir, { recursive: true })
}
- await fs.writeFile(hunk.move_path, fileUpdate.content, "utf-8")
+ await fs.writeFile(hunk.move_path, Bom.join(fileUpdate.content, fileUpdate.bom), "utf-8")
await fs.unlink(hunk.path)
modified.push(hunk.move_path)
log.info(`Moved file: ${hunk.path} -> ${hunk.move_path}`)
} else {
// Regular update
- await fs.writeFile(hunk.path, fileUpdate.content, "utf-8")
+ await fs.writeFile(hunk.path, Bom.join(fileUpdate.content, fileUpdate.bom), "utf-8")
modified.push(hunk.path)
log.info(`Updated file: ${hunk.path}`)
}
diff --git a/packages/opencode/src/plugin/codex.ts b/packages/opencode/src/plugin/codex.ts
index c61cb7850900..84d314f476ff 100644
--- a/packages/opencode/src/plugin/codex.ts
+++ b/packages/opencode/src/plugin/codex.ts
@@ -374,6 +374,7 @@ export async function CodexAuthPlugin(input: PluginInput): Promise {
"gpt-5.3-codex",
"gpt-5.4",
"gpt-5.4-mini",
+ "gpt-5.5",
])
for (const [modelId, model] of Object.entries(provider.models)) {
if (modelId.includes("codex")) continue
diff --git a/packages/opencode/src/project/project.ts b/packages/opencode/src/project/project.ts
index 6a2132274adf..d628f87f9782 100644
--- a/packages/opencode/src/project/project.ts
+++ b/packages/opencode/src/project/project.ts
@@ -207,13 +207,13 @@ export const layer: Layer.Layer<
vcs: fakeVcs,
}
}
- const worktree = (() => {
- const common = resolveGitPath(sandbox, commonDir.text.trim())
- return common === sandbox ? sandbox : pathSvc.dirname(common)
- })()
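+      // A worktree of a bare repository resolves its common dir to the bare
+      // repo itself; taking dirname there would point outside the repository.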
+ const common = resolveGitPath(sandbox, commonDir.text.trim())
+ const bareCheck = yield* git(["config", "--bool", "core.bare"], { cwd: sandbox })
+ const isBareRepo = bareCheck.code === 0 && bareCheck.text.trim() === "true"
+ const worktree = common === sandbox ? sandbox : isBareRepo ? common : pathSvc.dirname(common)
if (id == null) {
- id = yield* readCachedProjectId(pathSvc.join(worktree, ".git"))
+ id = yield* readCachedProjectId(common)
}
if (!id) {
@@ -226,7 +226,7 @@ export const layer: Layer.Layer<
id = roots[0] ? ProjectID.make(roots[0]) : undefined
if (id) {
- yield* fs.writeFileString(pathSvc.join(worktree, ".git", "opencode"), id).pipe(Effect.ignore)
+ yield* fs.writeFileString(pathSvc.join(common, "opencode"), id).pipe(Effect.ignore)
}
}
diff --git a/packages/opencode/src/session/compaction.ts b/packages/opencode/src/session/compaction.ts
index 037543064e23..defdb870d7d9 100644
--- a/packages/opencode/src/session/compaction.ts
+++ b/packages/opencode/src/session/compaction.ts
@@ -32,16 +32,105 @@ export const Event = {
export const PRUNE_MINIMUM = 20_000
export const PRUNE_PROTECT = 40_000
+const TOOL_OUTPUT_MAX_CHARS = 2_000
const PRUNE_PROTECTED_TOOLS = ["skill"]
const DEFAULT_TAIL_TURNS = 2
const MIN_PRESERVE_RECENT_TOKENS = 2_000
const MAX_PRESERVE_RECENT_TOKENS = 8_000
+const SUMMARY_TEMPLATE = `Output exactly this Markdown structure and keep the section order unchanged:
+---
+## Goal
+- [single-sentence task summary]
+
+## Constraints & Preferences
+- [user constraints, preferences, specs, or "(none)"]
+
+## Progress
+### Done
+- [completed work or "(none)"]
+
+### In Progress
+- [current work or "(none)"]
+
+### Blocked
+- [blockers or "(none)"]
+
+## Key Decisions
+- [decision and why, or "(none)"]
+
+## Next Steps
+- [ordered next actions or "(none)"]
+
+## Critical Context
+- [important technical facts, errors, open questions, or "(none)"]
+
+## Relevant Files
+- [file or directory path: why it matters, or "(none)"]
+---
+
+Rules:
+- Keep every section, even when empty.
+- Use terse bullets, not prose paragraphs.
+- Preserve exact file paths, commands, error strings, and identifiers when known.
+- Do not mention the summary process or that context was compacted.`
type Turn = {
start: number
end: number
id: MessageID
}
+type Tail = {
+ start: number
+ id: MessageID
+}
+
+type CompletedCompaction = {
+ userIndex: number
+ assistantIndex: number
+ summary: string | undefined
+}
+
+function summaryText(message: MessageV2.WithParts) {
+ const text = message.parts
+ .filter((part): part is MessageV2.TextPart => part.type === "text")
+ .map((part) => part.text.trim())
+ .filter(Boolean)
+ .join("\n\n")
+ .trim()
+ return text || undefined
+}
+
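+// Pair each compaction-request user message with its finished assistant
+// summary, so earlier compaction exchanges can be hidden from selection and
+// the latest summary reused as the anchor for the next one.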
+function completedCompactions(messages: MessageV2.WithParts[]) {
+  const users = new Map<string, number>()
+ for (let i = 0; i < messages.length; i++) {
+ const msg = messages[i]
+ if (msg.info.role !== "user") continue
+ if (!msg.parts.some((part) => part.type === "compaction")) continue
+ users.set(msg.info.id, i)
+ }
+
+ return messages.flatMap((msg, assistantIndex): CompletedCompaction[] => {
+ if (msg.info.role !== "assistant") return []
+ if (!msg.info.summary || !msg.info.finish || msg.info.error) return []
+ const userIndex = users.get(msg.info.parentID)
+ if (userIndex === undefined) return []
+ return [{ userIndex, assistantIndex, summary: summaryText(msg) }]
+ })
+}
+
+function buildPrompt(input: { previousSummary?: string; context: string[] }) {
+ const anchor = input.previousSummary
+ ? [
+ "Update the anchored summary below using the conversation history above.",
+ "Preserve still-true details, remove stale details, and merge in the new facts.",
+ "",
+ input.previousSummary,
+ "",
+ ].join("\n")
+ : "Create a new anchored summary from the conversation history above."
+ return [anchor, SUMMARY_TEMPLATE, ...input.context].join("\n\n")
+}
+
function preserveRecentBudget(input: { cfg: Config.Info; model: Provider.Model }) {
return (
input.cfg.compaction?.preserve_recent_tokens ??
@@ -67,6 +156,31 @@ function turns(messages: MessageV2.WithParts[]) {
return result
}
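+
+// When the most recent turns exceed the budget, find the earliest message
+// inside an oversized turn whose suffix still fits, so part of the turn can be
+// preserved verbatim instead of dropping it entirely.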
+function splitTurn(input: {
+ messages: MessageV2.WithParts[]
+ turn: Turn
+ model: Provider.Model
+ budget: number
+  estimate: (input: { messages: MessageV2.WithParts[]; model: Provider.Model }) => Effect.Effect<number>
+}) {
+ return Effect.gen(function* () {
+ if (input.budget <= 0) return undefined
+ if (input.turn.end - input.turn.start <= 1) return undefined
+ for (let start = input.turn.start + 1; start < input.turn.end; start++) {
+ const size = yield* input.estimate({
+ messages: input.messages.slice(start, input.turn.end),
+ model: input.model,
+ })
+ if (size > input.budget) continue
+ return {
+ start,
+ id: input.messages[start]!.info.id,
+ } satisfies Tail
+ }
+ return undefined
+ })
+}
+
export interface Interface {
readonly isOverflow: (input: {
tokens: MessageV2.Assistant["tokens"]
@@ -147,18 +261,28 @@ export const layer: Layer.Layer<
}),
{ concurrency: 1 },
)
- if (sizes.at(-1)! > budget) {
- log.info("tail fallback", { budget, size: sizes.at(-1) })
- return { head: input.messages, tail_start_id: undefined }
- }
let total = 0
- let keep: Turn | undefined
+ let keep: Tail | undefined
for (let i = recent.length - 1; i >= 0; i--) {
+ const turn = recent[i]!
const size = sizes[i]
- if (total + size > budget) break
- total += size
- keep = recent[i]
+ if (total + size <= budget) {
+ total += size
+ keep = { start: turn.start, id: turn.id }
+ continue
+ }
+ const remaining = budget - total
+ const split = yield* splitTurn({
+ messages: input.messages,
+ turn,
+ model: input.model,
+ budget: remaining,
+ estimate,
+ })
+ if (split) keep = split
+ else if (!keep) log.info("tail fallback", { budget, size, total })
+ break
}
if (!keep || keep.start === 0) return { head: input.messages, tail_start_id: undefined }
@@ -192,17 +316,15 @@ export const layer: Layer.Layer<
if (msg.info.role === "assistant" && msg.info.summary) break loop
for (let partIndex = msg.parts.length - 1; partIndex >= 0; partIndex--) {
const part = msg.parts[partIndex]
- if (part.type === "tool")
- if (part.state.status === "completed") {
- if (PRUNE_PROTECTED_TOOLS.includes(part.tool)) continue
- if (part.state.time.compacted) break loop
- const estimate = Token.estimate(part.state.output)
- total += estimate
- if (total > PRUNE_PROTECT) {
- pruned += estimate
- toPrune.push(part)
- }
- }
+ if (part.type !== "tool") continue
+ if (part.state.status !== "completed") continue
+ if (PRUNE_PROTECTED_TOOLS.includes(part.tool)) continue
+ if (part.state.time.compacted) break loop
+ const estimate = Token.estimate(part.state.output)
+ total += estimate
+ if (total <= PRUNE_PROTECT) continue
+ pruned += estimate
+ toPrune.push(part)
}
}
@@ -263,8 +385,11 @@ export const layer: Layer.Layer<
: yield* provider.getModel(userMessage.model.providerID, userMessage.model.modelID)
const cfg = yield* config.get()
const history = compactionPart && messages.at(-1)?.info.id === input.parentID ? messages.slice(0, -1) : messages
+ const prior = completedCompactions(history)
+ const hidden = new Set(prior.flatMap((item) => [item.userIndex, item.assistantIndex]))
+ const previousSummary = prior.at(-1)?.summary
const selected = yield* select({
- messages: history,
+ messages: history.filter((_, index) => !hidden.has(index)),
cfg,
model,
})
@@ -274,34 +401,13 @@ export const layer: Layer.Layer<
{ sessionID: input.sessionID },
{ context: [], prompt: undefined },
)
- const defaultPrompt = `When constructing the summary, try to stick to this template:
----
-## Goal
-
-[What goal(s) is the user trying to accomplish?]
-
-## Instructions
-
-- [What important instructions did the user give you that are relevant]
-- [If there is a plan or spec, include information about it so next agent can continue using it]
-
-## Discoveries
-
-[What notable things were learned during this conversation that would be useful for the next agent to know when continuing the work]
-
-## Accomplished
-
-[What work has been completed, what work is still in progress, and what work is left?]
-
-## Relevant files / directories
-
-[Construct a structured list of relevant files that have been read, edited, or created that pertain to the task at hand. If all the files in a directory are relevant, include the path to the directory.]
----`
-
- const prompt = compacting.prompt ?? [defaultPrompt, ...compacting.context].join("\n\n")
+ const nextPrompt = compacting.prompt ?? buildPrompt({ previousSummary, context: compacting.context })
const msgs = structuredClone(selected.head)
yield* plugin.trigger("experimental.chat.messages.transform", {}, { messages: msgs })
- const modelMessages = yield* MessageV2.toModelMessagesEffect(msgs, model, { stripMedia: true })
+ const modelMessages = yield* MessageV2.toModelMessagesEffect(msgs, model, {
+ stripMedia: true,
+ toolOutputMaxChars: TOOL_OUTPUT_MAX_CHARS,
+ })
const ctx = yield* InstanceState.context
const msg: MessageV2.Assistant = {
id: MessageID.ascending(),
@@ -345,7 +451,7 @@ export const layer: Layer.Layer<
...modelMessages,
{
role: "user",
- content: [{ type: "text", text: prompt }],
+ content: [{ type: "text", text: nextPrompt }],
},
],
model,
diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts
index 123f7b5401cb..980dd4da844f 100644
--- a/packages/opencode/src/session/message-v2.ts
+++ b/packages/opencode/src/session/message-v2.ts
@@ -319,6 +319,13 @@ export const ToolStateCompleted = Schema.Struct({
.pipe(withStatics((s) => ({ zod: zod(s) })))
export type ToolStateCompleted = Types.DeepMutable<Schema.Schema.Type<typeof ToolStateCompleted>>
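+// Caps completed tool output sent to the compaction model, noting how many characters were omitted.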
+function truncateToolOutput(text: string, maxChars?: number) {
+ if (!maxChars || text.length <= maxChars) return text
+ const omitted = text.length - maxChars
+ return `${text.slice(0, maxChars)}\n[Tool output truncated for compaction: omitted ${omitted} chars]`
+}
+
export const ToolStateError = Schema.Struct({
status: Schema.Literal("error"),
input: Schema.Record(Schema.String, Schema.Any),
@@ -700,7 +707,7 @@ function providerMeta(metadata: Record<string, unknown> | undefined) {
export const toModelMessagesEffect = Effect.fnUntraced(function* (
input: WithParts[],
model: Provider.Model,
- options?: { stripMedia?: boolean },
+ options?: { stripMedia?: boolean; toolOutputMaxChars?: number },
) {
const result: UIMessage[] = []
const toolNames = new Set<string>()
@@ -839,7 +846,9 @@ export const toModelMessagesEffect = Effect.fnUntraced(function* (
if (part.type === "tool") {
toolNames.add(part.tool)
if (part.state.status === "completed") {
- const outputText = part.state.time.compacted ? "[Old tool result content cleared]" : part.state.output
+ const outputText = part.state.time.compacted
+ ? "[Old tool result content cleared]"
+ : truncateToolOutput(part.state.output, options?.toolOutputMaxChars)
const attachments = part.state.time.compacted || options?.stripMedia ? [] : (part.state.attachments ?? [])
// For providers that don't support media in tool results, extract media files
@@ -955,7 +964,7 @@ export const toModelMessagesEffect = Effect.fnUntraced(function* (
export function toModelMessages(
input: WithParts[],
model: Provider.Model,
- options?: { stripMedia?: boolean },
+ options?: { stripMedia?: boolean; toolOutputMaxChars?: number },
): Promise<ModelMessage[]> {
return Effect.runPromise(toModelMessagesEffect(input, model, options).pipe(Effect.provide(EffectLogger.layer)))
}
diff --git a/packages/opencode/src/tool/apply_patch.ts b/packages/opencode/src/tool/apply_patch.ts
index 7da7dd255c52..33112c43c58e 100644
--- a/packages/opencode/src/tool/apply_patch.ts
+++ b/packages/opencode/src/tool/apply_patch.ts
@@ -14,6 +14,7 @@ import { AppFileSystem } from "@opencode-ai/shared/filesystem"
import DESCRIPTION from "./apply_patch.txt"
import { File } from "../file"
import { Format } from "../format"
+import * as Bom from "@/util/bom"
const PatchParams = z.object({
patchText: z.string().describe("The full patch text that describes all changes to be made"),
@@ -59,6 +60,7 @@ export const ApplyPatchTool = Tool.define(
diff: string
additions: number
deletions: number
+ bom: boolean
}> = []
let totalDiff = ""
@@ -72,11 +74,12 @@ export const ApplyPatchTool = Tool.define(
const oldContent = ""
const newContent =
hunk.contents.length === 0 || hunk.contents.endsWith("\n") ? hunk.contents : `${hunk.contents}\n`
- const diff = trimDiff(createTwoFilesPatch(filePath, filePath, oldContent, newContent))
+ const next = Bom.split(newContent)
+ const diff = trimDiff(createTwoFilesPatch(filePath, filePath, oldContent, next.text))
let additions = 0
let deletions = 0
- for (const change of diffLines(oldContent, newContent)) {
+ for (const change of diffLines(oldContent, next.text)) {
if (change.added) additions += change.count || 0
if (change.removed) deletions += change.count || 0
}
@@ -84,11 +87,12 @@ export const ApplyPatchTool = Tool.define(
fileChanges.push({
filePath,
oldContent,
- newContent,
+ newContent: next.text,
type: "add",
diff,
additions,
deletions,
+ bom: next.bom,
})
totalDiff += diff + "\n"
@@ -104,13 +108,16 @@ export const ApplyPatchTool = Tool.define(
)
}
- const oldContent = yield* afs.readFileString(filePath)
+ const source = yield* Bom.readFile(afs, filePath)
+ const oldContent = source.text
let newContent = oldContent
+ let bom = source.bom
// Apply the update chunks to get new content
try {
const fileUpdate = Patch.deriveNewContentsFromChunks(filePath, hunk.chunks)
newContent = fileUpdate.content
+ bom = fileUpdate.bom
} catch (error) {
return yield* Effect.fail(new Error(`apply_patch verification failed: ${error}`))
}
@@ -136,6 +143,7 @@ export const ApplyPatchTool = Tool.define(
diff,
additions,
deletions,
+ bom,
})
totalDiff += diff + "\n"
@@ -143,17 +151,16 @@ export const ApplyPatchTool = Tool.define(
}
case "delete": {
- const contentToDelete = yield* afs
- .readFileString(filePath)
- .pipe(
- Effect.catch((error) =>
- Effect.fail(
- new Error(
- `apply_patch verification failed: ${error instanceof Error ? error.message : String(error)}`,
- ),
+ const source = yield* Bom.readFile(afs, filePath).pipe(
+ Effect.catch((error) =>
+ Effect.fail(
+ new Error(
+ `apply_patch verification failed: ${error instanceof Error ? error.message : String(error)}`,
),
),
- )
+ ),
+ )
+ const contentToDelete = source.text
const deleteDiff = trimDiff(createTwoFilesPatch(filePath, filePath, contentToDelete, ""))
const deletions = contentToDelete.split("\n").length
@@ -166,6 +173,7 @@ export const ApplyPatchTool = Tool.define(
diff: deleteDiff,
additions: 0,
deletions,
+ bom: source.bom,
})
totalDiff += deleteDiff + "\n"
@@ -207,12 +215,12 @@ export const ApplyPatchTool = Tool.define(
case "add":
// Create parent directories (recursive: true is safe on existing/root dirs)
- yield* afs.writeWithDirs(change.filePath, change.newContent)
+ yield* afs.writeWithDirs(change.filePath, Bom.join(change.newContent, change.bom))
updates.push({ file: change.filePath, event: "add" })
break
case "update":
- yield* afs.writeWithDirs(change.filePath, change.newContent)
+ yield* afs.writeWithDirs(change.filePath, Bom.join(change.newContent, change.bom))
updates.push({ file: change.filePath, event: "change" })
break
@@ -220,7 +228,7 @@ export const ApplyPatchTool = Tool.define(
if (change.movePath) {
// Create parent directories (recursive: true is safe on existing/root dirs)
- yield* afs.writeWithDirs(change.movePath!, change.newContent)
+ yield* afs.writeWithDirs(change.movePath!, Bom.join(change.newContent, change.bom))
yield* afs.remove(change.filePath)
updates.push({ file: change.filePath, event: "unlink" })
updates.push({ file: change.movePath, event: "add" })
@@ -234,7 +242,9 @@ export const ApplyPatchTool = Tool.define(
}
if (edited) {
- yield* format.file(edited)
+ if (yield* format.file(edited)) {
+ yield* Bom.syncFile(afs, edited, change.bom)
+ }
yield* bus.publish(File.Event.Edited, { file: edited })
}
}
@@ -248,7 +258,7 @@ export const ApplyPatchTool = Tool.define(
for (const change of fileChanges) {
if (change.type === "delete") continue
const target = change.movePath ?? change.filePath
- yield* lsp.touchFile(target, true)
+ yield* lsp.touchFile(target, "document")
}
const diagnostics = yield* lsp.diagnostics()
diff --git a/packages/opencode/src/tool/edit.ts b/packages/opencode/src/tool/edit.ts
index 2c6c2c13084a..35dd85b4768f 100644
--- a/packages/opencode/src/tool/edit.ts
+++ b/packages/opencode/src/tool/edit.ts
@@ -18,6 +18,7 @@ import { Instance } from "../project/instance"
import { Snapshot } from "@/snapshot"
import { assertExternalDirectoryEffect } from "./external-directory"
import { AppFileSystem } from "@opencode-ai/shared/filesystem"
+import * as Bom from "@/util/bom"
function normalizeLineEndings(text: string): string {
return text.replaceAll("\r\n", "\n")
@@ -84,7 +85,11 @@ export const EditTool = Tool.define(
Effect.gen(function* () {
if (params.oldString === "") {
const existed = yield* afs.existsSafe(filePath)
- contentNew = params.newString
+ const source = existed ? yield* Bom.readFile(afs, filePath) : { bom: false, text: "" }
+ const next = Bom.split(params.newString)
+ const desiredBom = source.bom || next.bom
+ contentOld = source.text
+ contentNew = next.text
diff = trimDiff(createTwoFilesPatch(filePath, filePath, contentOld, contentNew))
yield* ctx.ask({
permission: "edit",
@@ -95,8 +100,10 @@ export const EditTool = Tool.define(
diff,
},
})
- yield* afs.writeWithDirs(filePath, params.newString)
- yield* format.file(filePath)
+ yield* afs.writeWithDirs(filePath, Bom.join(contentNew, desiredBom))
+ if (yield* format.file(filePath)) {
+ contentNew = yield* Bom.syncFile(afs, filePath, desiredBom)
+ }
yield* bus.publish(File.Event.Edited, { file: filePath })
yield* bus.publish(FileWatcher.Event.Updated, {
file: filePath,
@@ -108,13 +115,16 @@ export const EditTool = Tool.define(
const info = yield* afs.stat(filePath).pipe(Effect.catch(() => Effect.succeed(undefined)))
if (!info) throw new Error(`File ${filePath} not found`)
if (info.type === "Directory") throw new Error(`Path is a directory, not a file: ${filePath}`)
- contentOld = yield* afs.readFileString(filePath)
+ const source = yield* Bom.readFile(afs, filePath)
+ contentOld = source.text
const ending = detectLineEnding(contentOld)
const old = convertToLineEnding(normalizeLineEndings(params.oldString), ending)
- const next = convertToLineEnding(normalizeLineEndings(params.newString), ending)
+ const replacement = convertToLineEnding(normalizeLineEndings(params.newString), ending)
- contentNew = replace(contentOld, old, next, params.replaceAll)
+ const next = Bom.split(replace(contentOld, old, replacement, params.replaceAll))
+ const desiredBom = source.bom || next.bom
+ contentNew = next.text
diff = trimDiff(
createTwoFilesPatch(
@@ -134,14 +144,15 @@ export const EditTool = Tool.define(
},
})
- yield* afs.writeWithDirs(filePath, contentNew)
- yield* format.file(filePath)
+ yield* afs.writeWithDirs(filePath, Bom.join(contentNew, desiredBom))
+ if (yield* format.file(filePath)) {
+ contentNew = yield* Bom.syncFile(afs, filePath, desiredBom)
+ }
yield* bus.publish(File.Event.Edited, { file: filePath })
yield* bus.publish(FileWatcher.Event.Updated, {
file: filePath,
event: "change",
})
- contentNew = yield* afs.readFileString(filePath)
diff = trimDiff(
createTwoFilesPatch(
filePath,
@@ -175,7 +186,7 @@ export const EditTool = Tool.define(
})
let output = "Edit applied successfully."
- yield* lsp.touchFile(filePath, true)
+ yield* lsp.touchFile(filePath, "document")
const diagnostics = yield* lsp.diagnostics()
const normalizedFilePath = AppFileSystem.normalizePath(filePath)
const block = LSP.Diagnostic.report(filePath, diagnostics[normalizedFilePath] ?? [])
diff --git a/packages/opencode/src/tool/lsp.ts b/packages/opencode/src/tool/lsp.ts
index 263bfe81d2fc..0a0edc61edde 100644
--- a/packages/opencode/src/tool/lsp.ts
+++ b/packages/opencode/src/tool/lsp.ts
@@ -55,7 +55,7 @@ export const LspTool = Tool.define(
const available = yield* lsp.hasClients(file)
if (!available) throw new Error("No LSP server available for this file type.")
- yield* lsp.touchFile(file, true)
+ yield* lsp.touchFile(file, "document")
const result: unknown[] = yield* (() => {
switch (args.operation) {
diff --git a/packages/opencode/src/tool/read.ts b/packages/opencode/src/tool/read.ts
index c9b304862652..a9b95346a1fb 100644
--- a/packages/opencode/src/tool/read.ts
+++ b/packages/opencode/src/tool/read.ts
@@ -75,7 +75,7 @@ export const ReadTool = Tool.define(
})
const warm = Effect.fn("ReadTool.warm")(function* (filepath: string) {
- yield* lsp.touchFile(filepath, false).pipe(Effect.ignore, Effect.forkIn(scope))
+ yield* lsp.touchFile(filepath).pipe(Effect.ignore, Effect.forkIn(scope))
})
const readSample = Effect.fn("ReadTool.readSample")(function* (
diff --git a/packages/opencode/src/tool/write.ts b/packages/opencode/src/tool/write.ts
index 741091b21d3c..80198f4555d3 100644
--- a/packages/opencode/src/tool/write.ts
+++ b/packages/opencode/src/tool/write.ts
@@ -13,6 +13,7 @@ import { AppFileSystem } from "@opencode-ai/shared/filesystem"
import { Instance } from "../project/instance"
import { trimDiff } from "./edit"
import { assertExternalDirectoryEffect } from "./external-directory"
+import * as Bom from "@/util/bom"
const MAX_PROJECT_DIAGNOSTICS_FILES = 5
@@ -38,9 +39,13 @@ export const WriteTool = Tool.define(
yield* assertExternalDirectoryEffect(ctx, filepath)
const exists = yield* fs.existsSafe(filepath)
- const contentOld = exists ? yield* fs.readFileString(filepath) : ""
+ const source = exists ? yield* Bom.readFile(fs, filepath) : { bom: false, text: "" }
+ const next = Bom.split(params.content)
+ const desiredBom = source.bom || next.bom
+ const contentOld = source.text
+ const contentNew = next.text
- const diff = trimDiff(createTwoFilesPatch(filepath, filepath, contentOld, params.content))
+ const diff = trimDiff(createTwoFilesPatch(filepath, filepath, contentOld, contentNew))
yield* ctx.ask({
permission: "edit",
patterns: [path.relative(Instance.worktree, filepath)],
@@ -51,8 +56,10 @@ export const WriteTool = Tool.define(
},
})
- yield* fs.writeWithDirs(filepath, params.content)
- yield* format.file(filepath)
+ yield* fs.writeWithDirs(filepath, Bom.join(contentNew, desiredBom))
+ if (yield* format.file(filepath)) {
+ yield* Bom.syncFile(fs, filepath, desiredBom)
+ }
yield* bus.publish(File.Event.Edited, { file: filepath })
yield* bus.publish(FileWatcher.Event.Updated, {
file: filepath,
@@ -60,7 +67,7 @@ export const WriteTool = Tool.define(
})
let output = "Wrote file successfully."
- yield* lsp.touchFile(filepath, true)
+ yield* lsp.touchFile(filepath, "document")
const diagnostics = yield* lsp.diagnostics()
const normalizedFilepath = AppFileSystem.normalizePath(filepath)
let projectDiagnosticsCount = 0
diff --git a/packages/opencode/src/util/bom.ts b/packages/opencode/src/util/bom.ts
new file mode 100644
index 000000000000..484228f3d415
--- /dev/null
+++ b/packages/opencode/src/util/bom.ts
@@ -0,0 +1,36 @@
+import { Effect } from "effect"
+import { AppFileSystem } from "@opencode-ai/shared/filesystem"
+
+const BOM_CODE = 0xfeff
+const BOM = String.fromCharCode(BOM_CODE)
+
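+// Strips a leading UTF-8 BOM, reporting whether one was present.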
+export function split(text: string) {
+ if (text.charCodeAt(0) !== BOM_CODE) return { bom: false, text }
+ return { bom: true, text: text.slice(1) }
+}
+
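+// Applies the desired BOM state: strips any existing BOM, then prepends one if requested.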
+export function join(text: string, bom: boolean) {
+ const stripped = split(text).text
+ if (!bom) return stripped
+ return BOM + stripped
+}
+
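+// Reads a file as UTF-8 and splits off its BOM so callers can work with plain text.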
+export const readFile = Effect.fn("Bom.readFile")(function* (fs: AppFileSystem.Interface, filePath: string) {
+ return split(new TextDecoder("utf-8", { ignoreBOM: true }).decode(yield* fs.readFile(filePath)))
+})
+
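+// Restores the desired BOM state after an external tool (such as a formatter)
+// may have rewritten the file; returns the BOM-less text.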
+export const syncFile = Effect.fn("Bom.syncFile")(function* (
+ fs: AppFileSystem.Interface,
+ filePath: string,
+ bom: boolean,
+) {
+ const current = yield* readFile(fs, filePath)
+ if (current.bom === bom) return current.text
+ yield* fs.writeWithDirs(filePath, join(current.text, bom))
+ return current.text
+})
diff --git a/packages/opencode/src/util/error.ts b/packages/opencode/src/util/error.ts
index 75fef9fc9a04..fbda2dc50e02 100644
--- a/packages/opencode/src/util/error.ts
+++ b/packages/opencode/src/util/error.ts
@@ -26,6 +26,10 @@ export function errorMessage(error: unknown): string {
return error.message
}
+ if (isRecord(error) && isRecord(error.data) && typeof error.data.message === "string" && error.data.message) {
+ return error.data.message
+ }
+
const text = String(error)
if (text && text !== "[object Object]") return text
diff --git a/packages/opencode/test/config/config.test.ts b/packages/opencode/test/config/config.test.ts
index 73dd46e31994..2af7a6f662e4 100644
--- a/packages/opencode/test/config/config.test.ts
+++ b/packages/opencode/test/config/config.test.ts
@@ -2337,3 +2337,70 @@ test("parseManagedPlist handles empty config", async () => {
)
expect(config.$schema).toBe("https://opencode.ai/config.json")
})
+
+describe("ensureGitignore", () => {
+ test("skips .gitignore creation for non-existent config dir", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ // Return a path that does NOT exist on disk
+ return path.join(dir, "does-not-exist")
+ },
+ })
+
+ const prev = process.env.OPENCODE_CONFIG_DIR
+ process.env.OPENCODE_CONFIG_DIR = tmp.extra
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ // Should not throw even though the dir doesn't exist
+ await load()
+ },
+ })
+
+ // Dir was never created, so no .gitignore should exist
+ expect(await Filesystem.exists(path.join(tmp.extra, ".gitignore"))).toBe(false)
+ } finally {
+ if (prev === undefined) delete process.env.OPENCODE_CONFIG_DIR
+ else process.env.OPENCODE_CONFIG_DIR = prev
+ }
+ })
+
+ test("skips .gitignore creation for read-only config dir", async () => {
+ if (process.platform === "win32") return
+
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ const ro = path.join(dir, "readonly")
+ await fs.mkdir(ro, { recursive: true })
+ await fs.chmod(ro, 0o555)
+ return ro
+ },
+ dispose: async (dir) => {
+ const ro = path.join(dir, "readonly")
+ await fs.chmod(ro, 0o755).catch(() => {})
+ return ro
+ },
+ })
+
+ const prev = process.env.OPENCODE_CONFIG_DIR
+ process.env.OPENCODE_CONFIG_DIR = tmp.extra
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ // Should not throw even though the dir is read-only
+ await load()
+ },
+ })
+
+ // .gitignore should NOT have been created
+ expect(await Filesystem.exists(path.join(tmp.extra, ".gitignore"))).toBe(false)
+ } finally {
+ if (prev === undefined) delete process.env.OPENCODE_CONFIG_DIR
+ else process.env.OPENCODE_CONFIG_DIR = prev
+ }
+ })
+})
diff --git a/packages/opencode/test/effect/cross-spawn-spawner.test.ts b/packages/opencode/test/effect/cross-spawn-spawner.test.ts
index 5990635aa211..b4e52529c1de 100644
--- a/packages/opencode/test/effect/cross-spawn-spawner.test.ts
+++ b/packages/opencode/test/effect/cross-spawn-spawner.test.ts
@@ -169,7 +169,9 @@ describe("cross-spawn spawner", () => {
'process.stderr.write("stderr\\n", done)',
].join("\n"),
)
- const [stdout, stderr] = yield* Effect.all([decodeByteStream(handle.stdout), decodeByteStream(handle.stderr)])
+ const [stdout, stderr] = yield* Effect.all([decodeByteStream(handle.stdout), decodeByteStream(handle.stderr)], {
+ concurrency: 2,
+ })
expect(stdout).toBe("stdout")
expect(stderr).toBe("stderr")
}),
diff --git a/packages/opencode/test/fixture/lsp/fake-lsp-server.js b/packages/opencode/test/fixture/lsp/fake-lsp-server.js
index be62f96f38f9..e6818009e1f6 100644
--- a/packages/opencode/test/fixture/lsp/fake-lsp-server.js
+++ b/packages/opencode/test/fixture/lsp/fake-lsp-server.js
@@ -1,7 +1,24 @@
// Simple JSON-RPC 2.0 LSP-like fake server over stdio
-// Implements a minimal LSP handshake and triggers a request upon notification
let nextId = 1
+let readBuffer = Buffer.alloc(0)
+let lastChange = null
+let initializeParams = null
+let diagnosticRequestCount = 0
+let registeredCapability = false
+const pendingClientRequests = new Map()
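+// Pull-diagnostics behavior, configurable via the test/configure-pull-diagnostics request.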
+let pullConfig = {
+ delayMs: 0,
+ registerOn: undefined,
+ registrations: [],
+ documentDiagnostics: [],
+ documentDiagnosticsByIdentifier: {},
+ documentDelayMsByIdentifier: {},
+ workspaceDiagnostics: [],
+ workspaceDiagnosticsByIdentifier: {},
+ workspaceDelayMsByIdentifier: {},
+}
function encode(message) {
const json = JSON.stringify(message)
@@ -14,29 +31,19 @@
let idx
while ((idx = buffer.indexOf("\r\n\r\n")) !== -1) {
const header = buffer.slice(0, idx).toString("utf8")
- const m = /Content-Length:\s*(\d+)/i.exec(header)
- const len = m ? parseInt(m[1], 10) : 0
+ const match = /Content-Length:\s*(\d+)/i.exec(header)
+ const length = match ? parseInt(match[1], 10) : 0
const bodyStart = idx + 4
- const bodyEnd = bodyStart + len
+ const bodyEnd = bodyStart + length
if (buffer.length < bodyEnd) break
- const body = buffer.slice(bodyStart, bodyEnd).toString("utf8")
- results.push(body)
+ results.push(buffer.slice(bodyStart, bodyEnd).toString("utf8"))
buffer = buffer.slice(bodyEnd)
}
return { messages: results, rest: buffer }
}
-let readBuffer = Buffer.alloc(0)
-
-process.stdin.on("data", (chunk) => {
- readBuffer = Buffer.concat([readBuffer, chunk])
- const { messages, rest } = decodeFrames(readBuffer)
- readBuffer = rest
- for (const m of messages) handle(m)
-})
-
-function send(msg) {
- process.stdout.write(encode(msg))
+function send(message) {
+ process.stdout.write(encode(message))
}
function sendRequest(method, params) {
@@ -45,6 +52,51 @@
return id
}
+function sendResponse(id, result) {
+ send({ jsonrpc: "2.0", id, result })
+}
+
+function sendNotification(method, params) {
+ send({ jsonrpc: "2.0", method, params })
+}
+
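+// Sends a one-time client/registerCapability request when the configured trigger method fires.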
+function maybeRegister(method) {
+ if (pullConfig.registerOn !== method || registeredCapability) return
+ registeredCapability = true
+ sendRequest("client/registerCapability", {
+ registrations: pullConfig.registrations.map((registration, index) => ({
+ id: registration.id ?? `pull-${index}`,
+ method: registration.method ?? "textDocument/diagnostic",
+ registerOptions: registration.registerOptions ?? registration,
+ })),
+ })
+}
+
+function delayed(id, result, delayMs = pullConfig.delayMs) {
+ if (!delayMs) {
+ sendResponse(id, result)
+ return
+ }
+ setTimeout(() => sendResponse(id, result), delayMs)
+}
+
+function diagnosticsForIdentifier(identifier) {
+ return pullConfig.documentDiagnosticsByIdentifier[identifier] ?? pullConfig.documentDiagnostics
+}
+
+function workspaceDiagnosticsForIdentifier(identifier) {
+ return pullConfig.workspaceDiagnosticsByIdentifier[identifier] ?? pullConfig.workspaceDiagnostics
+}
+
+function documentDelayForIdentifier(identifier) {
+ return pullConfig.documentDelayMsByIdentifier[identifier] ?? pullConfig.delayMs
+}
+
+function workspaceDelayForIdentifier(identifier) {
+ return pullConfig.workspaceDelayMsByIdentifier[identifier] ?? pullConfig.delayMs
+}
+
function handle(raw) {
let data
try {
@@ -52,24 +104,148 @@ function handle(raw) {
} catch {
return
}
+
+ if (typeof data.method === "undefined" && typeof data.id !== "undefined") {
+ const pending = pendingClientRequests.get(data.id)
+ if (!pending) return
+ pendingClientRequests.delete(data.id)
+ sendResponse(pending, data.result ?? null)
+ return
+ }
+
if (data.method === "initialize") {
- send({ jsonrpc: "2.0", id: data.id, result: { capabilities: {} } })
+ initializeParams = data.params
+ sendResponse(data.id, {
+ capabilities: {
+ textDocumentSync: {
+ change: 2,
+ },
+ },
+ })
return
}
- if (data.method === "initialized") {
+
+ if (data.method === "test/get-initialize-params") {
+ sendResponse(data.id, initializeParams)
return
}
- if (data.method === "workspace/didChangeConfiguration") {
+
+ if (data.method === "test/request-configuration") {
+ const id = sendRequest("workspace/configuration", data.params)
+ pendingClientRequests.set(id, data.id)
+ return
+ }
+
+ if (data.method === "initialized" || data.method === "workspace/didChangeConfiguration") {
return
}
+
+ if (data.method === "textDocument/didOpen") {
+ maybeRegister("didOpen")
+ return
+ }
+
+ if (data.method === "textDocument/didChange") {
+ lastChange = data.params
+ maybeRegister("didChange")
+ return
+ }
+
if (data.method === "test/trigger") {
const method = data.params && data.params.method
+ if (method === "client/registerCapability") {
+ sendRequest(method, {
+ registrations: [
+ {
+ id: "test-diagnostic-registration",
+ method: "textDocument/diagnostic",
+ registerOptions: { identifier: "syntax" },
+ },
+ ],
+ })
+ return
+ }
+ if (method === "client/unregisterCapability") {
+ sendRequest(method, {
+ unregisterations: [{ id: "test-diagnostic-registration", method: "textDocument/diagnostic" }],
+ })
+ return
+ }
if (method) sendRequest(method, {})
return
}
- if (typeof data.id !== "undefined") {
- // Respond OK to any request from client to keep transport flowing
- send({ jsonrpc: "2.0", id: data.id, result: null })
+
+ if (data.method === "test/configure-pull-diagnostics") {
+ pullConfig = {
+ delayMs: data.params?.delayMs ?? 0,
+ registerOn: data.params?.registerOn,
+ registrations: data.params?.registrations ?? [],
+ documentDiagnostics: data.params?.documentDiagnostics ?? [],
+ documentDiagnosticsByIdentifier: data.params?.documentDiagnosticsByIdentifier ?? {},
+ documentDelayMsByIdentifier: data.params?.documentDelayMsByIdentifier ?? {},
+ workspaceDiagnostics: data.params?.workspaceDiagnostics ?? [],
+ workspaceDiagnosticsByIdentifier: data.params?.workspaceDiagnosticsByIdentifier ?? {},
+ workspaceDelayMsByIdentifier: data.params?.workspaceDelayMsByIdentifier ?? {},
+ }
+ registeredCapability = false
+ sendResponse(data.id, null)
+ return
+ }
+
+ if (data.method === "test/register-configured-pull-diagnostics") {
+ maybeRegister(undefined)
+ sendResponse(data.id, null)
+ return
+ }
+
+ if (data.method === "test/publish-diagnostics") {
+ sendNotification("textDocument/publishDiagnostics", data.params)
+ return
+ }
+
+ if (data.method === "test/get-last-change") {
+ sendResponse(data.id, lastChange)
return
}
+
+ if (data.method === "test/get-diagnostic-request-count") {
+ sendResponse(data.id, diagnosticRequestCount)
+ return
+ }
+
+ if (data.method === "textDocument/diagnostic") {
+ diagnosticRequestCount += 1
+ delayed(
+ data.id,
+ {
+ kind: "full",
+ items: diagnosticsForIdentifier(data.params?.identifier ?? ""),
+ },
+ documentDelayForIdentifier(data.params?.identifier ?? ""),
+ )
+ return
+ }
+
+ if (data.method === "workspace/diagnostic") {
+ diagnosticRequestCount += 1
+ delayed(
+ data.id,
+ {
+ items: workspaceDiagnosticsForIdentifier(data.params?.identifier ?? ""),
+ },
+ workspaceDelayForIdentifier(data.params?.identifier ?? ""),
+ )
+ return
+ }
+
+ if (typeof data.id !== "undefined") {
+ sendResponse(data.id, null)
+ }
}
+
+process.stdin.on("data", (chunk) => {
+ readBuffer = Buffer.concat([readBuffer, chunk])
+ const { messages, rest } = decodeFrames(readBuffer)
+ readBuffer = rest
+ for (const message of messages) handle(message)
+})
diff --git a/packages/opencode/test/format/format.test.ts b/packages/opencode/test/format/format.test.ts
index 5530e195b268..2f6f235aa165 100644
--- a/packages/opencode/test/format/format.test.ts
+++ b/packages/opencode/test/format/format.test.ts
@@ -126,6 +126,24 @@ describe("Format", () => {
it.live("service initializes without error", () => provideTmpdirInstance(() => Format.Service.use(() => Effect.void)))
+ it.live("file() returns false when no formatter runs", () =>
+ provideTmpdirInstance(
+ (dir) =>
+ Effect.gen(function* () {
+ const file = `${dir}/test.txt`
+ yield* Effect.promise(() => Bun.write(file, "x"))
+
+ const formatted = yield* Format.Service.use((fmt) => fmt.file(file))
+ expect(formatted).toBe(false)
+ }),
+ {
+ config: {
+ formatter: false,
+ },
+ },
+ ),
+ )
+
it.live("status() initializes formatter state per directory", () =>
Effect.gen(function* () {
const a = yield* provideTmpdirInstance(() => Format.Service.use((fmt) => fmt.status()), {
@@ -219,7 +237,7 @@ describe("Format", () => {
yield* Format.Service.use((fmt) =>
Effect.gen(function* () {
yield* fmt.init()
- yield* fmt.file(file)
+ expect(yield* fmt.file(file)).toBe(true)
}),
)
@@ -229,11 +247,21 @@ describe("Format", () => {
config: {
formatter: {
first: {
- command: ["sh", "-c", 'sleep 0.05; v=$(cat "$1"); printf \'%sA\' "$v" > "$1"', "sh", "$FILE"],
+ command: [
+ "node",
+ "-e",
+ "const fs = require('fs'); const file = process.argv[1]; fs.writeFileSync(file, fs.readFileSync(file, 'utf8') + 'A')",
+ "$FILE",
+ ],
extensions: [".seq"],
},
second: {
- command: ["sh", "-c", 'v=$(cat "$1"); printf \'%sB\' "$v" > "$1"', "sh", "$FILE"],
+ command: [
+ "node",
+ "-e",
+ "const fs = require('fs'); const file = process.argv[1]; fs.writeFileSync(file, fs.readFileSync(file, 'utf8') + 'B')",
+ "$FILE",
+ ],
extensions: [".seq"],
},
},
diff --git a/packages/opencode/test/lsp/client.test.ts b/packages/opencode/test/lsp/client.test.ts
index d6eaa317f945..4862f6839490 100644
--- a/packages/opencode/test/lsp/client.test.ts
+++ b/packages/opencode/test/lsp/client.test.ts
@@ -1,11 +1,12 @@
-import { describe, expect, test, beforeEach } from "bun:test"
+import { beforeEach, describe, expect, test } from "bun:test"
import path from "path"
+import { pathToFileURL } from "url"
+import { tmpdir } from "../fixture/fixture"
import { LSPClient } from "../../src/lsp"
import { LSPServer } from "../../src/lsp"
import { Instance } from "../../src/project/instance"
import { Log } from "../../src/util"
-// Minimal fake LSP server that speaks JSON-RPC over stdio
function spawnFakeServer() {
const { spawn } = require("child_process")
const serverPath = path.join(__dirname, "../fixture/lsp/fake-lsp-server.js")
@@ -39,10 +40,8 @@ describe("LSPClient interop", () => {
method: "workspace/workspaceFolders",
})
- await new Promise((r) => setTimeout(r, 100))
-
+ await new Promise((resolve) => setTimeout(resolve, 100))
expect(client.connection).toBeDefined()
-
await client.shutdown()
})
@@ -64,10 +63,8 @@ describe("LSPClient interop", () => {
method: "client/registerCapability",
})
- await new Promise((r) => setTimeout(r, 100))
-
+ await new Promise((resolve) => setTimeout(resolve, 100))
expect(client.connection).toBeDefined()
-
await client.shutdown()
})
@@ -89,10 +86,397 @@ describe("LSPClient interop", () => {
method: "client/unregisterCapability",
})
- await new Promise((r) => setTimeout(r, 100))
-
+ await new Promise((resolve) => setTimeout(resolve, 100))
expect(client.connection).toBeDefined()
+ await client.shutdown()
+ })
+
+ test("initialize does not overclaim unsupported diagnostics capabilities", async () => {
+ const handle = spawnFakeServer() as any
+
+ const client = await Instance.provide({
+ directory: process.cwd(),
+ fn: () =>
+ LSPClient.create({
+ serverID: "fake",
+ server: handle as unknown as LSPServer.Handle,
+ root: process.cwd(),
+ directory: process.cwd(),
+ }),
+ })
+
+ const params = await client.connection.sendRequest("test/get-initialize-params", {})
+ expect(params.capabilities.workspace.diagnostics.refreshSupport).toBe(false)
+ expect(params.capabilities.textDocument.publishDiagnostics.versionSupport).toBe(false)
await client.shutdown()
})
+
+ test("workspace/configuration returns one result per requested item", async () => {
+ const handle = spawnFakeServer() as any
+ const initialization = {
+ alpha: {
+ beta: 1,
+ },
+ gamma: true,
+ }
+
+ const client = await Instance.provide({
+ directory: process.cwd(),
+ fn: () =>
+ LSPClient.create({
+ serverID: "fake",
+ server: {
+ ...(handle as unknown as LSPServer.Handle),
+ initialization,
+ },
+ root: process.cwd(),
+ directory: process.cwd(),
+ }),
+ })
+
+ const response = await client.connection.sendRequest("test/request-configuration", {
+ items: [{ section: "alpha" }, { section: "alpha.beta" }, { section: "missing" }, {}],
+ })
+
+ expect(response).toEqual([{ beta: 1 }, 1, null, initialization])
+
+ await client.shutdown()
+ })
+
+ test("sends ranged didChange for incremental sync servers", async () => {
+ const handle = spawnFakeServer() as any
+ await using tmp = await tmpdir()
+ const file = path.join(tmp.path, "client.ts")
+ await Bun.write(file, "first\n")
+
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const client = await LSPClient.create({
+ serverID: "fake",
+ server: handle as unknown as LSPServer.Handle,
+ root: tmp.path,
+ directory: tmp.path,
+ })
+
+ await client.notify.open({ path: file })
+ await Bun.write(file, "second\nthird\n")
+ await client.notify.open({ path: file })
+
+ const change = await client.connection.sendRequest<{
+ textDocument: { version: number }
+ contentChanges: {
+ range?: { start: { line: number; character: number }; end: { line: number; character: number } }
+ text: string
+ }[]
+ }>("test/get-last-change", {})
+ expect(change.textDocument.version).toBe(1)
+ expect(change.contentChanges).toEqual([
+ {
+ range: {
+ start: { line: 0, character: 0 },
+ end: { line: 1, character: 0 },
+ },
+ text: "second\nthird\n",
+ },
+ ])
+
+ await client.shutdown()
+ },
+ })
+ })
+
+ test("document mode falls back to push diagnostics", async () => {
+ const handle = spawnFakeServer() as any
+ await using tmp = await tmpdir()
+ const file = path.join(tmp.path, "client.ts")
+ await Bun.write(file, "const x = 1\n")
+
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const client = await LSPClient.create({
+ serverID: "fake",
+ server: handle as unknown as LSPServer.Handle,
+ root: tmp.path,
+ directory: tmp.path,
+ })
+
+ const version = await client.notify.open({ path: file })
+ const wait = client.waitForDiagnostics({ path: file, version, mode: "document" })
+ await client.connection.sendNotification("test/publish-diagnostics", {
+ uri: pathToFileURL(file).href,
+ version,
+ diagnostics: [
+ {
+ range: {
+ start: { line: 0, character: 0 },
+ end: { line: 0, character: 5 },
+ },
+ message: "push diagnostic",
+ severity: 1,
+ },
+ ],
+ })
+ await wait
+
+ const diagnostics = client.diagnostics.get(file) ?? []
+ expect(diagnostics).toHaveLength(1)
+ expect(diagnostics[0]?.message).toBe("push diagnostic")
+
+ const count = await client.connection.sendRequest("test/get-diagnostic-request-count", {})
+ expect(count).toBe(0)
+
+ await client.shutdown()
+ },
+ })
+ })
+
+ test("document mode accepts matching push diagnostics published before waiting", async () => {
+ const handle = spawnFakeServer() as any
+ await using tmp = await tmpdir()
+ const file = path.join(tmp.path, "client.ts")
+ await Bun.write(file, "const x = 1\n")
+
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const client = await LSPClient.create({
+ serverID: "fake",
+ server: handle as unknown as LSPServer.Handle,
+ root: tmp.path,
+ directory: tmp.path,
+ })
+
+ const version = await client.notify.open({ path: file })
+ await client.connection.sendNotification("test/publish-diagnostics", {
+ uri: pathToFileURL(file).href,
+ version,
+ diagnostics: [
+ {
+ range: {
+ start: { line: 0, character: 0 },
+ end: { line: 0, character: 5 },
+ },
+ message: "push diagnostic",
+ severity: 1,
+ },
+ ],
+ })
+
+ for (let i = 0; i < 20 && (client.diagnostics.get(file)?.length ?? 0) === 0; i++) {
+ await new Promise((resolve) => setTimeout(resolve, 25))
+ }
+
+ expect(client.diagnostics.get(file)?.[0]?.message).toBe("push diagnostic")
+
+ const started = Date.now()
+ await client.waitForDiagnostics({ path: file, version, mode: "document" })
+ expect(Date.now() - started).toBeLessThan(1_000)
+
+ await client.shutdown()
+ },
+ })
+ })
+
+ test("document mode waits for pull diagnostics", async () => {
+ const handle = spawnFakeServer() as any
+ await using tmp = await tmpdir()
+ const file = path.join(tmp.path, "client.cs")
+ await Bun.write(file, "class C {}\n")
+
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const client = await LSPClient.create({
+ serverID: "fake",
+ server: handle as unknown as LSPServer.Handle,
+ root: tmp.path,
+ directory: tmp.path,
+ })
+
+ await client.connection.sendRequest("test/configure-pull-diagnostics", {
+ registerOn: "didOpen",
+ registrations: [{ identifier: "DocumentCompilerSemantic" }],
+ documentDiagnosticsByIdentifier: {
+ DocumentCompilerSemantic: [
+ {
+ range: {
+ start: { line: 0, character: 0 },
+ end: { line: 0, character: 5 },
+ },
+ message: "pull diagnostic",
+ severity: 1,
+ },
+ ],
+ },
+ })
+
+ const version = await client.notify.open({ path: file })
+ await client.waitForDiagnostics({ path: file, version, mode: "document" })
+
+ const diagnostics = client.diagnostics.get(file) ?? []
+ expect(diagnostics).toHaveLength(1)
+ expect(diagnostics[0]?.message).toBe("pull diagnostic")
+
+ const count = await client.connection.sendRequest("test/get-diagnostic-request-count", {})
+ expect(count).toBeGreaterThan(0)
+
+ await client.shutdown()
+ },
+ })
+ })
+
+ test("document mode does not wait for the slowest pull identifier after current-file diagnostics arrive", async () => {
+ const handle = spawnFakeServer() as any
+ await using tmp = await tmpdir()
+ const file = path.join(tmp.path, "client.cs")
+ await Bun.write(file, "class C {}\n")
+
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const client = await LSPClient.create({
+ serverID: "fake",
+ server: handle as unknown as LSPServer.Handle,
+ root: tmp.path,
+ directory: tmp.path,
+ })
+
+ await client.connection.sendRequest("test/configure-pull-diagnostics", {
+ registrations: [{ identifier: "fast" }, { identifier: "slow" }],
+ documentDiagnosticsByIdentifier: {
+ fast: [
+ {
+ range: {
+ start: { line: 0, character: 0 },
+ end: { line: 0, character: 5 },
+ },
+ message: "fast diagnostic",
+ severity: 1,
+ },
+ ],
+ slow: [],
+ },
+ documentDelayMsByIdentifier: {
+ slow: 2_500,
+ },
+ })
+
+ const version = await client.notify.open({ path: file })
+ await client.connection.sendRequest("test/register-configured-pull-diagnostics", {})
+ await new Promise((resolve) => setTimeout(resolve, 100))
+ const started = Date.now()
+ await client.waitForDiagnostics({ path: file, version, mode: "document" })
+
+ expect(Date.now() - started).toBeLessThan(1_000)
+ expect(client.diagnostics.get(file)?.[0]?.message).toBe("fast diagnostic")
+ expect(await client.connection.sendRequest("test/get-diagnostic-request-count", {})).toBeGreaterThan(1)
+
+ await client.shutdown()
+ },
+ })
+ })
+
+ test("full mode includes workspace pull diagnostics", async () => {
+ const handle = spawnFakeServer() as any
+ await using tmp = await tmpdir()
+ const file = path.join(tmp.path, "client.cs")
+ const related = path.join(tmp.path, "other.cs")
+ await Bun.write(file, "class C {}\n")
+ await Bun.write(related, "class D {}\n")
+
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const client = await LSPClient.create({
+ serverID: "fake",
+ server: handle as unknown as LSPServer.Handle,
+ root: tmp.path,
+ directory: tmp.path,
+ })
+
+ await client.connection.sendRequest("test/configure-pull-diagnostics", {
+ registerOn: "didOpen",
+ registrations: [
+ { identifier: "DocumentCompilerSemantic" },
+ { identifier: "WorkspaceDocumentsAndProject", workspaceDiagnostics: true },
+ ],
+ documentDiagnosticsByIdentifier: {
+ DocumentCompilerSemantic: [
+ {
+ range: {
+ start: { line: 0, character: 0 },
+ end: { line: 0, character: 5 },
+ },
+ message: "current file",
+ severity: 1,
+ },
+ ],
+ },
+ workspaceDiagnosticsByIdentifier: {
+ WorkspaceDocumentsAndProject: [
+ {
+ uri: pathToFileURL(related).href,
+ items: [
+ {
+ range: {
+ start: { line: 0, character: 0 },
+ end: { line: 0, character: 5 },
+ },
+ message: "workspace file",
+ severity: 1,
+ },
+ ],
+ },
+ ],
+ },
+ })
+
+ const version = await client.notify.open({ path: file })
+ await client.waitForDiagnostics({ path: file, version, mode: "full" })
+
+ expect(client.diagnostics.get(file)?.[0]?.message).toBe("current file")
+ expect(client.diagnostics.get(related)?.[0]?.message).toBe("workspace file")
+
+ await client.shutdown()
+ },
+ })
+ })
+
+ test("full mode treats an empty workspace pull response as handled", async () => {
+ const handle = spawnFakeServer() as any
+ await using tmp = await tmpdir()
+ const file = path.join(tmp.path, "client.cs")
+ await Bun.write(file, "class C {}\n")
+
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const client = await LSPClient.create({
+ serverID: "fake",
+ server: handle as unknown as LSPServer.Handle,
+ root: tmp.path,
+ directory: tmp.path,
+ })
+
+ await client.connection.sendRequest("test/configure-pull-diagnostics", {
+ registerOn: "didOpen",
+ registrations: [{ identifier: "WorkspaceDocumentsAndProject", workspaceDiagnostics: true }],
+ workspaceDiagnosticsByIdentifier: {
+ WorkspaceDocumentsAndProject: [],
+ },
+ })
+
+ const version = await client.notify.open({ path: file })
+ const started = Date.now()
+ await client.waitForDiagnostics({ path: file, version, mode: "full" })
+
+ expect(Date.now() - started).toBeLessThan(1_000)
+
+ await client.shutdown()
+ },
+ })
+ })
})
diff --git a/packages/opencode/test/project/project.test.ts b/packages/opencode/test/project/project.test.ts
index 4dc9ee5efac7..4664b6c258ab 100644
--- a/packages/opencode/test/project/project.test.ts
+++ b/packages/opencode/test/project/project.test.ts
@@ -472,3 +472,87 @@ describe("Project.addSandbox and Project.removeSandbox", () => {
expect(events.some((e) => e.payload.type === Project.Event.Updated.type)).toBe(true)
})
})
+
+describe("Project.fromDirectory with bare repos", () => {
+ test("worktree from bare repo should cache in bare repo, not parent", async () => {
+ await using tmp = await tmpdir({ git: true })
+
+ const parentDir = path.dirname(tmp.path)
+ const barePath = path.join(parentDir, `bare-${Date.now()}.git`)
+ const worktreePath = path.join(parentDir, `worktree-${Date.now()}`)
+
+ try {
+ await $`git clone --bare ${tmp.path} ${barePath}`.quiet()
+ await $`git worktree add ${worktreePath} HEAD`.cwd(barePath).quiet()
+
+ const { project } = await run((svc) => svc.fromDirectory(worktreePath))
+
+ expect(project.id).not.toBe(ProjectID.global)
+ expect(project.worktree).toBe(barePath)
+
+ const correctCache = path.join(barePath, "opencode")
+ const wrongCache = path.join(parentDir, ".git", "opencode")
+
+ expect(await Bun.file(correctCache).exists()).toBe(true)
+ expect(await Bun.file(wrongCache).exists()).toBe(false)
+ } finally {
+ await $`rm -rf ${barePath} ${worktreePath}`.quiet().nothrow()
+ }
+ })
+
+ test("different bare repos under same parent should not share project ID", async () => {
+ await using tmp1 = await tmpdir({ git: true })
+ await using tmp2 = await tmpdir({ git: true })
+
+ const parentDir = path.dirname(tmp1.path)
+ const bareA = path.join(parentDir, `bare-a-${Date.now()}.git`)
+ const bareB = path.join(parentDir, `bare-b-${Date.now()}.git`)
+ const worktreeA = path.join(parentDir, `wt-a-${Date.now()}`)
+ const worktreeB = path.join(parentDir, `wt-b-${Date.now()}`)
+
+ try {
+ await $`git clone --bare ${tmp1.path} ${bareA}`.quiet()
+ await $`git clone --bare ${tmp2.path} ${bareB}`.quiet()
+ await $`git worktree add ${worktreeA} HEAD`.cwd(bareA).quiet()
+ await $`git worktree add ${worktreeB} HEAD`.cwd(bareB).quiet()
+
+ const { project: projA } = await run((svc) => svc.fromDirectory(worktreeA))
+ const { project: projB } = await run((svc) => svc.fromDirectory(worktreeB))
+
+ expect(projA.id).not.toBe(projB.id)
+
+ const cacheA = path.join(bareA, "opencode")
+ const cacheB = path.join(bareB, "opencode")
+ const wrongCache = path.join(parentDir, ".git", "opencode")
+
+ expect(await Bun.file(cacheA).exists()).toBe(true)
+ expect(await Bun.file(cacheB).exists()).toBe(true)
+ expect(await Bun.file(wrongCache).exists()).toBe(false)
+ } finally {
+ await $`rm -rf ${bareA} ${bareB} ${worktreeA} ${worktreeB}`.quiet().nothrow()
+ }
+ })
+
+ test("bare repo without .git suffix is still detected via core.bare", async () => {
+ await using tmp = await tmpdir({ git: true })
+
+ const parentDir = path.dirname(tmp.path)
+ const barePath = path.join(parentDir, `bare-no-suffix-${Date.now()}`)
+ const worktreePath = path.join(parentDir, `worktree-${Date.now()}`)
+
+ try {
+ await $`git clone --bare ${tmp.path} ${barePath}`.quiet()
+ await $`git worktree add ${worktreePath} HEAD`.cwd(barePath).quiet()
+
+ const { project } = await run((svc) => svc.fromDirectory(worktreePath))
+
+ expect(project.id).not.toBe(ProjectID.global)
+ expect(project.worktree).toBe(barePath)
+
+ const correctCache = path.join(barePath, "opencode")
+ expect(await Bun.file(correctCache).exists()).toBe(true)
+ } finally {
+ await $`rm -rf ${barePath} ${worktreePath}`.quiet().nothrow()
+ }
+ })
+})
diff --git a/packages/opencode/test/session/compaction.test.ts b/packages/opencode/test/session/compaction.test.ts
index 0e2b179f0079..037613d469af 100644
--- a/packages/opencode/test/session/compaction.test.ts
+++ b/packages/opencode/test/session/compaction.test.ts
@@ -143,6 +143,45 @@ async function assistant(sessionID: SessionID, parentID: MessageID, root: string
return msg
}
+async function summaryAssistant(sessionID: SessionID, parentID: MessageID, root: string, text: string) {
+ const msg: MessageV2.Assistant = {
+ id: MessageID.ascending(),
+ role: "assistant",
+ sessionID,
+ mode: "compaction",
+ agent: "compaction",
+ path: { cwd: root, root },
+ cost: 0,
+ tokens: {
+ output: 0,
+ input: 0,
+ reasoning: 0,
+ cache: { read: 0, write: 0 },
+ },
+ modelID: ref.modelID,
+ providerID: ref.providerID,
+ parentID,
+ summary: true,
+ time: { created: Date.now() },
+ finish: "end_turn",
+ }
+ await svc.updateMessage(msg)
+ await svc.updatePart({
+ id: PartID.ascending(),
+ messageID: msg.id,
+ sessionID,
+ type: "text",
+ text,
+ })
+ return msg
+}
+
+async function lastCompactionPart(sessionID: SessionID) {
+ return (await svc.messages({ sessionID }))
+ .at(-2)
+ ?.parts.find((item): item is MessageV2.CompactionPart => item.type === "compaction")
+}
+
function fake(
input: Parameters[0],
result: "continue" | "compact",
@@ -946,12 +985,9 @@ describe("session.compaction.process", () => {
),
)
- const part = (await svc.messages({ sessionID: session.id }))
- .at(-2)
- ?.parts.find((item) => item.type === "compaction")
-
+ const part = await lastCompactionPart(session.id)
expect(part?.type).toBe("compaction")
- if (part?.type === "compaction") expect(part.tail_start_id).toBe(keep.id)
+ expect(part?.tail_start_id).toBe(keep.id)
} finally {
await rt.dispose()
}
@@ -991,12 +1027,9 @@ describe("session.compaction.process", () => {
),
)
- const part = (await svc.messages({ sessionID: session.id }))
- .at(-2)
- ?.parts.find((item) => item.type === "compaction")
-
+ const part = await lastCompactionPart(session.id)
expect(part?.type).toBe("compaction")
- if (part?.type === "compaction") expect(part.tail_start_id).toBe(keep.id)
+ expect(part?.tail_start_id).toBe(keep.id)
} finally {
await rt.dispose()
}
@@ -1042,12 +1075,9 @@ describe("session.compaction.process", () => {
),
)
- const part = (await svc.messages({ sessionID: session.id }))
- .at(-2)
- ?.parts.find((item) => item.type === "compaction")
-
+ const part = await lastCompactionPart(session.id)
expect(part?.type).toBe("compaction")
- if (part?.type === "compaction") expect(part.tail_start_id).toBeUndefined()
+ expect(part?.tail_start_id).toBeUndefined()
expect(captured).toContain("yyyy")
} finally {
await rt.dispose()
@@ -1103,12 +1133,9 @@ describe("session.compaction.process", () => {
),
)
- const part = (await svc.messages({ sessionID: session.id }))
- .at(-2)
- ?.parts.find((item) => item.type === "compaction")
-
+ const part = await lastCompactionPart(session.id)
expect(part?.type).toBe("compaction")
- if (part?.type === "compaction") expect(part.tail_start_id).toBeUndefined()
+ expect(part?.tail_start_id).toBeUndefined()
expect(captured).toContain("recent image turn")
expect(captured).toContain("Attached image/png: big.png")
} finally {
@@ -1118,6 +1145,76 @@ describe("session.compaction.process", () => {
})
})
+ test("retains a split turn suffix when a later message fits the preserve token budget", async () => {
+ await using tmp = await tmpdir({ git: true })
+ const stub = llm()
+ let captured = ""
+ stub.push(
+ reply("summary", (input) => {
+ captured = JSON.stringify(input.messages)
+ }),
+ )
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const session = await svc.create({})
+ await user(session.id, "older")
+ const recent = await user(session.id, "recent turn")
+ const large = await assistant(session.id, recent.id, tmp.path)
+ await svc.updatePart({
+ id: PartID.ascending(),
+ messageID: large.id,
+ sessionID: session.id,
+ type: "text",
+ text: "z".repeat(2_000),
+ })
+ const keep = await assistant(session.id, recent.id, tmp.path)
+ await svc.updatePart({
+ id: PartID.ascending(),
+ messageID: keep.id,
+ sessionID: session.id,
+ type: "text",
+ text: "keep tail",
+ })
+ await SessionCompaction.create({
+ sessionID: session.id,
+ agent: "build",
+ model: ref,
+ auto: false,
+ })
+
+ const rt = liveRuntime(stub.layer, wide(), cfg({ tail_turns: 1, preserve_recent_tokens: 100 }))
+ try {
+ const msgs = await svc.messages({ sessionID: session.id })
+ const parent = msgs.at(-1)?.info.id
+ expect(parent).toBeTruthy()
+ await rt.runPromise(
+ SessionCompaction.Service.use((svc) =>
+ svc.process({
+ parentID: parent!,
+ messages: msgs,
+ sessionID: session.id,
+ auto: false,
+ }),
+ ),
+ )
+
+ const part = await lastCompactionPart(session.id)
+ expect(part?.type).toBe("compaction")
+ expect(part?.tail_start_id).toBe(keep.id)
+ expect(captured).toContain("zzzz")
+ expect(captured).not.toContain("keep tail")
+
+ const filtered = MessageV2.filterCompacted(MessageV2.stream(session.id))
+ expect(filtered[0]?.info.id).toBe(keep.id)
+ expect(filtered.map((msg) => msg.info.id)).not.toContain(large.id)
+ } finally {
+ await rt.dispose()
+ }
+ },
+ })
+ })
+
test("allows plugins to disable synthetic continue prompt", async () => {
await using tmp = await tmpdir()
await Instance.provide({
@@ -1530,6 +1627,80 @@ describe("session.compaction.process", () => {
})
})
+ test("anchors repeated compactions with the previous summary", async () => {
+ const stub = llm()
+ let captured = ""
+ stub.push(reply("summary one"))
+ stub.push(
+ reply("summary two", (input) => {
+ captured = JSON.stringify(input.messages)
+ }),
+ )
+
+ await using tmp = await tmpdir({ git: true })
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const session = await svc.create({})
+ await user(session.id, "older context")
+ await user(session.id, "keep this turn")
+ await SessionCompaction.create({
+ sessionID: session.id,
+ agent: "build",
+ model: ref,
+ auto: false,
+ })
+
+ const rt = liveRuntime(stub.layer, wide())
+ try {
+ let msgs = await svc.messages({ sessionID: session.id })
+ let parent = msgs.at(-1)?.info.id
+ expect(parent).toBeTruthy()
+ await rt.runPromise(
+ SessionCompaction.Service.use((svc) =>
+ svc.process({
+ parentID: parent!,
+ messages: msgs,
+ sessionID: session.id,
+ auto: false,
+ }),
+ ),
+ )
+
+ await user(session.id, "latest turn")
+ await SessionCompaction.create({
+ sessionID: session.id,
+ agent: "build",
+ model: ref,
+ auto: false,
+ })
+
+ msgs = MessageV2.filterCompacted(MessageV2.stream(session.id))
+ parent = msgs.at(-1)?.info.id
+ expect(parent).toBeTruthy()
+ await rt.runPromise(
+ SessionCompaction.Service.use((svc) =>
+ svc.process({
+ parentID: parent!,
+ messages: msgs,
+ sessionID: session.id,
+ auto: false,
+ }),
+ ),
+ )
+
+ expect(captured).toContain("")
+ expect(captured).toContain("summary one")
+ expect(captured.match(/summary one/g)?.length).toBe(1)
+ expect(captured).toContain("## Constraints & Preferences")
+ expect(captured).toContain("## Progress")
+ } finally {
+ await rt.dispose()
+ }
+ },
+ })
+ })
+
test("keeps recent pre-compaction turns across repeated compactions", async () => {
const stub = llm()
stub.push(reply("summary one"))
@@ -1604,6 +1775,76 @@ describe("session.compaction.process", () => {
},
})
})
+
+ test("ignores previous summaries when sizing the retained tail", async () => {
+ await using tmp = await tmpdir()
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const session = await svc.create({})
+ await user(session.id, "older")
+ const keep = await user(session.id, "keep this turn")
+ const keepReply = await assistant(session.id, keep.id, tmp.path)
+ await svc.updatePart({
+ id: PartID.ascending(),
+ messageID: keepReply.id,
+ sessionID: session.id,
+ type: "text",
+ text: "keep reply",
+ })
+
+ await SessionCompaction.create({
+ sessionID: session.id,
+ agent: "build",
+ model: ref,
+ auto: false,
+ })
+ const firstCompaction = (await svc.messages({ sessionID: session.id })).at(-1)?.info.id
+ expect(firstCompaction).toBeTruthy()
+ await summaryAssistant(session.id, firstCompaction!, tmp.path, "summary ".repeat(800))
+
+ const recent = await user(session.id, "recent turn")
+ const recentReply = await assistant(session.id, recent.id, tmp.path)
+ await svc.updatePart({
+ id: PartID.ascending(),
+ messageID: recentReply.id,
+ sessionID: session.id,
+ type: "text",
+ text: "recent reply",
+ })
+
+ await SessionCompaction.create({
+ sessionID: session.id,
+ agent: "build",
+ model: ref,
+ auto: false,
+ })
+
+ const rt = runtime("continue", Plugin.defaultLayer, wide(), cfg({ tail_turns: 2, preserve_recent_tokens: 500 }))
+ try {
+ const msgs = await svc.messages({ sessionID: session.id })
+ const parent = msgs.at(-1)?.info.id
+ expect(parent).toBeTruthy()
+ await rt.runPromise(
+ SessionCompaction.Service.use((svc) =>
+ svc.process({
+ parentID: parent!,
+ messages: msgs,
+ sessionID: session.id,
+ auto: false,
+ }),
+ ),
+ )
+
+ const part = await lastCompactionPart(session.id)
+ expect(part?.type).toBe("compaction")
+ expect(part?.tail_start_id).toBe(keep.id)
+ } finally {
+ await rt.dispose()
+ }
+ },
+ })
+ })
})
describe("util.token.estimate", () => {
diff --git a/packages/opencode/test/session/message-v2.test.ts b/packages/opencode/test/session/message-v2.test.ts
index 55ae65c56029..231d58c21a91 100644
--- a/packages/opencode/test/session/message-v2.test.ts
+++ b/packages/opencode/test/session/message-v2.test.ts
@@ -585,6 +585,76 @@ describe("session.message-v2.toModelMessage", () => {
])
})
+ test("truncates tool output when requested", async () => {
+ const userID = "m-user"
+ const assistantID = "m-assistant"
+
+ const input: MessageV2.WithParts[] = [
+ {
+ info: userInfo(userID),
+ parts: [
+ {
+ ...basePart(userID, "u1"),
+ type: "text",
+ text: "run tool",
+ },
+ ] as MessageV2.Part[],
+ },
+ {
+ info: assistantInfo(assistantID, userID),
+ parts: [
+ {
+ ...basePart(assistantID, "a1"),
+ type: "tool",
+ callID: "call-1",
+ tool: "bash",
+ state: {
+ status: "completed",
+ input: { cmd: "ls" },
+ output: "abcdefghij",
+ title: "Bash",
+ metadata: {},
+ time: { start: 0, end: 1 },
+ },
+ },
+ ] as MessageV2.Part[],
+ },
+ ]
+
+ expect(await MessageV2.toModelMessages(input, model, { toolOutputMaxChars: 4 })).toStrictEqual([
+ {
+ role: "user",
+ content: [{ type: "text", text: "run tool" }],
+ },
+ {
+ role: "assistant",
+ content: [
+ {
+ type: "tool-call",
+ toolCallId: "call-1",
+ toolName: "bash",
+ input: { cmd: "ls" },
+ providerExecuted: undefined,
+ },
+ ],
+ },
+ {
+ role: "tool",
+ content: [
+ {
+ type: "tool-result",
+ toolCallId: "call-1",
+ toolName: "bash",
+ output: {
+ type: "text",
+ value: "abcd\n[Tool output truncated for compaction: omitted 6 chars]",
+ },
+ },
+ ],
+ },
+ ])
+ })
+
test("converts assistant tool error into error-text tool result", async () => {
const userID = "m-user"
const assistantID = "m-assistant"
diff --git a/packages/opencode/test/session/messages-pagination.test.ts b/packages/opencode/test/session/messages-pagination.test.ts
index d8dcf5e7cb1d..df2d18b9f123 100644
--- a/packages/opencode/test/session/messages-pagination.test.ts
+++ b/packages/opencode/test/session/messages-pagination.test.ts
@@ -837,6 +837,70 @@ describe("MessageV2.filterCompacted", () => {
})
})
+ test("retains an assistant tail when compaction starts inside a turn", async () => {
+ await Instance.provide({
+ directory: root,
+ fn: async () => {
+ const session = await svc.create({})
+
+ const u1 = await addUser(session.id, "first")
+ const a1 = await addAssistant(session.id, u1, { finish: "end_turn" })
+ await svc.updatePart({
+ id: PartID.ascending(),
+ sessionID: session.id,
+ messageID: a1,
+ type: "text",
+ text: "first reply",
+ })
+
+ const u2 = await addUser(session.id, "second")
+ const a2 = await addAssistant(session.id, u2, { finish: "end_turn" })
+ await svc.updatePart({
+ id: PartID.ascending(),
+ sessionID: session.id,
+ messageID: a2,
+ type: "text",
+ text: "second reply",
+ })
+ const a3 = await addAssistant(session.id, u2, { finish: "end_turn" })
+ await svc.updatePart({
+ id: PartID.ascending(),
+ sessionID: session.id,
+ messageID: a3,
+ type: "text",
+ text: "tail reply",
+ })
+
+ const c1 = await addUser(session.id)
+ await addCompactionPart(session.id, c1, a3)
+ const s1 = await addAssistant(session.id, c1, { summary: true, finish: "end_turn" })
+ await svc.updatePart({
+ id: PartID.ascending(),
+ sessionID: session.id,
+ messageID: s1,
+ type: "text",
+ text: "summary",
+ })
+
+ const u3 = await addUser(session.id, "third")
+ const a4 = await addAssistant(session.id, u3, { finish: "end_turn" })
+ await svc.updatePart({
+ id: PartID.ascending(),
+ sessionID: session.id,
+ messageID: a4,
+ type: "text",
+ text: "third reply",
+ })
+
+ const result = MessageV2.filterCompacted(MessageV2.stream(session.id))
+
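+ // The compaction part on c1 marks a3 as the tail start, so a3 survives while u1/a1/u2/a2 are filtered out.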
+ expect(result.map((item) => item.info.id)).toEqual([a3, c1, s1, u3, a4])
+
+ await svc.remove(session.id)
+ },
+ })
+ })
+
test("prefers latest compaction boundary when repeated compactions exist", async () => {
await Instance.provide({
directory: root,
diff --git a/packages/opencode/test/tool/apply_patch.test.ts b/packages/opencode/test/tool/apply_patch.test.ts
index ebfa9a531eec..fa88432136a5 100644
--- a/packages/opencode/test/tool/apply_patch.test.ts
+++ b/packages/opencode/test/tool/apply_patch.test.ts
@@ -195,6 +195,35 @@ describe("tool.apply_patch freeform", () => {
})
})
+ test("does not invent a first-line diff for BOM files", async () => {
+ await using fixture = await tmpdir()
+ const { ctx, calls } = makeCtx()
+
+ await Instance.provide({
+ directory: fixture.path,
+ fn: async () => {
+ const bom = String.fromCharCode(0xfeff)
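+ // U+FEFF byte order mark: it must survive in the file but never appear in the rendered patch.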
+ const target = path.join(fixture.path, "example.cs")
+ await fs.writeFile(target, `${bom}using System;\n\nclass Test {}\n`, "utf-8")
+
+ const patchText =
+ "*** Begin Patch\n*** Update File: example.cs\n@@\n class Test {}\n+class Next {}\n*** End Patch"
+
+ await execute({ patchText }, ctx)
+
+ expect(calls.length).toBe(1)
+ const shown = calls[0].metadata.files[0]?.patch ?? ""
+ expect(shown).not.toContain(bom)
+ expect(shown).not.toContain("-using System;")
+ expect(shown).not.toContain("+using System;")
+
+ const content = await fs.readFile(target, "utf-8")
+ expect(content.charCodeAt(0)).toBe(0xfeff)
+ expect(content.slice(1)).toBe("using System;\n\nclass Test {}\nclass Next {}\n")
+ },
+ })
+ })
+
test("inserts lines with insert-only hunk", async () => {
await using fixture = await tmpdir()
const { ctx } = makeCtx()
diff --git a/packages/opencode/test/tool/edit.test.ts b/packages/opencode/test/tool/edit.test.ts
index b5fbc0a67dde..82e1b4a7fd4b 100644
--- a/packages/opencode/test/tool/edit.test.ts
+++ b/packages/opencode/test/tool/edit.test.ts
@@ -96,6 +96,37 @@ describe("tool.edit", () => {
})
})
+ test("preserves BOM when oldString is empty on existing files", async () => {
+ await using tmp = await tmpdir()
+ const filepath = path.join(tmp.path, "existing.cs")
+ const bom = String.fromCharCode(0xfeff)
+ await fs.writeFile(filepath, `${bom}using System;\n`, "utf-8")
+
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const edit = await resolve()
+ const result = await Effect.runPromise(
+ edit.execute(
+ {
+ filePath: filepath,
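+ // empty oldString on an existing file replaces its entire contents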
+ oldString: "",
+ newString: "using Up;\n",
+ },
+ ctx,
+ ),
+ )
+
+ expect(result.metadata.diff).toContain("-using System;")
+ expect(result.metadata.diff).toContain("+using Up;")
+
+ const content = await fs.readFile(filepath, "utf-8")
+ expect(content.charCodeAt(0)).toBe(0xfeff)
+ expect(content.slice(1)).toBe("using Up;\n")
+ },
+ })
+ })
+
test("creates new file with nested directories", async () => {
await using tmp = await tmpdir()
const filepath = path.join(tmp.path, "nested", "dir", "file.txt")
@@ -183,6 +214,38 @@ describe("tool.edit", () => {
})
})
+ test("replaces the first visible line in BOM files", async () => {
+ await using tmp = await tmpdir()
+ const filepath = path.join(tmp.path, "existing.cs")
+ const bom = String.fromCharCode(0xfeff)
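+ // The match target "using System;" sits behind a leading BOM; the edit should still find it and keep the BOM.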
+ await fs.writeFile(filepath, `${bom}using System;\nclass Test {}\n`, "utf-8")
+
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ const edit = await resolve()
+ const result = await Effect.runPromise(
+ edit.execute(
+ {
+ filePath: filepath,
+ oldString: "using System;",
+ newString: "using Up;",
+ },
+ ctx,
+ ),
+ )
+
+ expect(result.metadata.diff).toContain("-using System;")
+ expect(result.metadata.diff).toContain("+using Up;")
+ expect(result.metadata.diff).not.toContain(bom)
+
+ const content = await fs.readFile(filepath, "utf-8")
+ expect(content.charCodeAt(0)).toBe(0xfeff)
+ expect(content.slice(1)).toBe("using Up;\nclass Test {}\n")
+ },
+ })
+ })
+
test("throws error when file does not exist", async () => {
await using tmp = await tmpdir()
const filepath = path.join(tmp.path, "nonexistent.txt")
diff --git a/packages/opencode/test/tool/write.test.ts b/packages/opencode/test/tool/write.test.ts
index 50d3b57527f9..36131f9596a3 100644
--- a/packages/opencode/test/tool/write.test.ts
+++ b/packages/opencode/test/tool/write.test.ts
@@ -114,6 +114,54 @@ describe("tool.write", () => {
),
)
+ it.live("preserves BOM when overwriting existing files", () =>
+ provideTmpdirInstance((dir) =>
+ Effect.gen(function* () {
+ const filepath = path.join(dir, "existing.cs")
+ const bom = String.fromCharCode(0xfeff)
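+ // Existing file starts with a BOM; overwriting with BOM-less content should keep it.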
+ yield* Effect.promise(() => fs.writeFile(filepath, `${bom}using System;\n`, "utf-8"))
+
+ yield* run({ filePath: filepath, content: "using Up;\n" })
+
+ const content = yield* Effect.promise(() => fs.readFile(filepath, "utf-8"))
+ expect(content.charCodeAt(0)).toBe(0xfeff)
+ expect(content.slice(1)).toBe("using Up;\n")
+ }),
+ ),
+ )
+
+ it.live("restores BOM after formatter strips it", () =>
+ provideTmpdirInstance(
+ (dir) =>
+ Effect.gen(function* () {
+ const filepath = path.join(dir, "formatted.cs")
+ const bom = String.fromCharCode(0xfeff)
+ yield* Effect.promise(() => fs.writeFile(filepath, `${bom}using System;\n`, "utf-8"))
+
+ yield* run({ filePath: filepath, content: "using Up;\n" })
+
+ const content = yield* Effect.promise(() => fs.readFile(filepath, "utf-8"))
+ expect(content.charCodeAt(0)).toBe(0xfeff)
+ expect(content.slice(1)).toBe("using Up;\n")
+ }),
+ {
+ config: {
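+ // Inline formatter that strips a leading BOM, exercising the write tool's BOM-restore path.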
+ formatter: {
+ stripbom: {
+ extensions: [".cs"],
+ command: [
+ "node",
+ "-e",
+ "const fs = require('fs'); const file = process.argv[1]; let text = fs.readFileSync(file, 'utf8'); if (text.charCodeAt(0) === 0xfeff) text = text.slice(1); fs.writeFileSync(file, text, 'utf8')",
+ "$FILE",
+ ],
+ },
+ },
+ },
+ },
+ ),
+ )
+
it.live("returns diff in metadata for existing files", () =>
provideTmpdirInstance((dir) =>
Effect.gen(function* () {
diff --git a/packages/shared/src/filesystem.ts b/packages/shared/src/filesystem.ts
index 44346be8f942..a077d92716bc 100644
--- a/packages/shared/src/filesystem.ts
+++ b/packages/shared/src/filesystem.ts
@@ -1,6 +1,6 @@
import { NodeFileSystem } from "@effect/platform-node"
import { dirname, join, relative, resolve as pathResolve } from "path"
-import { realpathSync } from "fs"
+import { accessSync, constants, realpathSync } from "fs"
import * as NFS from "fs/promises"
import { lookup } from "mime-types"
import { Effect, FileSystem, Layer, Schema, Context } from "effect"
@@ -23,6 +23,7 @@ export namespace AppFileSystem {
export interface Interface extends FileSystem.FileSystem {
readonly isDir: (path: string) => Effect.Effect<boolean>
readonly isFile: (path: string) => Effect.Effect<boolean>
+ readonly isWritable: (path: string) => Effect.Effect<boolean>
readonly existsSafe: (path: string) => Effect.Effect<boolean>
readonly readJson: (path: string) => Effect.Effect<unknown>
readonly writeJson: (path: string, data: unknown, mode?: number) => Effect.Effect<void>
@@ -57,6 +58,17 @@ export namespace AppFileSystem {
return info?.type === "File"
})
+ const isWritable = Effect.fn("FileSystem.isWritable")(function* (path: string) {
+ return yield* Effect.sync(() => {
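+ // accessSync throws when the path is missing or not writable; map that to a boolean result.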
+ try {
+ accessSync(path, constants.W_OK)
+ return true
+ } catch {
+ return false
+ }
+ })
+ })
+
const readDirectoryEntries = Effect.fn("FileSystem.readDirectoryEntries")(function* (dirPath: string) {
return yield* Effect.tryPromise({
try: async () => {
@@ -165,6 +177,7 @@ export namespace AppFileSystem {
existsSafe,
isDir,
isFile,
+ isWritable,
readDirectoryEntries,
readJson,
writeJson,
diff --git a/packages/shared/test/filesystem/filesystem.test.ts b/packages/shared/test/filesystem/filesystem.test.ts
index b49026bcba9d..3312adae8eae 100644
--- a/packages/shared/test/filesystem/filesystem.test.ts
+++ b/packages/shared/test/filesystem/filesystem.test.ts
@@ -65,6 +65,38 @@ describe("AppFileSystem", () => {
)
})
+ describe("isWritable", () => {
+ it(
+ "returns true for a writable file",
+ Effect.gen(function* () {
+ const fs = yield* AppFileSystem.Service
+ const filesys = yield* FileSystem.FileSystem
+ const tmp = yield* filesys.makeTempDirectoryScoped()
+ const file = path.join(tmp, "writable.txt")
+ yield* filesys.writeFileString(file, "hello")
+
+ expect(yield* fs.isWritable(file)).toBe(true)
+ }),
+ )
+
+ it(
+ "returns false for a read-only file",
+ Effect.gen(function* () {
+ const fs = yield* AppFileSystem.Service
+ const filesys = yield* FileSystem.FileSystem
+ const tmp = yield* filesys.makeTempDirectoryScoped()
+ const file = path.join(tmp, "readonly.txt")
+ yield* filesys.writeFileString(file, "hello")
+ yield* filesys.chmod(file, 0o444)
+
+ expect(yield* fs.isWritable(file)).toBe(false)
+
+ // restore permissions so temp cleanup can succeed
+ yield* filesys.chmod(file, 0o644)
+ }),
+ )
+ })
+
describe("readJson / writeJson", () => {
it(
"round-trips JSON data",
diff --git a/packages/ui/src/components/timeline-playground.stories.tsx b/packages/ui/src/components/timeline-playground.stories.tsx
index c071db303b7a..72f5730612c5 100644
--- a/packages/ui/src/components/timeline-playground.stories.tsx
+++ b/packages/ui/src/components/timeline-playground.stories.tsx
@@ -318,7 +318,7 @@ const TOOL_SAMPLES = {
tool: "bash",
input: { command: "bun test --filter session", description: "Run session tests" },
output:
- "bun test v1.3.11\n\n✓ session-turn.test.tsx (3 tests) 45ms\n✓ message-part.test.tsx (7 tests) 120ms\n\nTest Suites: 2 passed, 2 total\nTests: 10 passed, 10 total\nTime: 0.89s",
+ "bun test v1.3.13\n\n✓ session-turn.test.tsx (3 tests) 45ms\n✓ message-part.test.tsx (7 tests) 120ms\n\nTest Suites: 2 passed, 2 total\nTests: 10 passed, 10 total\nTime: 0.89s",
title: "Run session tests",
metadata: { command: "bun test --filter session" },
},
diff --git a/packages/web/src/content/docs/ar/go.mdx b/packages/web/src/content/docs/ar/go.mdx
index 179999af86e7..785ea35b6610 100644
--- a/packages/web/src/content/docs/ar/go.mdx
+++ b/packages/web/src/content/docs/ar/go.mdx
@@ -59,6 +59,8 @@ OpenCode Go حاليًا في المرحلة التجريبية.
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -80,18 +82,20 @@ OpenCode Go حاليًا في المرحلة التجريبية.
يوضح الجدول أدناه عددًا تقديريًا للطلبات بناءً على أنماط استخدام Go المعتادة:
-| Model | الطلبات لكل 5 ساعات | الطلبات في الأسبوع | الطلبات في الشهر |
-| ------------ | ------------------- | ------------------ | ---------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | الطلبات لكل 5 ساعات | الطلبات في الأسبوع | الطلبات في الشهر |
+| ------------- | ------------------- | ------------------ | ---------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
تستند التقديرات إلى متوسطات أنماط الطلبات المرصودة:
@@ -102,6 +106,8 @@ OpenCode Go حاليًا في المرحلة التجريبية.
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 input، و41,000 cached، و250 output tokens لكل طلب
- MiMo-V2-Omni — 1000 input، و60,000 cached، و140 output tokens لكل طلب
+- MiMo-V2.5-Pro — 350 input، و41,000 cached، و250 output tokens لكل طلب
+- MiMo-V2.5 — 1000 input، و60,000 cached، و140 output tokens لكل طلب
يمكنك تتبّع استخدامك الحالي في **console**.
@@ -123,18 +129,20 @@ OpenCode Go حاليًا في المرحلة التجريبية.
يمكنك أيضًا الوصول إلى نماذج Go عبر نقاط نهاية API التالية.
-| Model | Model ID | Endpoint | AI SDK Package |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Model | Model ID | Endpoint | AI SDK Package |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
يستخدم [model id](/docs/config/#models) في إعدادات OpenCode لديك التنسيق `opencode-go/<model-id>`. على سبيل المثال، بالنسبة إلى Kimi K2.6، ستستخدم `opencode-go/kimi-k2.6` في إعداداتك.
diff --git a/packages/web/src/content/docs/bs/go.mdx b/packages/web/src/content/docs/bs/go.mdx
index b94a78ee75a5..523f1ef8ed8b 100644
--- a/packages/web/src/content/docs/bs/go.mdx
+++ b/packages/web/src/content/docs/bs/go.mdx
@@ -69,6 +69,8 @@ Trenutna lista modela uključuje:
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -90,18 +92,20 @@ Ograničenja su definisana u dolarskoj vrijednosti. To znači da vaš stvarni br
Tabela ispod pruža procijenjeni broj zahtjeva na osnovu tipičnih obrazaca korištenja Go pretplate:
-| Model | zahtjeva na 5 sati | zahtjeva sedmično | zahtjeva mjesečno |
-| ------------ | ------------------ | ----------------- | ----------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | zahtjeva na 5 sati | zahtjeva sedmično | zahtjeva mjesečno |
+| ------------- | ------------------ | ----------------- | ----------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
Procjene se zasnivaju na zapaženim prosječnim obrascima zahtjeva:
@@ -112,6 +116,8 @@ Procjene se zasnivaju na zapaženim prosječnim obrascima zahtjeva:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 ulaznih, 41,000 keširanih, 250 izlaznih tokena po zahtjevu
- MiMo-V2-Omni — 1000 ulaznih, 60,000 keširanih, 140 izlaznih tokena po zahtjevu
+- MiMo-V2.5-Pro — 350 ulaznih, 41,000 keširanih, 250 izlaznih tokena po zahtjevu
+- MiMo-V2.5 — 1000 ulaznih, 60,000 keširanih, 140 izlaznih tokena po zahtjevu
Svoju trenutnu potrošnju možete pratiti u **konzoli**.
@@ -135,18 +141,20 @@ nakon što dostignete ograničenja upotrebe umjesto blokiranja zahtjeva.
Također možete pristupiti Go modelima putem sljedećih API endpointa.
-| Model | Model ID | Endpoint | AI SDK Paket |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Model | Model ID | Endpoint | AI SDK Paket |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
[Model id](/docs/config/#models) u vašoj OpenCode konfiguraciji
koristi format `opencode-go/<model-id>`. Na primjer, za Kimi K2.6, koristili biste
diff --git a/packages/web/src/content/docs/da/go.mdx b/packages/web/src/content/docs/da/go.mdx
index 0ef5f12226b4..86a834b984db 100644
--- a/packages/web/src/content/docs/da/go.mdx
+++ b/packages/web/src/content/docs/da/go.mdx
@@ -69,6 +69,8 @@ Den nuværende liste over modeller inkluderer:
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -90,18 +92,20 @@ Grænserne er defineret i dollarværdi. Det betyder, at dit faktiske antal anmod
Tabellen nedenfor giver et estimeret antal anmodninger baseret på typiske Go-forbrugsmønstre:
-| Model | anmodninger pr. 5 timer | anmodninger pr. uge | anmodninger pr. måned |
-| ------------ | ----------------------- | ------------------- | --------------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | anmodninger pr. 5 timer | anmodninger pr. uge | anmodninger pr. måned |
+| ------------- | ----------------------- | ------------------- | --------------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
Estimaterne er baseret på observerede gennemsnitlige anmodningsmønstre:
@@ -112,6 +116,8 @@ Estimaterne er baseret på observerede gennemsnitlige anmodningsmønstre:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 input, 41.000 cachelagrede, 250 output-tokens pr. anmodning
- MiMo-V2-Omni — 1000 input, 60.000 cachelagrede, 140 output-tokens pr. anmodning
+- MiMo-V2.5-Pro — 350 input, 41.000 cachelagrede, 250 output-tokens pr. anmodning
+- MiMo-V2.5 — 1000 input, 60.000 cachelagrede, 140 output-tokens pr. anmodning
Du kan spore dit nuværende forbrug i **konsollen**.
@@ -135,18 +141,20 @@ når du har nået dine forbrugsgrænser, i stedet for at blokere anmodninger.
Du kan også få adgang til Go-modeller gennem følgende API-endpoints.
-| Model | Model ID | Endpoint | AI SDK Package |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Model | Model ID | Endpoint | AI SDK Package |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
Dit [model id](/docs/config/#models) i din OpenCode config
bruger formatet `opencode-go/<model-id>`. For eksempel for Kimi K2.6, vil du
diff --git a/packages/web/src/content/docs/de/go.mdx b/packages/web/src/content/docs/de/go.mdx
index 269f6231ee17..49c0efda58b4 100644
--- a/packages/web/src/content/docs/de/go.mdx
+++ b/packages/web/src/content/docs/de/go.mdx
@@ -61,6 +61,8 @@ Die aktuelle Liste der Modelle umfasst:
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -82,18 +84,20 @@ Limits sind in Dollarwerten definiert. Das bedeutet, dass die tatsächliche Anza
Die folgende Tabelle zeigt eine geschätzte Anzahl von Anfragen basierend auf typischen Go-Nutzungsmustern:
-| Model | Anfragen pro 5 Stunden | Anfragen pro Woche | Anfragen pro Monat |
-| ------------ | ---------------------- | ------------------ | ------------------ |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | Anfragen pro 5 Stunden | Anfragen pro Woche | Anfragen pro Monat |
+| ------------- | ---------------------- | ------------------ | ------------------ |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
Die Schätzungen basieren auf beobachteten durchschnittlichen Anfragemustern:
@@ -104,6 +108,8 @@ Die Schätzungen basieren auf beobachteten durchschnittlichen Anfragemustern:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 Input-, 41.000 Cached-, 250 Output-Tokens pro Anfrage
- MiMo-V2-Omni — 1.000 Input-, 60.000 Cached-, 140 Output-Tokens pro Anfrage
+- MiMo-V2.5-Pro — 350 Input-, 41.000 Cached-, 250 Output-Tokens pro Anfrage
+- MiMo-V2.5 — 1.000 Input-, 60.000 Cached-, 140 Output-Tokens pro Anfrage
Du kannst deine aktuelle Nutzung in der **Console** verfolgen.
@@ -125,18 +131,20 @@ Wenn du auch Guthaben auf deinem Zen-Konto hast, kannst du in der Console die Op
Du kannst auf die Go-Modelle auch über die folgenden API-Endpunkte zugreifen.
-| Modell | Modell-ID | Endpunkt | AI SDK Package |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Modell | Modell-ID | Endpunkt | AI SDK Package |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
Die [Modell-ID](/docs/config/#models) in deiner OpenCode Config verwendet das Format `opencode-go/<model-id>`. Für Kimi K2.6 würdest du beispielsweise `opencode-go/kimi-k2.6` in deiner Config verwenden.
diff --git a/packages/web/src/content/docs/es/go.mdx b/packages/web/src/content/docs/es/go.mdx
index e70e3dd1f9d6..a541171cafdd 100644
--- a/packages/web/src/content/docs/es/go.mdx
+++ b/packages/web/src/content/docs/es/go.mdx
@@ -69,6 +69,8 @@ La lista actual de modelos incluye:
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -90,18 +92,20 @@ Los límites se definen en valor en dólares. Esto significa que tu cantidad rea
La siguiente tabla proporciona una cantidad estimada de peticiones basada en los patrones típicos de uso de Go:
-| Model | peticiones por 5 horas | peticiones por semana | peticiones por mes |
-| ------------ | ---------------------- | --------------------- | ------------------ |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | peticiones por 5 horas | peticiones por semana | peticiones por mes |
+| ------------- | ---------------------- | --------------------- | ------------------ |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
Las estimaciones se basan en los patrones de peticiones promedio observados:
@@ -112,6 +116,8 @@ Las estimaciones se basan en los patrones de peticiones promedio observados:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 tokens de entrada, 41,000 en caché, 250 tokens de salida por petición
- MiMo-V2-Omni — 1000 tokens de entrada, 60,000 en caché, 140 tokens de salida por petición
+- MiMo-V2.5-Pro — 350 tokens de entrada, 41,000 en caché, 250 tokens de salida por petición
+- MiMo-V2.5 — 1000 tokens de entrada, 60,000 en caché, 140 tokens de salida por petición
Puedes realizar un seguimiento de tu uso actual en la **consola**.
@@ -135,18 +141,20 @@ después de que hayas alcanzado tus límites de uso en lugar de bloquear las pet
También puedes acceder a los modelos de Go a través de los siguientes endpoints de la API.
-| Modelo | ID del modelo | Endpoint | Paquete de AI SDK |
-| ------------ | ------------- | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Modelo | ID del modelo | Endpoint | Paquete de AI SDK |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
El [ID del modelo](/docs/config/#models) en tu configuración de OpenCode
usa el formato `opencode-go/<model-id>`. Por ejemplo, para Kimi K2.6, usarías
diff --git a/packages/web/src/content/docs/fr/go.mdx b/packages/web/src/content/docs/fr/go.mdx
index 5527d9d865ae..5f55128ed499 100644
--- a/packages/web/src/content/docs/fr/go.mdx
+++ b/packages/web/src/content/docs/fr/go.mdx
@@ -59,6 +59,8 @@ La liste actuelle des modèles comprend :
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -80,18 +82,20 @@ Les limites sont définies en valeur monétaire (dollars). Cela signifie que vot
Le tableau ci-dessous fournit une estimation du nombre de requêtes basée sur des modèles d'utilisation typiques de Go :
-| Model | requêtes par 5 heures | requêtes par semaine | requêtes par mois |
-| ------------ | --------------------- | -------------------- | ----------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | requêtes par 5 heures | requêtes par semaine | requêtes par mois |
+| ------------- | --------------------- | -------------------- | ----------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
Les estimations sont basées sur les modèles de requêtes moyens observés :
@@ -102,6 +106,8 @@ Les estimations sont basées sur les modèles de requêtes moyens observés :
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 tokens en entrée, 41,000 en cache, 250 tokens en sortie par requête
- MiMo-V2-Omni — 1000 tokens en entrée, 60,000 en cache, 140 tokens en sortie par requête
+- MiMo-V2.5-Pro — 350 tokens en entrée, 41,000 en cache, 250 tokens en sortie par requête
+- MiMo-V2.5 — 1000 tokens en entrée, 60,000 en cache, 140 tokens en sortie par requête
Vous pouvez suivre votre utilisation actuelle dans la **console**.
@@ -123,18 +129,20 @@ Si vous avez également des crédits sur votre solde Zen, vous pouvez activer l'
Vous pouvez également accéder aux modèles Go via les points de terminaison d'API suivants.
-| Modèle | ID de modèle | Point de terminaison | Package AI SDK |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Modèle | ID de modèle | Point de terminaison | Package AI SDK |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
L'[ID de modèle](/docs/config/#models) dans votre configuration OpenCode utilise le format `opencode-go/<model-id>`. Par exemple, pour Kimi K2.6, vous utiliseriez `opencode-go/kimi-k2.6` dans votre configuration.
diff --git a/packages/web/src/content/docs/go.mdx b/packages/web/src/content/docs/go.mdx
index a39b6f7d246f..946c70de30f3 100644
--- a/packages/web/src/content/docs/go.mdx
+++ b/packages/web/src/content/docs/go.mdx
@@ -69,6 +69,8 @@ The current list of models includes:
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **MiniMax M2.7**
- **Qwen3.5 Plus**
@@ -90,18 +92,20 @@ Limits are defined in dollar value. This means your actual request count depends
The table below provides an estimated request count based on typical Go usage patterns:
-| Model | requests per 5 hour | requests per week | requests per month |
-| ------------ | ------------------- | ----------------- | ------------------ |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model         | requests per 5 hours | requests per week | requests per month |
+| ------------- | -------------------- | ----------------- | ------------------ |
+| GLM-5.1       | 880                  | 2,150             | 4,300              |
+| GLM-5         | 1,150                | 2,880             | 5,750              |
+| Kimi K2.5     | 1,850                | 4,630             | 9,250              |
+| Kimi K2.6     | 1,150                | 2,880             | 5,750              |
+| MiMo-V2-Pro   | 1,290                | 3,225             | 6,450              |
+| MiMo-V2-Omni  | 2,150                | 5,450             | 10,900             |
+| MiMo-V2.5-Pro | 1,290                | 3,225             | 6,450              |
+| MiMo-V2.5     | 2,150                | 5,450             | 10,900             |
+| Qwen3.6 Plus  | 3,300                | 8,200             | 16,300             |
+| MiniMax M2.7  | 3,400                | 8,500             | 17,000             |
+| MiniMax M2.5  | 6,300                | 15,900            | 31,800             |
+| Qwen3.5 Plus  | 10,200               | 25,200            | 50,500             |
Estimates are based on observed average request patterns:
@@ -110,6 +114,8 @@ Estimates are based on observed average request patterns:
- MiniMax M2.7/M2.5 — 300 input, 55,000 cached, 125 output tokens per request
- MiMo-V2-Pro — 350 input, 41,000 cached, 250 output tokens per request
- MiMo-V2-Omni — 1000 input, 60,000 cached, 140 output tokens per request
+- MiMo-V2.5-Pro — 350 input, 41,000 cached, 250 output tokens per request
+- MiMo-V2.5 — 1000 input, 60,000 cached, 140 output tokens per request
- Qwen3.5 Plus — 410 input, 47,000 cached, 140 output tokens per request
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
@@ -135,18 +141,20 @@ after you've reached your usage limits instead of blocking requests.
You can also access Go models through the following API endpoints.
-| Model | Model ID | Endpoint | AI SDK Package |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Model | Model ID | Endpoint | AI SDK Package |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
The [model id](/docs/config/#models) in your OpenCode config
uses the format `opencode-go/<model-id>`. For example, for Kimi K2.6, you would
diff --git a/packages/web/src/content/docs/it/go.mdx b/packages/web/src/content/docs/it/go.mdx
index 6cdf7ac6cfa8..341a22c4cb27 100644
--- a/packages/web/src/content/docs/it/go.mdx
+++ b/packages/web/src/content/docs/it/go.mdx
@@ -67,6 +67,8 @@ L'elenco attuale dei modelli include:
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -88,18 +90,20 @@ I limiti sono definiti in valore in dollari. Questo significa che il conteggio e
La tabella seguente fornisce una stima del conteggio delle richieste in base a pattern di utilizzo tipici di Go:
-| Model | richieste ogni 5 ore | richieste a settimana | richieste al mese |
-| ------------ | -------------------- | --------------------- | ----------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | richieste ogni 5 ore | richieste a settimana | richieste al mese |
+| ------------- | -------------------- | --------------------- | ----------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
Le stime si basano sui pattern medi di richieste osservati:
@@ -110,6 +114,8 @@ Le stime si basano sui pattern medi di richieste osservati:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 di input, 41.000 in cache, 250 token di output per richiesta
- MiMo-V2-Omni — 1000 di input, 60.000 in cache, 140 token di output per richiesta
+- MiMo-V2.5-Pro — 350 di input, 41.000 in cache, 250 token di output per richiesta
+- MiMo-V2.5 — 1000 di input, 60.000 in cache, 140 token di output per richiesta
Puoi monitorare il tuo utilizzo attuale nella **console**.
@@ -133,18 +139,20 @@ dopo che avrai raggiunto i limiti di utilizzo invece di bloccare le richieste.
Puoi anche accedere ai modelli Go tramite i seguenti endpoint API.
-| Modello | ID Modello | Endpoint | Pacchetto AI SDK |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Modello | ID Modello | Endpoint | Pacchetto AI SDK |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
Il [model id](/docs/config/#models) nella tua OpenCode config
utilizza il formato `opencode-go/<model-id>`. Ad esempio, per Kimi K2.6, useresti
diff --git a/packages/web/src/content/docs/ja/go.mdx b/packages/web/src/content/docs/ja/go.mdx
index f122d2367bc2..ddd5a66803a4 100644
--- a/packages/web/src/content/docs/ja/go.mdx
+++ b/packages/web/src/content/docs/ja/go.mdx
@@ -59,6 +59,8 @@ OpenCode Goをサブスクライブできるのは、1つのワークスペー
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -80,18 +82,20 @@ OpenCode Goには以下の制限が含まれています:
以下の表は、一般的なGoの利用パターンに基づいた推定リクエスト数を示しています:
-| Model | 5時間あたりのリクエスト数 | 週間リクエスト数 | 月間リクエスト数 |
-| ------------ | ------------------------- | ---------------- | ---------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | 5時間あたりのリクエスト数 | 週間リクエスト数 | 月間リクエスト数 |
+| ------------- | ------------------------- | ---------------- | ---------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
推定値は、観測された平均的なリクエストパターンに基づいています:
@@ -102,6 +106,8 @@ OpenCode Goには以下の制限が含まれています:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — リクエストあたり 入力 350トークン、キャッシュ 41,000トークン、出力 250トークン
- MiMo-V2-Omni — リクエストあたり 入力 1000トークン、キャッシュ 60,000トークン、出力 140トークン
+- MiMo-V2.5-Pro — リクエストあたり 入力 350トークン、キャッシュ 41,000トークン、出力 250トークン
+- MiMo-V2.5 — リクエストあたり 入力 1000トークン、キャッシュ 60,000トークン、出力 140トークン
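The per-request token shapes listed above are what turn a dollar-denominated limit into the request counts in these tables. A rough, illustrative TypeScript sketch of that arithmetic follows; the per-token prices and the five-hour budget are placeholder assumptions, not figures published in this doc:

```ts
// Illustrative only: maps a dollar-value limit to a request count.
// The $/token rates and the 5-hour budget are assumed placeholders.
const price = { input: 0.6 / 1e6, cached: 0.06 / 1e6, output: 2.4 / 1e6 }

// Observed average request shape for MiMo-V2-Pro (from the list above):
// 350 input, 41,000 cached, 250 output tokens per request.
const costPerRequest =
  350 * price.input + 41_000 * price.cached + 250 * price.output

const budgetPer5h = 5 // assumed dollars per 5-hour window
console.log(
  Math.floor(budgetPer5h / costPerRequest),
  "requests per 5 hours (illustrative)",
)
```

The same calculation with each model's token shape explains why cheaper, cache-heavy request patterns yield the larger request counts in the tables.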
現在の利用状況は**コンソール**で追跡できます。
@@ -123,18 +129,20 @@ Zen残高にクレジットがある場合は、コンソールで**Use balance*
以下のAPIエンドポイントを通じて、Goモデルにアクセスすることもできます。
-| Model | Model ID | Endpoint | AI SDK Package |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Model | Model ID | Endpoint | AI SDK Package |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
OpenCode設定の[model id](/docs/config/#models)は、`opencode-go/`という形式を使用します。たとえば、Kimi K2.6の場合は、設定で`opencode-go/kimi-k2.6`を使用します。
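Every locale page lists the same endpoints and AI SDK packages, so one hedged sketch covers all of the chat-completions rows. This assumes `@ai-sdk/openai-compatible` appends `/chat/completions` to the base URL (hence the `/v1` base below) and uses a hypothetical `OPENCODE_API_KEY` environment variable; treat it as a sketch, not a confirmed integration:

```ts
import { generateText } from "ai"
import { createOpenAICompatible } from "@ai-sdk/openai-compatible"

// Sketch: reach a chat-completions Go model through the endpoint in the
// tables above. The base URL omits `/chat/completions`, which the provider
// is assumed to append itself; OPENCODE_API_KEY is a hypothetical name.
const go = createOpenAICompatible({
  name: "opencode-go",
  baseURL: "https://opencode.ai/zen/go/v1",
  apiKey: process.env.OPENCODE_API_KEY,
})

const { text } = await generateText({
  model: go("kimi-k2.6"), // any model ID from the chat-completions rows
  prompt: "Say hello.",
})
console.log(text)
```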
diff --git a/packages/web/src/content/docs/ko/go.mdx b/packages/web/src/content/docs/ko/go.mdx
index cd0b1b8da2fe..da787040fb5b 100644
--- a/packages/web/src/content/docs/ko/go.mdx
+++ b/packages/web/src/content/docs/ko/go.mdx
@@ -59,6 +59,8 @@ workspace당 한 명의 멤버만 OpenCode Go를 구독할 수 있습니다.
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -80,18 +82,20 @@ OpenCode Go에는 다음과 같은 한도가 포함됩니다.
아래 표는 일반적인 Go 사용 패턴을 기준으로 한 예상 요청 횟수를 보여줍니다.
-| Model | 5시간당 요청 횟수 | 주간 요청 횟수 | 월간 요청 횟수 |
-| ------------ | ----------------- | -------------- | -------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | 5시간당 요청 횟수 | 주간 요청 횟수 | 월간 요청 횟수 |
+| ------------- | ----------------- | -------------- | -------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
예상치는 관찰된 평균 요청 패턴을 기준으로 합니다.
@@ -102,6 +106,8 @@ OpenCode Go에는 다음과 같은 한도가 포함됩니다.
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 요청당 입력 350, 캐시 41,000, 출력 토큰 250
- MiMo-V2-Omni — 요청당 입력 1000, 캐시 60,000, 출력 토큰 140
+- MiMo-V2.5-Pro — 요청당 입력 350, 캐시 41,000, 출력 토큰 250
+- MiMo-V2.5 — 요청당 입력 1000, 캐시 60,000, 출력 토큰 140
현재 사용량은 **console**에서 확인할 수 있습니다.
@@ -123,18 +129,20 @@ Zen 잔액에 크레딧도 있다면, console에서 **Use balance** 옵션을
다음 API 엔드포인트를 통해서도 Go 모델에 액세스할 수 있습니다.
-| 모델 | 모델 ID | 엔드포인트 | AI SDK 패키지 |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| 모델 | 모델 ID | 엔드포인트 | AI SDK 패키지 |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
OpenCode config의 [model id](/docs/config/#models)는 `opencode-go/` 형식을 사용합니다. 예를 들어 Kimi K2.6의 경우 config에서 `opencode-go/kimi-k2.6`를 사용하면 됩니다.
diff --git a/packages/web/src/content/docs/nb/go.mdx b/packages/web/src/content/docs/nb/go.mdx
index 776cc0c92d32..95c05417cf12 100644
--- a/packages/web/src/content/docs/nb/go.mdx
+++ b/packages/web/src/content/docs/nb/go.mdx
@@ -69,6 +69,8 @@ Den nåværende listen over modeller inkluderer:
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -90,18 +92,20 @@ Grensene er definert i dollarverdi. Dette betyr at ditt faktiske antall forespø
Tabellen nedenfor gir et estimert antall forespørsler basert på typiske bruksmønstre for Go:
-| Model | forespørsler per 5 timer | forespørsler per uke | forespørsler per måned |
-| ------------ | ------------------------ | -------------------- | ---------------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | forespørsler per 5 timer | forespørsler per uke | forespørsler per måned |
+| ------------- | ------------------------ | -------------------- | ---------------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
Estimatene er basert på observerte gjennomsnittlige forespørselsmønstre:
@@ -112,6 +116,8 @@ Estimatene er basert på observerte gjennomsnittlige forespørselsmønstre:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 input, 41 000 bufret, 250 output-tokens per forespørsel
- MiMo-V2-Omni — 1000 input, 60 000 bufret, 140 output-tokens per forespørsel
+- MiMo-V2.5-Pro — 350 input, 41 000 bufret, 250 output-tokens per forespørsel
+- MiMo-V2.5 — 1000 input, 60 000 bufret, 140 output-tokens per forespørsel
Du kan spore din nåværende bruk i **konsollen**.
@@ -135,18 +141,20 @@ etter at du har nådd bruksgrensene dine, i stedet for å blokkere forespørsler
Du kan også få tilgang til Go-modeller gjennom følgende API-endepunkter.
-| Modell | Modell-ID | Endepunkt | AI SDK Package |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Modell | Modell-ID | Endepunkt | AI SDK Package |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
[Modell-ID-en](/docs/config/#models) i din OpenCode-konfigurasjon
bruker formatet `opencode-go/`. For eksempel, for Kimi K2.6, vil du
diff --git a/packages/web/src/content/docs/pl/go.mdx b/packages/web/src/content/docs/pl/go.mdx
index d99f5e0986b2..9ae3ea34b8bf 100644
--- a/packages/web/src/content/docs/pl/go.mdx
+++ b/packages/web/src/content/docs/pl/go.mdx
@@ -63,6 +63,8 @@ Obecna lista modeli obejmuje:
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -84,18 +86,20 @@ Limity są zdefiniowane w wartości w dolarach. Oznacza to, że rzeczywista licz
Poniższa tabela przedstawia szacunkową liczbę żądań na podstawie typowych wzorców korzystania z Go:
-| Model | żądania na 5 godzin | żądania na tydzień | żądania na miesiąc |
-| ------------ | ------------------- | ------------------ | ------------------ |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | żądania na 5 godzin | żądania na tydzień | żądania na miesiąc |
+| ------------- | ------------------- | ------------------ | ------------------ |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
Szacunki opierają się na zaobserwowanych średnich wzorcach żądań:
@@ -106,6 +110,8 @@ Szacunki opierają się na zaobserwowanych średnich wzorcach żądań:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 tokenów wejściowych, 41 000 w pamięci podręcznej, 250 tokenów wyjściowych na żądanie
- MiMo-V2-Omni — 1000 tokenów wejściowych, 60 000 w pamięci podręcznej, 140 tokenów wyjściowych na żądanie
+- MiMo-V2.5-Pro — 350 tokenów wejściowych, 41 000 w pamięci podręcznej, 250 tokenów wyjściowych na żądanie
+- MiMo-V2.5 — 1000 tokenów wejściowych, 60 000 w pamięci podręcznej, 140 tokenów wyjściowych na żądanie
Możesz śledzić swoje bieżące zużycie w **konsoli**.
@@ -127,18 +133,20 @@ Jeśli masz również środki na swoim saldzie Zen, możesz włączyć opcję **
Możesz również uzyskać dostęp do modeli Go za pośrednictwem następujących punktów końcowych API.
-| Model | ID modelu | Punkt końcowy | Pakiet AI SDK |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Model | ID modelu | Punkt końcowy | Pakiet AI SDK |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
[ID modelu](/docs/config/#models) w Twojej konfiguracji OpenCode
używa formatu `opencode-go/`. Na przykład dla Kimi K2.6 należy użyć
diff --git a/packages/web/src/content/docs/pt-br/go.mdx b/packages/web/src/content/docs/pt-br/go.mdx
index 631038298ae1..7d4d90ed51b2 100644
--- a/packages/web/src/content/docs/pt-br/go.mdx
+++ b/packages/web/src/content/docs/pt-br/go.mdx
@@ -69,6 +69,8 @@ A lista atual de modelos inclui:
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -90,18 +92,20 @@ Os limites são definidos em valor em dólares. Isso significa que a sua contage
A tabela abaixo fornece uma contagem estimada de requisições com base nos padrões típicos de uso do Go:
-| Model | requisições por 5 horas | requisições por semana | requisições por mês |
-| ------------ | ----------------------- | ---------------------- | ------------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | requisições por 5 horas | requisições por semana | requisições por mês |
+| ------------- | ----------------------- | ---------------------- | ------------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
As estimativas baseiam-se nos padrões médios de requisições observados:
@@ -112,6 +116,8 @@ As estimativas baseiam-se nos padrões médios de requisições observados:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 tokens de entrada, 41.000 em cache, 250 tokens de saída por requisição
- MiMo-V2-Omni — 1000 tokens de entrada, 60.000 em cache, 140 tokens de saída por requisição
+- MiMo-V2.5-Pro — 350 tokens de entrada, 41.000 em cache, 250 tokens de saída por requisição
+- MiMo-V2.5 — 1000 tokens de entrada, 60.000 em cache, 140 tokens de saída por requisição
Você pode acompanhar o seu uso atual no **console**.
@@ -135,18 +141,20 @@ após você atingir os seus limites de uso em vez de bloquear as requisições.
Você também pode acessar os modelos do Go através dos seguintes endpoints de API.
-| Modelo | ID do Modelo | Endpoint | Pacote do AI SDK |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Modelo | ID do Modelo | Endpoint | Pacote do AI SDK |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
O [ID do modelo](/docs/config/#models) na sua configuração do OpenCode
usa o formato `opencode-go/`. Por exemplo, para o Kimi K2.6, você usaria
diff --git a/packages/web/src/content/docs/ru/go.mdx b/packages/web/src/content/docs/ru/go.mdx
index 60f01c2b53ce..a8d33f296ddf 100644
--- a/packages/web/src/content/docs/ru/go.mdx
+++ b/packages/web/src/content/docs/ru/go.mdx
@@ -69,6 +69,8 @@ OpenCode Go работает так же, как и любой другой пр
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -90,18 +92,20 @@ OpenCode Go включает следующие лимиты:
В таблице ниже приведено примерное количество запросов на основе типичных сценариев использования Go:
-| Model | запросов за 5 часов | запросов в неделю | запросов в месяц |
-| ------------ | ------------------- | ----------------- | ---------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | запросов за 5 часов | запросов в неделю | запросов в месяц |
+| ------------- | ------------------- | ----------------- | ---------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
Оценки основаны на наблюдаемых средних показателях запросов:
@@ -112,6 +116,8 @@ OpenCode Go включает следующие лимиты:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 входных, 41,000 кешированных, 250 выходных токенов на запрос
- MiMo-V2-Omni — 1000 входных, 60,000 кешированных, 140 выходных токенов на запрос
+- MiMo-V2.5-Pro — 350 входных, 41,000 кешированных, 250 выходных токенов на запрос
+- MiMo-V2.5 — 1000 входных, 60,000 кешированных, 140 выходных токенов на запрос
Вы можете отслеживать текущее использование в **консоли**.
@@ -135,18 +141,20 @@ OpenCode Go включает следующие лимиты:
Вы также можете получить доступ к моделям Go через следующие API-эндпоинты.
-| Модель | ID модели | Эндпоинт | Пакет AI SDK |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Модель | ID модели | Эндпоинт | Пакет AI SDK |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
[ID модели](/docs/config/#models) в вашем конфиге OpenCode
использует формат `opencode-go/`. Например, для Kimi K2.6 вам нужно
diff --git a/packages/web/src/content/docs/th/go.mdx b/packages/web/src/content/docs/th/go.mdx
index 3af1eadc9f69..fb0262c9582a 100644
--- a/packages/web/src/content/docs/th/go.mdx
+++ b/packages/web/src/content/docs/th/go.mdx
@@ -59,6 +59,8 @@ OpenCode Go ทำงานเหมือนกับผู้ให้บร
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -80,18 +82,20 @@ OpenCode Go มีขีดจำกัดดังต่อไปนี้:
ตารางด้านล่างแสดงจำนวน request โดยประมาณตามรูปแบบการใช้งานปกติของ Go:
-| Model | requests ต่อ 5 ชั่วโมง | requests ต่อสัปดาห์ | requests ต่อเดือน |
-| ------------ | ---------------------- | ------------------- | ----------------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | requests ต่อ 5 ชั่วโมง | requests ต่อสัปดาห์ | requests ต่อเดือน |
+| ------------- | ---------------------- | ------------------- | ----------------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
การประมาณการอ้างอิงจากรูปแบบการใช้งาน request โดยเฉลี่ยที่สังเกตพบ:
@@ -102,6 +106,8 @@ OpenCode Go มีขีดจำกัดดังต่อไปนี้:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 350 input, 41,000 cached, 250 output tokens ต่อ request
- MiMo-V2-Omni — 1000 input, 60,000 cached, 140 output tokens ต่อ request
+- MiMo-V2.5-Pro — 350 input, 41,000 cached, 250 output tokens ต่อ request
+- MiMo-V2.5 — 1000 input, 60,000 cached, 140 output tokens ต่อ request
คุณสามารถติดตามการใช้งานปัจจุบันของคุณได้ใน **console**
@@ -123,18 +129,20 @@ OpenCode Go มีขีดจำกัดดังต่อไปนี้:
คุณสามารถเข้าถึงโมเดลของ Go ผ่าน API endpoints ต่อไปนี้ได้เช่นกัน
-| Model | Model ID | Endpoint | AI SDK Package |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Model | Model ID | Endpoint | AI SDK Package |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
[model id](/docs/config/#models) ใน OpenCode config ของคุณจะใช้รูปแบบ `opencode-go/` ตัวอย่างเช่น สำหรับ Kimi K2.6 คุณจะใช้ `opencode-go/kimi-k2.6` ใน config ของคุณ
diff --git a/packages/web/src/content/docs/tr/go.mdx b/packages/web/src/content/docs/tr/go.mdx
index e962c06807d3..96a1ca3e2fd1 100644
--- a/packages/web/src/content/docs/tr/go.mdx
+++ b/packages/web/src/content/docs/tr/go.mdx
@@ -59,6 +59,8 @@ Mevcut model listesi şunları içerir:
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -80,18 +82,20 @@ Limitler dolar değeri üzerinden belirlenmiştir. Bu, gerçek istek sayınızı
Aşağıdaki tablo, tipik Go kullanım modellerine dayalı tahmini bir istek sayısı sunmaktadır:
-| Model | 5 saatte bir istek | haftalık istek | aylık istek |
-| ------------ | ------------------ | -------------- | ----------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | 5 saatte bir istek | haftalık istek | aylık istek |
+| ------------- | ------------------ | -------------- | ----------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
Tahminler, gözlemlenen ortalama istek modellerine dayanmaktadır:
@@ -102,6 +106,8 @@ Tahminler, gözlemlenen ortalama istek modellerine dayanmaktadır:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — İstek başına 350 girdi, 41.000 önbelleğe alınmış, 250 çıktı token'ı
- MiMo-V2-Omni — İstek başına 1000 girdi, 60.000 önbelleğe alınmış, 140 çıktı token'ı
+- MiMo-V2.5-Pro — İstek başına 350 girdi, 41.000 önbelleğe alınmış, 250 çıktı token'ı
+- MiMo-V2.5 — İstek başına 1000 girdi, 60.000 önbelleğe alınmış, 140 çıktı token'ı
Mevcut kullanımınızı **konsoldan** takip edebilirsiniz.
@@ -123,18 +129,20 @@ Eğer Zen bakiyenizde kredileriniz varsa, konsoldan **Bakiye kullan (Use balance
Go modellerine aşağıdaki API uç noktaları aracılığıyla da erişebilirsiniz.
-| Model | Model ID | Uç Nokta | AI SDK Paketi |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Model | Model ID | Uç Nokta | AI SDK Paketi |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
OpenCode yapılandırmanızdaki [model id](/docs/config/#models) formatı `opencode-go/` şeklindedir. Örneğin, Kimi K2.6 için yapılandırmanızda `opencode-go/kimi-k2.6` kullanmalısınız.
diff --git a/packages/web/src/content/docs/zh-cn/go.mdx b/packages/web/src/content/docs/zh-cn/go.mdx
index ac3b5f9bf568..f52f5b572e0b 100644
--- a/packages/web/src/content/docs/zh-cn/go.mdx
+++ b/packages/web/src/content/docs/zh-cn/go.mdx
@@ -59,6 +59,8 @@ OpenCode Go 的工作方式与 OpenCode 中的其他提供商一样。
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -80,18 +82,20 @@ OpenCode Go 包含以下限制:
下表提供了基于典型 Go 使用模式的预估请求数:
-| Model | 每 5 小时请求数 | 每周请求数 | 每月请求数 |
-| ------------ | --------------- | ---------- | ---------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | 每 5 小时请求数 | 每周请求数 | 每月请求数 |
+| ------------- | --------------- | ---------- | ---------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
预估值基于观察到的平均请求模式:
@@ -99,6 +103,8 @@ OpenCode Go 包含以下限制:
- Kimi K2.5/K2.6 — 每次请求 870 个输入 token,55,000 个缓存 token,200 个输出 token
- MiMo-V2-Pro — 每次请求 350 个输入 token,41,000 个缓存 token,250 个输出 token
- MiMo-V2-Omni — 每次请求 1000 个输入 token,60,000 个缓存 token,140 个输出 token
+- MiMo-V2.5-Pro — 每次请求 350 个输入 token,41,000 个缓存 token,250 个输出 token
+- MiMo-V2.5 — 每次请求 1000 个输入 token,60,000 个缓存 token,140 个输出 token
- MiniMax M2.7/M2.5 — 每次请求 300 个输入 token,55,000 个缓存 token,125 个输出 token
- Qwen3.5 Plus — 410 input, 47,000 cached, 140 output tokens per request
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
@@ -123,18 +129,20 @@ OpenCode Go 包含以下限制:
你也可以通过以下 API 端点访问 Go 模型。
-| 模型 | 模型 ID | 端点 | AI SDK 包 |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| 模型 | 模型 ID | 端点 | AI SDK 包 |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
你 OpenCode 配置中的 [模型 ID](/docs/config/#models) 使用 `opencode-go/` 格式。例如,对于 Kimi K2.6,你将在配置中使用 `opencode-go/kimi-k2.6`。
diff --git a/packages/web/src/content/docs/zh-tw/go.mdx b/packages/web/src/content/docs/zh-tw/go.mdx
index 0621a6694059..481c08cec57f 100644
--- a/packages/web/src/content/docs/zh-tw/go.mdx
+++ b/packages/web/src/content/docs/zh-tw/go.mdx
@@ -59,6 +59,8 @@ OpenCode Go 的運作方式與 OpenCode 中的任何其他供應商相同。
- **Kimi K2.6**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
+- **MiMo-V2.5-Pro**
+- **MiMo-V2.5**
- **MiniMax M2.5**
- **Qwen3.5 Plus**
- **Qwen3.6 Plus**
@@ -80,18 +82,20 @@ OpenCode Go 包含以下限制:
下表提供了基於典型 Go 使用模式的預估請求次數:
-| Model | 每 5 小時請求數 | 每週請求數 | 每月請求數 |
-| ------------ | --------------- | ---------- | ---------- |
-| GLM-5.1 | 880 | 2,150 | 4,300 |
-| GLM-5 | 1,150 | 2,880 | 5,750 |
-| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
-| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
-| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
-| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
-| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
-| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
-| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
+| Model | 每 5 小時請求數 | 每週請求數 | 每月請求數 |
+| ------------- | --------------- | ---------- | ---------- |
+| GLM-5.1 | 880 | 2,150 | 4,300 |
+| GLM-5 | 1,150 | 2,880 | 5,750 |
+| Kimi K2.5 | 1,850 | 4,630 | 9,250 |
+| Kimi K2.6 | 1,150 | 2,880 | 5,750 |
+| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 |
+| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 |
+| MiMo-V2.5 | 2,150 | 5,450 | 10,900 |
+| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 |
+| MiniMax M2.7 | 3,400 | 8,500 | 17,000 |
+| MiniMax M2.5 | 6,300 | 15,900 | 31,800 |
+| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 |
預估值是基於觀察到的平均請求模式:
@@ -102,6 +106,8 @@ OpenCode Go 包含以下限制:
- Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request
- MiMo-V2-Pro — 每次請求 350 個輸入 token、41,000 個快取 token、250 個輸出 token
- MiMo-V2-Omni — 每次請求 1000 個輸入 token、60,000 個快取 token、140 個輸出 token
+- MiMo-V2.5-Pro — 每次請求 350 個輸入 token、41,000 個快取 token、250 個輸出 token
+- MiMo-V2.5 — 每次請求 1000 個輸入 token、60,000 個快取 token、140 個輸出 token
您可以在 **console** 中追蹤您目前的使用量。
@@ -123,18 +129,20 @@ OpenCode Go 包含以下限制:
您也可以透過以下 API 端點存取 Go 模型。
-| 模型 | 模型 ID | 端點 | AI SDK 套件 |
-| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
-| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
-| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
-| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| 模型 | 模型 ID | 端點 | AI SDK 套件 |
+| ------------- | ------------- | ------------------------------------------------ | --------------------------- |
+| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
+| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
+| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` |
您的 OpenCode 設定中的 [model id](/docs/config/#models) 使用 `opencode-go/` 格式。例如,Kimi K2.6 在設定中應使用 `opencode-go/kimi-k2.6`。
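The MiniMax rows in these tables point at the `/messages` endpoint with `@ai-sdk/anthropic` instead. A matching sketch, under the same caveats: the Anthropic provider is assumed to append `/messages` to `baseURL`, and the key variable is again hypothetical.

```ts
import { generateText } from "ai"
import { createAnthropic } from "@ai-sdk/anthropic"

// Sketch for the Messages-style rows (MiniMax M2.7 / M2.5). The provider
// is assumed to append `/messages` to baseURL; OPENCODE_API_KEY is a
// hypothetical placeholder for your credential.
const goMessages = createAnthropic({
  baseURL: "https://opencode.ai/zen/go/v1",
  apiKey: process.env.OPENCODE_API_KEY,
})

const { text } = await generateText({
  model: goMessages("minimax-m2.5"),
  prompt: "Say hello.",
})
console.log(text)
```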