Commit 5e96883

🤖 feat: add Google provider with Gemini 3 support (#677)
Add Google as a model provider with support for Gemini 3.

## Changes

- Add `@ai-sdk/google` dependency
- Add Google provider creation in AIService with lazy loading pattern matching Anthropic/OpenAI
- Add `GoogleProviderOptions` to `MuxProviderOptions` type system
- Add `GOOGLE_API_KEY` and `GOOGLE_BASE_URL` environment variable support
- Add `GEMINI_3_PRO` to `KNOWN_MODELS` with `gemini-2.5-pro` tokenizer
- Add Google fallback to tokenizer (`google/gemini-2.5-pro`)
- Add test coverage for Gemini 3 tokenization and thinking policy

## Usage

Configure via `~/.mux/providers.jsonc`:

```json
{
  "google": {
    "apiKey": "YOUR_GOOGLE_API_KEY"
  }
}
```

Or via environment variable:

```bash
export GOOGLE_API_KEY="your-api-key"
```

Reference the model as:

- `google:gemini-3-pro-preview`
- `gemini-3` (alias)
- `gemini-3-pro` (alias)

## Infrastructure

The following infrastructure already supported Google:

- ✅ Tool definitions (google_search)
- ✅ Slash command registry
- ✅ Model display formatting

## Testing

- ✅ Tokenizer tests pass
- ✅ Thinking policy tests pass
- ✅ Type checking passes

_Generated with `mux`_

Signed-off-by: Thomas Kosiewski <tk@coder.com>
1 parent c134523 · commit 5e96883

File tree

15 files changed: +26682 additions, -2794 deletions


bun.lock

Lines changed: 179 additions & 366 deletions
Large diffs are not rendered by default.

docs/models.md

Lines changed: 26 additions & 0 deletions
@@ -21,6 +21,28 @@ GPT-5 family of models:
 
 - `openai:gpt-5`
 - `openai:gpt-5-pro`
+
+#### Google (Cloud)
+
+Access Gemini models directly via Google's generative AI API:
+
+- `google:gemini-3-pro-preview`
+- `google:gemini-2.5-pro`
+- `google:gemini-2.5-flash`
+
+**Setup:**
+
+1. Get your API key from [Google AI Studio](https://aistudio.google.com/)
+2. Add to `~/.mux/providers.jsonc`:
+
+```jsonc
+{
+  "google": {
+    "apiKey": "AIza...",
+  },
+}
+```
+
 - `openai:gpt-5-codex`
 
 **Note:** Anthropic models are better supported than GPT-5 class models due to an outstanding issue in the Vercel AI SDK.

@@ -141,6 +163,10 @@ All providers are configured in `~/.mux/providers.jsonc`. Example configurations
   "openai": {
     "apiKey": "sk-...",
   },
+  // Required for Google models
+  "google": {
+    "apiKey": "AIza...",
+  },
   // Required for OpenRouter models
   "openrouter": {
     "apiKey": "sk-or-v1-...",

package.json

Lines changed: 1 addition & 0 deletions
@@ -46,6 +46,7 @@
   },
   "dependencies": {
     "@ai-sdk/anthropic": "^2.0.44",
+    "@ai-sdk/google": "^2.0.28",
     "@ai-sdk/openai": "^2.0.66",
     "@openrouter/ai-sdk-provider": "^1.2.2",
     "ghostty-web": "^0.1.1",

src/browser/hooks/useModelLRU.ts

Lines changed: 3 additions & 3 deletions
@@ -3,14 +3,14 @@ import { usePersistedState, readPersistedState, updatePersistedState } from "./u
 import { MODEL_ABBREVIATIONS } from "@/browser/utils/slashCommands/registry";
 import { defaultModel } from "@/common/utils/ai/models";
 
-const MAX_LRU_SIZE = 8;
+const MAX_LRU_SIZE = 12;
 const LRU_KEY = "model-lru";
 
 // Default models from abbreviations (for initial LRU population)
-// Ensure defaultModel is first, then fill with other abbreviations
+// Ensure defaultModel is first, then fill with other abbreviations (deduplicated)
 const DEFAULT_MODELS = [
   defaultModel,
-  ...Object.values(MODEL_ABBREVIATIONS).filter((m) => m !== defaultModel),
+  ...Array.from(new Set(Object.values(MODEL_ABBREVIATIONS))).filter((m) => m !== defaultModel),
 ].slice(0, MAX_LRU_SIZE);
 function persistModels(models: string[]): void {
   updatePersistedState(LRU_KEY, models.slice(0, MAX_LRU_SIZE));
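
The switch to `Array.from(new Set(...))` matters because several abbreviations can point at the same model string, and without deduplication those repeats would waste slots in the LRU's default seed. A minimal sketch of the effect, using made-up abbreviation values and an assumed default model:

```ts
// Illustrative values only; the real MODEL_ABBREVIATIONS live in the slash-command registry.
const MODEL_ABBREVIATIONS = {
  "gemini-3": "google:gemini-3-pro-preview",
  "gemini-3-pro": "google:gemini-3-pro-preview", // same target as the entry above
  codex: "openai:gpt-5.1-codex",
} as const;
const defaultModel: string = "anthropic:claude-sonnet-4-5"; // assumed default for this example

// Before: duplicate targets survive and consume LRU slots.
const before = [
  defaultModel,
  ...Object.values(MODEL_ABBREVIATIONS).filter((m) => m !== defaultModel),
];
// -> [default, "google:gemini-3-pro-preview", "google:gemini-3-pro-preview", "openai:gpt-5.1-codex"]

// After: Set keeps the first occurrence of each target, preserving insertion order.
const after = [
  defaultModel,
  ...Array.from(new Set(Object.values(MODEL_ABBREVIATIONS))).filter((m) => m !== defaultModel),
];
// -> [default, "google:gemini-3-pro-preview", "openai:gpt-5.1-codex"]
```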

src/browser/utils/thinking/policy.test.ts

Lines changed: 6 additions & 0 deletions
@@ -46,6 +46,12 @@ describe("getThinkingPolicyForModel", () => {
       "medium",
       "high",
     ]);
+    expect(getThinkingPolicyForModel("google:gemini-3-pro-preview-11-2025")).toEqual([
+      "off",
+      "low",
+      "medium",
+      "high",
+    ]);
   });
 });

src/common/constants/knownModels.ts

Lines changed: 9 additions & 1 deletion
@@ -2,7 +2,7 @@
  * Centralized model metadata. Update model versions here and everywhere else will follow.
  */
 
-type ModelProvider = "anthropic" | "openai";
+type ModelProvider = "anthropic" | "openai" | "google";
 
 interface KnownModelDefinition {
   /** Provider identifier used by SDK factories */

@@ -51,6 +51,7 @@ const MODEL_DEFINITIONS = {
     providerModelId: "gpt-5.1",
     aliases: ["gpt-5.1"],
     warm: true,
+    tokenizerOverride: "openai/gpt-5",
   },
   GPT_PRO: {
     provider: "openai",

@@ -62,12 +63,19 @@ const MODEL_DEFINITIONS = {
     providerModelId: "gpt-5.1-codex",
     aliases: ["codex"],
     warm: true,
+    tokenizerOverride: "openai/gpt-5",
   },
   GPT_MINI: {
     provider: "openai",
     providerModelId: "gpt-5.1-codex-mini",
     aliases: ["codex-mini"],
   },
+  GEMINI_3_PRO: {
+    provider: "google",
+    providerModelId: "gemini-3-pro-preview",
+    aliases: ["gemini-3", "gemini-3-pro"],
+    tokenizerOverride: "google/gemini-2.5-pro",
+  },
 } as const satisfies Record<string, KnownModelDefinition>;
 
 export type KnownModelKey = keyof typeof MODEL_DEFINITIONS;
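
The new `GEMINI_3_PRO` entry means the canonical model string is `google:gemini-3-pro-preview` (provider plus `providerModelId`), reachable through the `gemini-3` and `gemini-3-pro` aliases, with token counting falling back to the `google/gemini-2.5-pro` tokenizer. A hedged sketch of how such an entry could be resolved (the `resolveModelRef` helper is hypothetical; the real alias handling lives in the registry and model utilities):

```ts
// Hypothetical helper showing how the fields of a KNOWN_MODELS entry combine; not mux's actual API.
interface Definition {
  provider: string;
  providerModelId: string;
  aliases: readonly string[];
  tokenizerOverride?: string;
}

function resolveModelRef(defs: Record<string, Definition>, ref: string): string {
  for (const def of Object.values(defs)) {
    const canonical = `${def.provider}:${def.providerModelId}`;
    if (ref === canonical || def.aliases.includes(ref)) return canonical;
  }
  return ref; // unknown strings pass through untouched
}

// With the GEMINI_3_PRO definition from this commit:
//   resolveModelRef(defs, "gemini-3")     -> "google:gemini-3-pro-preview"
//   resolveModelRef(defs, "gemini-3-pro") -> "google:gemini-3-pro-preview"
```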

src/common/constants/providers.ts

Lines changed: 8 additions & 0 deletions
@@ -27,6 +27,13 @@ export async function importOllama() {
   return await import("ollama-ai-provider-v2");
 }
 
+/**
+ * Dynamically import the Google provider package
+ */
+export async function importGoogle() {
+  return await import("@ai-sdk/google");
+}
+
 /**
  * Dynamically import the OpenRouter provider package
  */

@@ -50,6 +57,7 @@ export async function importOpenRouter() {
 export const PROVIDER_REGISTRY = {
   anthropic: importAnthropic,
   openai: importOpenAI,
+  google: importGoogle,
   ollama: importOllama,
   openrouter: importOpenRouter,
 } as const;
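
This registry is what backs the lazy-loading pattern mentioned in the commit message: `@ai-sdk/google` is only imported once a Google model is actually requested, and the provider instance is then built from the configured credentials. A rough sketch of that flow under the `apiKey`/`baseUrl` config shape used elsewhere in this commit (the wrapper function is illustrative, not AIService's actual code; `createGoogleGenerativeAI` is the factory exported by `@ai-sdk/google`):

```ts
import { PROVIDER_REGISTRY } from "@/common/constants/providers";

// Illustrative only: AIService's real wiring differs, but the lazy-import step is the same idea.
async function createGoogleModel(
  config: { apiKey: string; baseUrl?: string },
  modelId: string // e.g. "gemini-3-pro-preview"
) {
  // The dynamic import defers loading @ai-sdk/google until a google: model is used.
  const { createGoogleGenerativeAI } = await PROVIDER_REGISTRY.google();
  const provider = createGoogleGenerativeAI({
    apiKey: config.apiKey,
    baseURL: config.baseUrl, // picked up from GOOGLE_BASE_URL / providers.jsonc when set
  });
  return provider(modelId);
}
```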

src/common/types/providerOptions.ts

Lines changed: 7 additions & 0 deletions
@@ -29,6 +29,12 @@ export interface OpenAIProviderOptions {
   simulateToolPolicyNoop?: boolean;
 }
 
+/**
+ * Google-specific options
+ */
+// eslint-disable-next-line @typescript-eslint/no-empty-object-type
+export interface GoogleProviderOptions {}
+
 /**
  * Ollama-specific options
  * Currently empty - Ollama is a local service and doesn't require special options.

@@ -52,6 +58,7 @@ export interface MuxProviderOptions {
   /** Provider-specific options */
   anthropic?: AnthropicProviderOptions;
   openai?: OpenAIProviderOptions;
+  google?: GoogleProviderOptions;
   ollama?: OllamaProviderOptions;
   openrouter?: OpenRouterProviderOptions;
 }
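
Practically, this just reserves a `google` slot in `MuxProviderOptions`; until Google-specific options exist it carries an empty object. A minimal sketch, assuming the type has no other required fields:

```ts
import type { MuxProviderOptions } from "@/common/types/providerOptions";

// Minimal sketch: every provider slot is optional, and google's options are empty for now.
const options: MuxProviderOptions = {
  google: {},
};
```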

src/common/utils/ai/providerOptions.ts

Lines changed: 8 additions & 0 deletions
@@ -203,6 +203,14 @@ export function buildProviderOptions(
     return options;
   }
 
+  // Build Google-specific options
+  if (provider === "google") {
+    // Google Gemini models don't currently support the same thinking/reasoning
+    // configuration as Anthropic/OpenAI, so return empty options for now
+    log.debug("buildProviderOptions: Google config - no specific options yet");
+    return {};
+  }
+
   // Build OpenRouter-specific options
   if (provider === "openrouter") {
     const reasoningEffort = OPENROUTER_REASONING_EFFORT[effectiveThinking];

src/common/utils/providers/ensureProvidersConfig.ts

Lines changed: 13 additions & 1 deletion
@@ -78,6 +78,18 @@ const buildProvidersFromEnv = (env: NodeJS.ProcessEnv): ProvidersConfig => {
     }
   }
 
+  const googleKey = trim(env.GOOGLE_API_KEY);
+  if (googleKey.length > 0) {
+    const entry: ProviderConfig = { apiKey: googleKey };
+
+    const baseUrl = trim(env.GOOGLE_BASE_URL);
+    if (baseUrl.length > 0) {
+      entry.baseUrl = baseUrl;
+    }
+
+    providers.google = entry;
+  }
+
   return providers;
 };
 

@@ -102,7 +114,7 @@ export const ensureProvidersConfig = (
   const providersFromEnv = buildProvidersFromEnv(env);
   if (!hasAnyConfiguredProvider(providersFromEnv)) {
     throw new Error(
-      "No provider credentials found. Configure providers.jsonc or set ANTHROPIC_API_KEY / OPENAI_API_KEY / OPENROUTER_API_KEY."
+      "No provider credentials found. Configure providers.jsonc or set ANTHROPIC_API_KEY / OPENAI_API_KEY / OPENROUTER_API_KEY / GOOGLE_API_KEY."
     );
   }
 
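
The environment-variable path mirrors the other providers: `GOOGLE_API_KEY` alone is enough to register Google, and `GOOGLE_BASE_URL` is attached only when non-empty. A small self-contained sketch of that mapping (types and the `trim` helper simplified; the real logic is the `buildProvidersFromEnv` branch above):

```ts
// Simplified illustration of the Google branch of buildProvidersFromEnv.
interface ProviderConfig {
  apiKey: string;
  baseUrl?: string;
}

function googleFromEnv(env: NodeJS.ProcessEnv): ProviderConfig | undefined {
  const trim = (value: string | undefined) => (value ?? "").trim();

  const apiKey = trim(env.GOOGLE_API_KEY);
  if (apiKey.length === 0) return undefined; // no key means no google entry

  const entry: ProviderConfig = { apiKey };
  const baseUrl = trim(env.GOOGLE_BASE_URL);
  if (baseUrl.length > 0) entry.baseUrl = baseUrl; // optional custom endpoint
  return entry;
}

// googleFromEnv({ GOOGLE_API_KEY: "AIza..." })
//   -> { apiKey: "AIza..." }
// googleFromEnv({ GOOGLE_API_KEY: "AIza...", GOOGLE_BASE_URL: "https://example.test" })
//   -> { apiKey: "AIza...", baseUrl: "https://example.test" }
```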
