Skip to content
125 changes: 125 additions & 0 deletions src/constants/knownModels.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
/**
 * Centralized model metadata. Update model versions here and everywhere else will follow.
 */

/** Providers for which we can construct SDK clients. */
type ModelProvider = "anthropic" | "openai";

/** Hand-written entry in the model table (before the full id is derived). */
interface KnownModelDefinition {
  /** Provider identifier used by SDK factories */
  provider: ModelProvider;
  /** Provider-specific model name (no provider prefix) */
  providerModelId: string;
  /** Aliases that should resolve to this model */
  aliases?: string[];
  /** Preload tokenizer encodings at startup */
  warm?: boolean;
  /** Use as global default model */
  isDefault?: boolean;
  /** Optional tokenizer override for ai-tokenizer */
  tokenizerOverride?: string;
}

/** Definition enriched with the derived `provider:model` id (see KNOWN_MODELS). */
interface KnownModel extends KnownModelDefinition {
  /** Full model id string in the format provider:model */
  id: `${ModelProvider}:${string}`;
}

// Model definitions. Note we avoid listing legacy models here. These represent the focal models
// of the community.
//
// `as const satisfies` keeps the keys as literal types (so KnownModelKey is a precise union)
// while still validating each entry against KnownModelDefinition.
const MODEL_DEFINITIONS = {
  SONNET: {
    provider: "anthropic",
    providerModelId: "claude-sonnet-4-5",
    aliases: ["sonnet"],
    warm: true,
    // Exactly one entry should carry isDefault; DEFAULT_MODEL falls back to SONNET anyway.
    isDefault: true,
    tokenizerOverride: "anthropic/claude-sonnet-4.5",
  },
  HAIKU: {
    provider: "anthropic",
    providerModelId: "claude-haiku-4-5",
    aliases: ["haiku"],
    // NOTE(review): override points at the 3.5 haiku tokenizer for a 4-5 model —
    // presumably because ai-tokenizer lacks a 4.5-haiku encoding; confirm intentional.
    tokenizerOverride: "anthropic/claude-3.5-haiku",
  },
  OPUS: {
    provider: "anthropic",
    providerModelId: "claude-opus-4-1",
    aliases: ["opus"],
  },
  GPT: {
    provider: "openai",
    providerModelId: "gpt-5.1",
    aliases: ["gpt-5.1"],
    warm: true,
  },
  GPT_PRO: {
    provider: "openai",
    providerModelId: "gpt-5-pro",
    aliases: ["gpt-5-pro"],
  },
  GPT_CODEX: {
    provider: "openai",
    providerModelId: "gpt-5.1-codex",
    aliases: ["codex"],
    warm: true,
  },
  // No aliases: reachable only via its full id.
  GPT_MINI: {
    provider: "openai",
    providerModelId: "gpt-5.1-codex-mini",
  },
} as const satisfies Record<string, KnownModelDefinition>;

export type KnownModelKey = keyof typeof MODEL_DEFINITIONS;

/**
 * The model table with the full `provider:model` id precomputed on each entry.
 * Derived from MODEL_DEFINITIONS; keys are preserved one-to-one.
 */
export const KNOWN_MODELS = (Object.keys(MODEL_DEFINITIONS) as KnownModelKey[]).reduce(
  (models, key) => {
    const definition = MODEL_DEFINITIONS[key];
    models[key] = {
      ...definition,
      id: `${definition.provider}:${definition.providerModelId}` as `${ModelProvider}:${string}`,
    };
    return models;
  },
  {} as Record<KnownModelKey, KnownModel>
);

/** Look up the full metadata (including the composed id) for a known model key. */
export function getKnownModel(key: KnownModelKey): KnownModel {
  const { [key]: model } = KNOWN_MODELS;
  return model;
}

// ------------------------------------------------------------------------------------
// Derived collections
// ------------------------------------------------------------------------------------

const DEFAULT_MODEL_ENTRY =
Object.values(KNOWN_MODELS).find((model) => model.isDefault) ?? KNOWN_MODELS.SONNET;

export const DEFAULT_MODEL = DEFAULT_MODEL_ENTRY.id;

/** Ids of models whose tokenizer encodings should be preloaded at startup. */
export const DEFAULT_WARM_MODELS = Object.values(KNOWN_MODELS).flatMap((model) =>
  model.warm ? [model.id] : []
);

/** alias → full model id, with aliases listed in locale-sorted order. */
export const MODEL_ABBREVIATIONS: Record<string, string> = (() => {
  const pairs: [string, string][] = [];
  for (const model of Object.values(KNOWN_MODELS)) {
    for (const alias of model.aliases ?? []) {
      pairs.push([alias, model.id]);
    }
  }
  pairs.sort(([a], [b]) => a.localeCompare(b));
  return Object.fromEntries(pairs);
})();

/**
 * full model id → ai-tokenizer model name, for models that need an override.
 * flatMap keeps the truthiness check and the read in one expression, so no
 * non-null assertion is needed (the original's filter could not narrow the
 * optional property, forcing `tokenizerOverride!`).
 */
export const TOKENIZER_MODEL_OVERRIDES: Record<string, string> = Object.fromEntries(
  Object.values(KNOWN_MODELS).flatMap((model) =>
    model.tokenizerOverride ? [[model.id, model.tokenizerOverride] as const] : []
  )
);

/**
 * Per-provider map of model key → provider-specific model name,
 * e.g. MODEL_NAMES.anthropic.SONNET === "claude-sonnet-4-5".
 *
 * Every provider key is seeded up front so the declared Record type is honest:
 * the original seeded `{} as Record<ModelProvider, …>`, a cast that would lie
 * (yield `undefined` at runtime) for any provider with no models.
 */
export const MODEL_NAMES: Record<ModelProvider, Record<string, string>> = Object.entries(
  KNOWN_MODELS
).reduce<Record<ModelProvider, Record<string, string>>>(
  (acc, [key, model]) => {
    acc[model.provider][key] = model.providerModelId;
    return acc;
  },
  { anthropic: {}, openai: {} }
);
6 changes: 5 additions & 1 deletion src/hooks/useModelLRU.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,11 @@ const MAX_LRU_SIZE = 8;
const LRU_KEY = "model-lru";

// Default models from abbreviations (for initial LRU population)
const DEFAULT_MODELS = Object.values(MODEL_ABBREVIATIONS);
// Ensure defaultModel is first, then fill with other abbreviations
const DEFAULT_MODELS = [
defaultModel,
...Object.values(MODEL_ABBREVIATIONS).filter((m) => m !== defaultModel),
].slice(0, MAX_LRU_SIZE);

/**
* Get the default model from LRU (non-hook version for use outside React)
Expand Down
3 changes: 2 additions & 1 deletion src/services/historyService.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import type { Config } from "@/config";
import { workspaceFileLocks } from "@/utils/concurrency/workspaceFileLocks";
import { log } from "./log";
import { getTokenizerForModel } from "@/utils/main/tokenizer";
import { KNOWN_MODELS } from "@/constants/knownModels";

/**
* HistoryService - Manages chat history persistence and sequence numbering
Expand Down Expand Up @@ -340,7 +341,7 @@ export class HistoryService {
}

// Get tokenizer for counting (use a default model)
const tokenizer = await getTokenizerForModel("anthropic:claude-sonnet-4-5");
const tokenizer = await getTokenizerForModel(KNOWN_MODELS.SONNET.id);

// Count tokens for each message
// We stringify the entire message for simplicity - only relative weights matter
Expand Down
3 changes: 2 additions & 1 deletion src/services/mock/mockScenarioPlayer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,9 @@ import type { StreamStartEvent, StreamDeltaEvent, StreamEndEvent } from "@/types
import type { ToolCallStartEvent, ToolCallEndEvent } from "@/types/stream";
import type { ReasoningDeltaEvent } from "@/types/stream";
import { getTokenizerForModel } from "@/utils/main/tokenizer";
import { KNOWN_MODELS } from "@/constants/knownModels";

const MOCK_TOKENIZER_MODEL = "openai:gpt-5";
const MOCK_TOKENIZER_MODEL = KNOWN_MODELS.GPT.id;
const TOKENIZE_TIMEOUT_MS = 150;
let tokenizerFallbackLogged = false;

Expand Down
5 changes: 3 additions & 2 deletions src/services/mock/scenarios/basicChat.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import type { ScenarioTurn } from "../scenarioTypes";
import { STREAM_BASE_DELAY } from "../scenarioTypes";
import { KNOWN_MODELS } from "@/constants/knownModels";

export const LIST_PROGRAMMING_LANGUAGES = "List 3 programming languages";

Expand All @@ -12,7 +13,7 @@ const listProgrammingLanguagesTurn: ScenarioTurn = {
assistant: {
messageId: "msg-basic-1",
events: [
{ kind: "stream-start", delay: 0, messageId: "msg-basic-1", model: "openai:gpt-5" },
{ kind: "stream-start", delay: 0, messageId: "msg-basic-1", model: KNOWN_MODELS.GPT.id },
{
kind: "stream-delta",
delay: STREAM_BASE_DELAY,
Expand All @@ -37,7 +38,7 @@ const listProgrammingLanguagesTurn: ScenarioTurn = {
kind: "stream-end",
delay: STREAM_BASE_DELAY * 5,
metadata: {
model: "openai:gpt-5",
model: KNOWN_MODELS.GPT.id,
inputTokens: 64,
outputTokens: 48,
systemMessageTokens: 12,
Expand Down
9 changes: 5 additions & 4 deletions src/services/mock/scenarios/permissionModes.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import type { ScenarioTurn } from "../scenarioTypes";
import { KNOWN_MODELS } from "@/constants/knownModels";
import { STREAM_BASE_DELAY } from "../scenarioTypes";

export const PERMISSION_MODE_PROMPTS = {
Expand All @@ -19,7 +20,7 @@ const planRefactorTurn: ScenarioTurn = {
kind: "stream-start",
delay: 0,
messageId: "msg-plan-refactor",
model: "openai:gpt-5",
model: KNOWN_MODELS.GPT.id,
},
{
kind: "stream-delta",
Expand All @@ -45,7 +46,7 @@ const planRefactorTurn: ScenarioTurn = {
kind: "stream-end",
delay: STREAM_BASE_DELAY * 5,
metadata: {
model: "openai:gpt-5",
model: KNOWN_MODELS.GPT.id,
inputTokens: 180,
outputTokens: 130,
systemMessageTokens: 24,
Expand Down Expand Up @@ -74,7 +75,7 @@ const executePlanTurn: ScenarioTurn = {
kind: "stream-start",
delay: 0,
messageId: "msg-exec-refactor",
model: "openai:gpt-5",
model: KNOWN_MODELS.GPT.id,
},
{
kind: "tool-start",
Expand Down Expand Up @@ -118,7 +119,7 @@ const executePlanTurn: ScenarioTurn = {
kind: "stream-end",
delay: STREAM_BASE_DELAY * 3,
metadata: {
model: "openai:gpt-5",
model: KNOWN_MODELS.GPT.id,
inputTokens: 220,
outputTokens: 110,
systemMessageTokens: 18,
Expand Down
11 changes: 6 additions & 5 deletions src/services/mock/scenarios/review.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import type { ScenarioTurn } from "../scenarioTypes";
import { KNOWN_MODELS } from "@/constants/knownModels";
import { STREAM_BASE_DELAY } from "../scenarioTypes";

export const REVIEW_PROMPTS = {
Expand All @@ -16,7 +17,7 @@ const summarizeBranchesTurn: ScenarioTurn = {
assistant: {
messageId: "msg-plan-1",
events: [
{ kind: "stream-start", delay: 0, messageId: "msg-plan-1", model: "openai:gpt-5" },
{ kind: "stream-start", delay: 0, messageId: "msg-plan-1", model: KNOWN_MODELS.GPT.id },
{
kind: "reasoning-delta",
delay: STREAM_BASE_DELAY,
Expand Down Expand Up @@ -61,7 +62,7 @@ const summarizeBranchesTurn: ScenarioTurn = {
kind: "stream-end",
delay: STREAM_BASE_DELAY * 6,
metadata: {
model: "openai:gpt-5",
model: KNOWN_MODELS.GPT.id,
inputTokens: 128,
outputTokens: 85,
systemMessageTokens: 32,
Expand All @@ -86,7 +87,7 @@ const openOnboardingDocTurn: ScenarioTurn = {
assistant: {
messageId: "msg-exec-1",
events: [
{ kind: "stream-start", delay: 0, messageId: "msg-exec-1", model: "openai:gpt-5" },
{ kind: "stream-start", delay: 0, messageId: "msg-exec-1", model: KNOWN_MODELS.GPT.id },
{
kind: "tool-start",
delay: STREAM_BASE_DELAY,
Expand Down Expand Up @@ -114,7 +115,7 @@ const showOnboardingDocTurn: ScenarioTurn = {
assistant: {
messageId: "msg-exec-2",
events: [
{ kind: "stream-start", delay: 0, messageId: "msg-exec-2", model: "openai:gpt-5" },
{ kind: "stream-start", delay: 0, messageId: "msg-exec-2", model: KNOWN_MODELS.GPT.id },
{
kind: "tool-start",
delay: STREAM_BASE_DELAY,
Expand Down Expand Up @@ -153,7 +154,7 @@ const showOnboardingDocTurn: ScenarioTurn = {
kind: "stream-end",
delay: STREAM_BASE_DELAY * 3,
metadata: {
model: "openai:gpt-5",
model: KNOWN_MODELS.GPT.id,
inputTokens: 96,
outputTokens: 142,
systemMessageTokens: 32,
Expand Down
5 changes: 3 additions & 2 deletions src/services/mock/scenarios/slashCommands.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import type { ScenarioTurn } from "../scenarioTypes";
import { KNOWN_MODELS } from "@/constants/knownModels";
import { STREAM_BASE_DELAY } from "../scenarioTypes";

export const SLASH_COMMAND_PROMPTS = {
Expand All @@ -24,7 +25,7 @@ const compactConversationTurn: ScenarioTurn = {
kind: "stream-start",
delay: 0,
messageId: "msg-slash-compact-1",
model: "openai:gpt-5",
model: KNOWN_MODELS.GPT.id,
},
{
kind: "stream-delta",
Expand All @@ -35,7 +36,7 @@ const compactConversationTurn: ScenarioTurn = {
kind: "stream-end",
delay: STREAM_BASE_DELAY * 2,
metadata: {
model: "openai:gpt-5",
model: KNOWN_MODELS.GPT.id,
inputTokens: 220,
outputTokens: 96,
systemMessageTokens: 18,
Expand Down
Loading