Skip to content

Commit fe3b251

Browse files
committed
feat: add model-specific instructions
1 parent cb9bdaf commit fe3b251

File tree

5 files changed

+189
-3
lines changed

5 files changed

+189
-3
lines changed

docs/instruction-files.md

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,32 @@ When compacting conversation history:
6262

6363
Customizing the `compact` mode is particularly useful for controlling what information is preserved during automatic history compaction.
6464

65+
## Model Prompts
66+
67+
Similar to modes, mux reads headings titled `Model: <regex>` to scope instructions to specific models or families. The `<regex>` is matched against the full model identifier (for example, `openai:gpt-5.1-codex`).
68+
69+
Rules:
70+
71+
- Workspace instructions are evaluated before global instructions; the first matching section wins.
72+
- Regexes are case-insensitive by default. Use `/pattern/flags` syntax to opt into custom flags (e.g., `/openai:.*codex/i`).
73+
- Invalid regex patterns are ignored instead of breaking the parse.
74+
- Only the content under the first matching heading is injected.
75+
76+
<!-- Developers: See extractModelSection in src/node/utils/main/markdown.ts for the implementation. -->
77+
78+
Example:
79+
80+
```markdown
81+
## Model: sonnet
82+
Anthropic's Claude Sonnet family tends to wax poetic—answer in two sentences max and focus on code changes.
83+
84+
## Model: /openai:.*codex/i
85+
OpenAI's GPT-5.1 Codex models already respond tersely, so no additional instruction is required.
86+
```
87+
88+
The second section documents that OpenAI models (such as `openai:gpt-5.1-codex`) don't need extra prompting, while Sonnet benefits from an explicit "be terse" reminder.
89+
90+
6591
## Practical layout
6692

6793
```

src/node/services/aiService.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -655,7 +655,8 @@ export class AIService extends EventEmitter {
655655
runtime,
656656
workspacePath,
657657
mode,
658-
additionalSystemInstructions
658+
additionalSystemInstructions,
659+
modelString
659660
);
660661

661662
// Count system message tokens for cost tracking

src/node/services/systemMessage.test.ts

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -203,4 +203,73 @@ Special mode instructions.
203203
expect(systemMessage).toContain("Special mode instructions");
204204
expect(systemMessage).toContain("</my-special_mode->");
205205
});
206+
207+
test("includes model-specific section when regex matches active model", async () => {
208+
await fs.writeFile(
209+
path.join(projectDir, "AGENTS.md"),
210+
`# Instructions
211+
## Model: sonnet
212+
Respond to Sonnet tickets in two sentences max.
213+
`
214+
);
215+
216+
const metadata: WorkspaceMetadata = {
217+
id: "test-workspace",
218+
name: "test-workspace",
219+
projectName: "test-project",
220+
projectPath: projectDir,
221+
runtimeConfig: DEFAULT_RUNTIME_CONFIG,
222+
};
223+
224+
const systemMessage = await buildSystemMessage(
225+
metadata,
226+
runtime,
227+
workspaceDir,
228+
undefined,
229+
undefined,
230+
"anthropic:claude-3.5-sonnet"
231+
);
232+
233+
expect(systemMessage).toContain("<model-anthropic-claude-3-5-sonnet>");
234+
expect(systemMessage).toContain("Respond to Sonnet tickets in two sentences max.");
235+
expect(systemMessage).toContain("</model-anthropic-claude-3-5-sonnet>");
236+
});
237+
238+
test("falls back to global model section when project lacks a match", async () => {
239+
await fs.writeFile(
240+
path.join(globalDir, "AGENTS.md"),
241+
`# Global Instructions
242+
## Model: /openai:.*codex/i
243+
OpenAI's GPT-5.1 Codex models already default to terse replies.
244+
`
245+
);
246+
247+
await fs.writeFile(
248+
path.join(projectDir, "AGENTS.md"),
249+
`# Project Instructions
250+
General details only.
251+
`
252+
);
253+
254+
const metadata: WorkspaceMetadata = {
255+
id: "test-workspace",
256+
name: "test-workspace",
257+
projectName: "test-project",
258+
projectPath: projectDir,
259+
runtimeConfig: DEFAULT_RUNTIME_CONFIG,
260+
};
261+
262+
const systemMessage = await buildSystemMessage(
263+
metadata,
264+
runtime,
265+
workspaceDir,
266+
undefined,
267+
undefined,
268+
"openai:gpt-5.1-codex"
269+
);
270+
271+
expect(systemMessage).toContain("<model-openai-gpt-5-1-codex>");
272+
expect(systemMessage).toContain("OpenAI's GPT-5.1 Codex models already default to terse replies.");
273+
});
274+
206275
});

src/node/services/systemMessage.ts

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ import {
33
readInstructionSet,
44
readInstructionSetFromRuntime,
55
} from "@/node/utils/main/instructionFiles";
6-
import { extractModeSection } from "@/node/utils/main/markdown";
6+
import { extractModeSection, extractModelSection } from "@/node/utils/main/markdown";
77
import type { Runtime } from "@/node/runtime/Runtime";
88
import { getMuxHome } from "@/common/constants/paths";
99

@@ -71,14 +71,16 @@ function getSystemDirectory(): string {
7171
* @param workspacePath - Workspace directory path
7272
* @param mode - Optional mode name (e.g., "plan", "exec")
7373
* @param additionalSystemInstructions - Optional instructions appended last
74+
* @param modelString - Active model identifier used for Model-specific sections
7475
* @throws Error if metadata or workspacePath invalid
7576
*/
7677
export async function buildSystemMessage(
7778
metadata: WorkspaceMetadata,
7879
runtime: Runtime,
7980
workspacePath: string,
8081
mode?: string,
81-
additionalSystemInstructions?: string
82+
additionalSystemInstructions?: string,
83+
modelString?: string
8284
): Promise<string> {
8385
if (!metadata) throw new Error("Invalid workspace metadata: metadata is required");
8486
if (!workspacePath) throw new Error("Invalid workspace path: workspacePath is required");
@@ -101,6 +103,15 @@ export async function buildSystemMessage(
101103
null;
102104
}
103105

106+
// Extract model-specific section based on active model identifier (context first)
107+
let modelContent: string | null = null;
108+
if (modelString) {
109+
modelContent =
110+
(contextInstructions && extractModelSection(contextInstructions, modelString)) ??
111+
(globalInstructions && extractModelSection(globalInstructions, modelString)) ??
112+
null;
113+
}
114+
104115
// Build system message
105116
let systemMessage = `${PRELUDE.trim()}\n\n${buildEnvironmentContext(workspacePath)}`;
106117

@@ -113,6 +124,13 @@ export async function buildSystemMessage(
113124
systemMessage += `\n\n<${tag}>\n${modeContent}\n</${tag}>`;
114125
}
115126

127+
if (modelContent && modelString) {
128+
const sanitizedModelValue =
129+
modelString.toLowerCase().replace(/[^a-z0-9_-]/gi, "-") || "model";
130+
const modelTag = `model-${sanitizedModelValue}`;
131+
systemMessage += `\n\n<${modelTag}>\n${modelContent}\n</${modelTag}>`;
132+
}
133+
116134
if (additionalSystemInstructions) {
117135
systemMessage += `\n\n<additional-instructions>\n${additionalSystemInstructions}\n</additional-instructions>`;
118136
}

src/node/utils/main/markdown.ts

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,3 +49,75 @@ export function extractModeSection(markdown: string, mode: string): string | nul
4949

5050
return null;
5151
}
52+
53+
/**
54+
* Extract the first section whose heading matches "Model: <regex>" and whose regex matches
55+
* the provided model identifier. Matching is case-insensitive by default unless the regex
56+
* heading explicitly specifies flags via /pattern/flags syntax.
57+
*/
58+
export function extractModelSection(markdown: string, modelId: string): string | null {
59+
if (!markdown || !modelId) return null;
60+
61+
const md = new MarkdownIt({ html: false, linkify: false, typographer: false });
62+
const tokens = md.parse(markdown, {});
63+
const lines = markdown.split(/\r?\n/);
64+
const headingPattern = /^model:\s*(.+)$/i;
65+
66+
const compileRegex = (pattern: string): RegExp | null => {
67+
const trimmed = pattern.trim();
68+
if (!trimmed) return null;
69+
70+
// Allow optional /pattern/flags syntax; default to case-insensitive matching otherwise
71+
if (trimmed.startsWith("/") && trimmed.lastIndexOf("/") > 0) {
72+
const lastSlash = trimmed.lastIndexOf("/");
73+
const source = trimmed.slice(1, lastSlash);
74+
const flags = trimmed.slice(lastSlash + 1);
75+
try {
76+
return new RegExp(source, flags || undefined);
77+
} catch {
78+
return null;
79+
}
80+
}
81+
82+
try {
83+
return new RegExp(trimmed, "i");
84+
} catch {
85+
return null;
86+
}
87+
};
88+
89+
for (let i = 0; i < tokens.length; i++) {
90+
const token = tokens[i];
91+
if (token.type !== "heading_open") continue;
92+
93+
const level = Number(token.tag?.replace(/^h/, "")) || 1;
94+
const inline = tokens[i + 1];
95+
if (inline?.type !== "inline") continue;
96+
97+
const match = headingPattern.exec((inline.content || "").trim());
98+
if (!match) continue;
99+
100+
const regex = compileRegex(match[1] ?? "");
101+
if (!regex) continue;
102+
if (!regex.test(modelId)) continue;
103+
104+
const headingEndLine = inline.map?.[1] ?? token.map?.[1] ?? (token.map?.[0] ?? 0) + 1;
105+
106+
let endLine = lines.length;
107+
for (let j = i + 1; j < tokens.length; j++) {
108+
const nextToken = tokens[j];
109+
if (nextToken.type === "heading_open") {
110+
const nextLevel = Number(nextToken.tag?.replace(/^h/, "")) || 1;
111+
if (nextLevel <= level) {
112+
endLine = nextToken.map?.[0] ?? endLine;
113+
break;
114+
}
115+
}
116+
}
117+
118+
const slice = lines.slice(headingEndLine, endLine).join("\n").trim();
119+
return slice.length > 0 ? slice : null;
120+
}
121+
122+
return null;
123+
}

0 commit comments

Comments
 (0)