diff --git a/.changeset/itchy-hounds-cross.md b/.changeset/itchy-hounds-cross.md
new file mode 100644
index 0000000..b385767
--- /dev/null
+++ b/.changeset/itchy-hounds-cross.md
@@ -0,0 +1,6 @@
+---
+'mycoder-agent': patch
+'mycoder': patch
+---
+
+Add ability to enable/disable token caching via config values
diff --git a/packages/agent/src/core/tokens.ts b/packages/agent/src/core/tokens.ts
index ebad962..c923a91 100644
--- a/packages/agent/src/core/tokens.ts
+++ b/packages/agent/src/core/tokens.ts
@@ -73,6 +73,7 @@ export class TokenUsage {
 export class TokenTracker {
   public tokenUsage = new TokenUsage();
   public children: TokenTracker[] = [];
+  public tokenCache?: boolean;
 
   constructor(
     public readonly name: string = 'unnamed',
diff --git a/packages/agent/src/core/toolAgent/toolAgentCore.ts b/packages/agent/src/core/toolAgent/toolAgentCore.ts
index f05fdba..084ceba 100644
--- a/packages/agent/src/core/toolAgent/toolAgentCore.ts
+++ b/packages/agent/src/core/toolAgent/toolAgentCore.ts
@@ -57,11 +57,20 @@ export const toolAgent = async (
     });
   });
 
-  // Apply cache control to messages for token caching
-  const messagesWithCacheControl = [
-    createCacheControlMessageFromSystemPrompt(systemPrompt),
-    ...addCacheControlToMessages(messages),
-  ];
+  // Apply cache control to messages for token caching if enabled
+  const messagesWithCacheControl =
+    tokenTracker.tokenCache !== false && context.tokenCache !== false
+      ? [
+          createCacheControlMessageFromSystemPrompt(systemPrompt),
+          ...addCacheControlToMessages(messages),
+        ]
+      : [
+          {
+            role: 'system',
+            content: systemPrompt,
+          } as CoreMessage,
+          ...messages,
+        ];
 
   const generateTextProps = {
     model: config.model,
diff --git a/packages/agent/src/core/types.ts b/packages/agent/src/core/types.ts
index 1479ff7..4d60cfb 100644
--- a/packages/agent/src/core/types.ts
+++ b/packages/agent/src/core/types.ts
@@ -18,6 +18,7 @@ export type ToolContext = {
   tokenTracker: TokenTracker;
   githubMode: boolean;
   customPrompt?: string;
+  tokenCache?: boolean;
 };
 
 export type Tool<TParams = Record<string, any>, TReturn = any> = {
diff --git a/packages/cli/README.md b/packages/cli/README.md
index e87b280..3f90e4f 100644
--- a/packages/cli/README.md
+++ b/packages/cli/README.md
@@ -125,6 +125,7 @@ mycoder --modelProvider openai --modelName gpt-4o-2024-05-13 "Your prompt here"
 - `pageFilter`: Method to process webpage content: 'simple', 'none', or 'readability' (default: `none`)
 - `ollamaBaseUrl`: Base URL for Ollama API (default: `http://localhost:11434/api`)
 - `customPrompt`: Custom instructions to append to the system prompt for both main agent and sub-agents (default: `""`)
+- `tokenCache`: Enable token caching for LLM API calls (default: `true`)
 
 Example:
 
@@ -143,6 +144,9 @@ mycoder config set ollamaBaseUrl http://your-ollama-server:11434/api
 
 # Set custom instructions for the agent
 mycoder config set customPrompt "Always prioritize readability and simplicity in your code. Prefer TypeScript over JavaScript when possible."
+
+# Disable token caching for LLM API calls
+mycoder config set tokenCache false
 ```
 
 ## Environment Variables
diff --git a/packages/cli/src/commands/$default.ts b/packages/cli/src/commands/$default.ts
index 0dff70b..e381fca 100644
--- a/packages/cli/src/commands/$default.ts
+++ b/packages/cli/src/commands/$default.ts
@@ -92,6 +92,10 @@ export const command: CommandModule = {
   try {
     // Get configuration for model provider and name
     const userConfig = getConfig();
+    // Use command line option if provided, otherwise use config value
+    tokenTracker.tokenCache =
+      argv.tokenCache !== undefined ? argv.tokenCache : userConfig.tokenCache;
+
     const userModelProvider = argv.modelProvider || userConfig.modelProvider;
     const userModelName = argv.modelName || userConfig.modelName;
 
@@ -177,6 +181,8 @@ export const command: CommandModule = {
       tokenTracker,
       githubMode: config.githubMode,
       customPrompt: config.customPrompt,
+      tokenCache:
+        argv.tokenCache !== undefined ? argv.tokenCache : config.tokenCache,
     });
 
     const output =
diff --git a/packages/cli/src/options.ts b/packages/cli/src/options.ts
index d03f5b3..b61b58f 100644
--- a/packages/cli/src/options.ts
+++ b/packages/cli/src/options.ts
@@ -10,6 +10,7 @@ export type SharedOptions = {
   readonly modelProvider?: string;
   readonly modelName?: string;
   readonly profile?: boolean;
+  readonly tokenCache?: boolean;
 };
 
 export const sharedOptions = {
@@ -72,4 +73,8 @@ export const sharedOptions = {
     description: 'Custom Sentry DSN for error tracking',
     hidden: true,
   } as const,
+  tokenCache: {
+    type: 'boolean',
+    description: 'Enable token caching for LLM API calls',
+  } as const,
 };
diff --git a/packages/cli/src/settings/config.ts b/packages/cli/src/settings/config.ts
index 796e037..a4d4b1f 100644
--- a/packages/cli/src/settings/config.ts
+++ b/packages/cli/src/settings/config.ts
@@ -17,6 +17,7 @@ const defaultConfig = {
   ollamaBaseUrl: 'http://localhost:11434/api',
   customPrompt: '',
   profile: false,
+  tokenCache: true,
 };
 
 export type Config = typeof defaultConfig;
diff --git a/packages/cli/tests/settings/config.test.ts b/packages/cli/tests/settings/config.test.ts
index 9af4894..e2f2ae2 100644
--- a/packages/cli/tests/settings/config.test.ts
+++ b/packages/cli/tests/settings/config.test.ts
@@ -46,6 +46,7 @@ describe('Config', () => {
       ollamaBaseUrl: 'http://localhost:11434/api',
       profile: false,
       customPrompt: '',
+      tokenCache: true,
     });
     expect(fs.existsSync).toHaveBeenCalledWith(mockConfigFile);
   });
@@ -80,6 +81,7 @@ describe('Config', () => {
       ollamaBaseUrl: 'http://localhost:11434/api',
       profile: false,
       customPrompt: '',
+      tokenCache: true,
     });
   });
 });
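
To make the `toolAgentCore.ts` change easier to review in isolation, here is a minimal self-contained sketch of the branch it introduces. The `CoreMessage` shape and the two helper bodies are hypothetical stand-ins, since the diff only shows their call sites; in the real code the helpers attach provider cache-control metadata (e.g. Anthropic prompt caching) rather than returning plain messages.

```ts
// A minimal sketch, assuming a simplified CoreMessage shape; the real type
// comes from the AI SDK used by mycoder-agent.
type CoreMessage = {
  role: 'system' | 'user' | 'assistant';
  content: string;
};

// Hypothetical stand-ins: the real helpers mark messages with cache-control
// metadata so the provider can reuse previously processed prompt tokens.
const createCacheControlMessageFromSystemPrompt = (
  systemPrompt: string,
): CoreMessage => ({ role: 'system', content: systemPrompt });

const addCacheControlToMessages = (messages: CoreMessage[]): CoreMessage[] =>
  messages;

// Mirrors the diff: caching is opt-out, so only an explicit `false`
// disables it and `undefined` (option never set) behaves like `true`,
// matching the `tokenCache: true` default added to defaultConfig.
function buildMessagesWithOptionalCacheControl(
  systemPrompt: string,
  messages: CoreMessage[],
  tokenCache?: boolean,
): CoreMessage[] {
  return tokenCache !== false
    ? [
        createCacheControlMessageFromSystemPrompt(systemPrompt),
        ...addCacheControlToMessages(messages),
      ]
    : [{ role: 'system', content: systemPrompt }, ...messages];
}

const msgs = buildMessagesWithOptionalCacheControl(
  'You are MyCoder.',
  [{ role: 'user', content: 'List the repo files.' }],
  false, // disabled, as with `mycoder config set tokenCache false`
);
console.log(msgs[0]); // plain system message, no cache-control wrapping
```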
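One design choice worth noting: the CLI wiring in `$default.ts` resolves the effective value with an explicit `!== undefined` check rather than `||`. That matters because `false` is falsy, so `argv.tokenCache || config.tokenCache` would silently discard a user's explicit opt-out. A sketch of the rule (the negated-flag spelling is the usual yargs convention, assumed here):

```ts
// CLI flag wins when present; otherwise fall back to the stored config.
function resolveTokenCache(
  argvValue: boolean | undefined, // from --tokenCache / --no-tokenCache
  configValue: boolean, // defaults to true in defaultConfig
): boolean {
  return argvValue !== undefined ? argvValue : configValue;
}

console.log(resolveTokenCache(undefined, true)); // true: config default applies
console.log(resolveTokenCache(false, true)); // false: explicit flag overrides
```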