diff --git a/README.md b/README.md index 7281773..f9801c3 100644 --- a/README.md +++ b/README.md @@ -108,11 +108,25 @@ sudo mv ./tmuxai /usr/local/bin/ After installing TmuxAI, you need to configure your API key to start using it: -1. **Set the API Key** - TmuxAI uses the OpenRouter endpoint by default. Set your API key by adding the following to your shell configuration (e.g., `~/.bashrc`, `~/.zshrc`): +1. **Set the API Key** + TmuxAI supports multiple AI providers. Choose one of the following: + **OpenAI:** ```bash - export TMUXAI_OPENROUTER_API_KEY="your-api-key-here" + export TMUXAI_OPENAI_API_KEY="your-openai-api-key-here" + export TMUXAI_OPENAI_MODEL="gpt-5-codex" + ``` + + **OpenRouter (Default):** + ```bash + export TMUXAI_OPENROUTER_API_KEY="your-openrouter-api-key-here" + ``` + + **Azure OpenAI:** + ```bash + export TMUXAI_AZURE_OPENAI_API_KEY="your-azure-api-key-here" + export TMUXAI_AZURE_OPENAI_API_BASE="https://your-resource.openai.azure.com/" + export TMUXAI_AZURE_OPENAI_DEPLOYMENT_NAME="your-deployment" ``` 2. **Start TmuxAI** @@ -121,6 +135,8 @@ After installing TmuxAI, you need to configure your API key to start using it: tmuxai ``` + **Provider Priority:** If multiple API keys are configured, TmuxAI will use them in this order: OpenAI → Azure OpenAI → OpenRouter. + ## TmuxAI Layout ![Panes](https://tmuxai.dev/shots/panes.png?lastmode=1) @@ -331,8 +347,18 @@ All configuration options can also be set via environment variables, which take # Examples export TMUXAI_DEBUG=true export TMUXAI_MAX_CAPTURE_LINES=300 -export TMUXAI_OPENROUTER_API_KEY="your-api-key-here" + +# OpenAI (Responses API) +export TMUXAI_OPENAI_API_KEY="your-openai-api-key-here" +export TMUXAI_OPENAI_MODEL="gpt-5-codex" +export TMUXAI_OPENAI_BASE_URL="https://api.openai.com/v1" # Optional + +# OpenRouter (Chat Completions API) +export TMUXAI_OPENROUTER_API_KEY="your-openrouter-api-key-here" export TMUXAI_OPENROUTER_MODEL="..." 
+export TMUXAI_OPENROUTER_BASE_URL="https://openrouter.ai/api/v1" # Optional + +# Azure OpenAI (Chat Completions API) export TMUXAI_AZURE_OPENAI_API_KEY="your-azure-api-key" export TMUXAI_AZURE_OPENAI_API_BASE="https://your-resource.openai.azure.com/" export TMUXAI_AZURE_OPENAI_API_VERSION="2025-04-01-preview" @@ -343,8 +369,12 @@ You can also use environment variables directly within your configuration file v ```yaml # Example config.yaml with environment variable expansion -openrouter: +openai: api_key: "${OPENAI_API_KEY}" + model: "gpt-5" + +openrouter: + api_key: "${OPENROUTER_API_KEY}" base_url: https://api.openai.com/v1 ``` @@ -358,6 +388,9 @@ TmuxAI » /config # Override a configuration value for this session TmuxAI » /config set max_capture_lines 300 +TmuxAI » /config set openai.model gpt-5-codex +TmuxAI » /config set openai.api_key "your-api-key" +TmuxAI » /config set openai.base_url "https://api.openai.com/v1" TmuxAI » /config set openrouter.model gpt-4o-mini ``` @@ -365,9 +398,22 @@ These changes will persist only for the current session and won't modify your co ### Using Other AI Providers -OpenRouter is OpenAI API-compatible, so you can direct TmuxAI at OpenAI or any other OpenAI API-compatible endpoint by customizing the `base_url`. 
+TmuxAI supports multiple AI providers with different API formats: + +#### OpenAI (Responses API) + +For the best experience with new OpenAI models like GPT-5, use the dedicated OpenAI configuration: + +```yaml +openai: + api_key: "your-openai-api-key" + model: "gpt-5-codex" + base_url: "https://api.openai.com/v1" # Optional, defaults to this +``` + +#### OpenRouter (Chat Completions API) -For OpenAI: +OpenRouter is OpenAI API-compatible, so you can direct TmuxAI at OpenAI or any other OpenAI API-compatible endpoint by customizing the `base_url`: ```yaml openrouter: @@ -403,7 +449,9 @@ openrouter: base_url: http://localhost:11434/v1 ``` -For Azure OpenAI: +#### Azure OpenAI (Chat Completions API) + +For Azure OpenAI deployments: ```yaml azure_openai: @@ -413,6 +461,16 @@ azure_openai: deployment_name: "gpt-4o" ``` +#### Provider Selection + +TmuxAI automatically selects the AI provider based on configuration priority: + +1. **OpenAI** (Responses API) - If `openai.api_key` is configured +2. **Azure OpenAI** (Chat Completions) - If `azure_openai.api_key` is configured +3. **OpenRouter** (Chat Completions) - Default fallback + +You can override the provider at runtime by setting the appropriate API key and model configuration. 
+ _Prompts are currently tuned for Gemini 2.5 by default; behavior with other models may vary._ ## Contributing diff --git a/config.example.yaml b/config.example.yaml index 35db5c6..91d9568 100644 --- a/config.example.yaml +++ b/config.example.yaml @@ -6,7 +6,13 @@ send_keys_confirm: true # Confirm before executing send keys paste_multiline_confirm: true # Confirm before pasting multiline content exec_confirm: true # Confirm before executing commands -# Not only OpenRouter, you can use any OpenAI compatible API +# OpenAI Responses API (recommended for new models like GPT-5) +# openai: +# api_key: sk-XXXXXXXXX +# model: gpt-5-codex +# base_url: https://api.openai.com/v1 # Optional, defaults to this + +# With OpenRouter, you can use any Chat Completions API openrouter: api_key: sk-or-v1-XXXXXXXXX model: google/gemini-2.5-flash-preview # default model @@ -19,16 +25,10 @@ openrouter: # api_version: 2025-04-01-preview # deployment_name: gpt-4o -# OpenAI example -# openrouter: -# api_key: sk-XXXXXXXXX -# model: o4-mini-2025-04-16 -# base_url: https://api.openai.com/v1 - # Anthropic example # openrouter: # api_key: sk-proj-XXX -# model: +# model: claude-sonnet-4-5 # base_url: https://api.anthropic.com/v1 # Local Ollama example diff --git a/config/config.go b/config/config.go index 0de1cdc..1076887 100644 --- a/config/config.go +++ b/config/config.go @@ -22,6 +22,7 @@ type Config struct { WhitelistPatterns []string `mapstructure:"whitelist_patterns"` BlacklistPatterns []string `mapstructure:"blacklist_patterns"` OpenRouter OpenRouterConfig `mapstructure:"openrouter"` + OpenAI OpenAIConfig `mapstructure:"openai"` AzureOpenAI AzureOpenAIConfig `mapstructure:"azure_openai"` Prompts PromptsConfig `mapstructure:"prompts"` } @@ -33,6 +34,13 @@ type OpenRouterConfig struct { BaseURL string `mapstructure:"base_url"` } +// OpenAIConfig holds OpenAI API configuration +type OpenAIConfig struct { + APIKey string `mapstructure:"api_key"` + Model string `mapstructure:"model"` + BaseURL
string `mapstructure:"base_url"` +} + // AzureOpenAIConfig holds Azure OpenAI API configuration type AzureOpenAIConfig struct { APIKey string `mapstructure:"api_key"` @@ -65,6 +73,9 @@ func DefaultConfig() *Config { BaseURL: "https://openrouter.ai/api/v1", Model: "google/gemini-2.5-flash-preview", }, + OpenAI: OpenAIConfig{ + BaseURL: "https://api.openai.com/v1", + }, AzureOpenAI: AzureOpenAIConfig{}, Prompts: PromptsConfig{ BaseSystem: ``, diff --git a/internal/ai_client.go b/internal/ai_client.go index 885f3cd..5557803 100644 --- a/internal/ai_client.go +++ b/internal/ai_client.go @@ -47,6 +47,67 @@ type ChatCompletionResponse struct { Choices []ChatCompletionChoice `json:"choices"` } +// Responses API Types + +// ResponseInput represents the input for the Responses API +type ResponseInput interface{} + +// ResponseContent represents content in the Responses API +type ResponseContent struct { + Type string `json:"type"` + Text string `json:"text,omitempty"` + Annotations []interface{} `json:"annotations,omitempty"` +} + +// ResponseOutputItem represents an output item in the Responses API +type ResponseOutputItem struct { + ID string `json:"id"` + Type string `json:"type"` // "message", "reasoning", "function_call", etc. + Status string `json:"status,omitempty"` // "completed", "in_progress", etc. + Content []ResponseContent `json:"content,omitempty"` + Role string `json:"role,omitempty"` // "assistant", "user", etc. 
+ Summary []interface{} `json:"summary,omitempty"` +} + +// ResponseRequest represents a request to the Responses API +type ResponseRequest struct { + Model string `json:"model"` + Input ResponseInput `json:"input"` + Instructions string `json:"instructions,omitempty"` + Tools []interface{} `json:"tools,omitempty"` + PreviousResponseID string `json:"previous_response_id,omitempty"` + Store bool `json:"store,omitempty"` + Include []string `json:"include,omitempty"` + Text map[string]interface{} `json:"text,omitempty"` // for structured outputs +} + +// Response represents a response from the Responses API +type Response struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + Model string `json:"model"` + Output []ResponseOutputItem `json:"output"` + OutputText string `json:"output_text,omitempty"` + Error *ResponseError `json:"error,omitempty"` + Usage *ResponseUsage `json:"usage,omitempty"` +} + +// ResponseError represents an error in the Responses API +type ResponseError struct { + Message string `json:"message"` + Type string `json:"type"` + Code string `json:"code,omitempty"` +} + +// ResponseUsage represents token usage in the Responses API +type ResponseUsage struct { + InputTokens int `json:"input_tokens"` + OutputTokens int `json:"output_tokens"` + ReasoningTokens int `json:"reasoning_tokens,omitempty"` + TotalTokens int `json:"total_tokens"` +} + func NewAiClient(cfg *config.Config) *AiClient { return &AiClient{ config: cfg, @@ -54,6 +115,22 @@ func NewAiClient(cfg *config.Config) *AiClient { } } +// determineAPIType determines which API to use based on the model and configuration +func (c *AiClient) determineAPIType(model string) string { + // If OpenAI API key is configured, use Responses API + if c.config.OpenAI.APIKey != "" { + return "responses" + } + + // If Azure OpenAI is configured, use Azure Chat Completions + if c.config.AzureOpenAI.APIKey != "" { + return "azure" + } + + // Default to OpenRouter 
Chat Completions + return "openrouter" +} + // GetResponseFromChatMessages gets a response from the AI based on chat messages func (c *AiClient) GetResponseFromChatMessages(ctx context.Context, chatMessages []ChatMessage, model string) (string, error) { // Convert chat messages to AI client format @@ -76,10 +153,27 @@ func (c *AiClient) GetResponseFromChatMessages(ctx context.Context, chatMessages }) } - logger.Info("Sending %d messages to AI", len(aiMessages)) + logger.Info("Sending %d messages to AI using model: %s", len(aiMessages), model) + + // Determine which API to use + apiType := c.determineAPIType(model) + logger.Debug("Using API type: %s for model: %s", apiType, model) + + // Route to appropriate API + var response string + var err error + + switch apiType { + case "responses": + response, err = c.Response(ctx, aiMessages, model) + case "azure": + response, err = c.ChatCompletion(ctx, aiMessages, model) + case "openrouter": + response, err = c.ChatCompletion(ctx, aiMessages, model) + default: + return "", fmt.Errorf("unknown API type: %s", apiType) + } - // Get response from AI - response, err := c.ChatCompletion(ctx, aiMessages, model) if err != nil { return "", err } @@ -187,6 +281,128 @@ func (c *AiClient) ChatCompletion(ctx context.Context, messages []Message, model return "", fmt.Errorf("no completion choices returned (model: %s, status: %d)", model, resp.StatusCode) } +// Response sends a request to the OpenAI Responses API +func (c *AiClient) Response(ctx context.Context, messages []Message, model string) (string, error) { + // Convert messages to Responses API format + var input ResponseInput + var instructions string + + if len(messages) == 0 { + return "", fmt.Errorf("no messages provided") + } + + // Check if first message is a system message + if messages[0].Role == "system" { + instructions = messages[0].Content + if len(messages) > 1 { + input = messages[1:] + } else { + // Only system message provided, no user input + return "", 
fmt.Errorf("only system message provided, no user message to process") + } + } else { + input = messages + } + + reqBody := ResponseRequest{ + Model: model, + Input: input, + Instructions: instructions, + Store: false, // Default to stateless for better control over API usage and costs + } + + // Use OpenAI configuration + baseURL := strings.TrimSuffix(c.config.OpenAI.BaseURL, "/") + if baseURL == "" { + baseURL = "https://api.openai.com/v1" + } + url := baseURL + "/responses" + + reqJSON, err := json.Marshal(reqBody) + if err != nil { + logger.Error("Failed to marshal Responses API request: %v", err) + return "", fmt.Errorf("failed to marshal request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(reqJSON)) + if err != nil { + logger.Error("Failed to create Responses API request: %v", err) + return "", fmt.Errorf("failed to create request: %w", err) + } + + // Set headers + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+c.config.OpenAI.APIKey) + + req.Header.Set("HTTP-Referer", "https://github.com/alvinunreal/tmuxai") + req.Header.Set("X-Title", "TmuxAI") + + // Log the request details for debugging before sending + logger.Debug("Sending Responses API request to: %s with model: %s", url, model) + + // Send the request + resp, err := c.client.Do(req) + if err != nil { + if ctx.Err() == context.Canceled { + return "", fmt.Errorf("request canceled: %w", ctx.Err()) + } + logger.Error("Failed to send Responses API request: %v", err) + return "", fmt.Errorf("failed to send request: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + // Read the response + body, err := io.ReadAll(resp.Body) + if err != nil { + logger.Error("Failed to read Responses API response: %v", err) + return "", fmt.Errorf("failed to read response: %w", err) + } + + // Log the raw response for debugging + logger.Debug("Responses API response status: %d, response size: %d bytes", resp.StatusCode, 
len(body)) + + // Check for errors + if resp.StatusCode != http.StatusOK { + logger.Error("Responses API returned error: %s", body) + return "", fmt.Errorf("API returned error: %s", body) + } + + // Parse the response + var response Response + if err := json.Unmarshal(body, &response); err != nil { + logger.Error("Failed to unmarshal Responses API response: %v, body: %s", err, body) + return "", fmt.Errorf("failed to unmarshal response: %w", err) + } + + // Check for API errors in response body + if response.Error != nil { + logger.Error("Responses API returned error: %s", response.Error.Message) + return "", fmt.Errorf("API error: %s", response.Error.Message) + } + + // Return the response content + if response.OutputText != "" { + logger.Debug("Received Responses API response (%d characters): %s", len(response.OutputText), response.OutputText) + return response.OutputText, nil + } + + // If no output_text, extract from message items + for _, item := range response.Output { + if item.Type == "message" && item.Status == "completed" { + for _, content := range item.Content { + if (content.Type == "output_text" || content.Type == "text") && content.Text != "" { + logger.Debug("Received Responses API response from output items (%d characters): %s", len(content.Text), content.Text) + return content.Text, nil + } + } + } + } + + // Enhanced error for no response content + logger.Error("No response content returned. 
Raw response: %s", string(body)) + return "", fmt.Errorf("no response content returned (model: %s, status: %d)", model, resp.StatusCode) +} + func debugChatMessages(chatMessages []ChatMessage, response string) { timestamp := time.Now().Format("20060102-150405") diff --git a/internal/ai_client_test.go b/internal/ai_client_test.go index 2b25e8c..3f4ec8e 100644 --- a/internal/ai_client_test.go +++ b/internal/ai_client_test.go @@ -27,6 +27,7 @@ func TestAzureOpenAIEndpoint(t *testing.T) { cfg := &config.Config{ OpenRouter: config.OpenRouterConfig{}, + OpenAI: config.OpenAIConfig{}, AzureOpenAI: config.AzureOpenAIConfig{ APIKey: "test-key", APIBase: server.URL, @@ -45,3 +46,201 @@ func TestAzureOpenAIEndpoint(t *testing.T) { t.Errorf("unexpected response: %s", resp) } } + +func TestOpenAIResponsesEndpoint(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/responses" { + t.Errorf("unexpected path: %s", r.URL.Path) + } + if r.Header.Get("Authorization") != "Bearer test-key" { + t.Errorf("missing Authorization header: %s", r.Header.Get("Authorization")) + } + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(`{"output_text":"ok from responses api","id":"test-id","object":"response","created_at":1234567890}`)) + })) + defer server.Close() + + cfg := &config.Config{ + OpenRouter: config.OpenRouterConfig{}, + OpenAI: config.OpenAIConfig{ + APIKey: "test-key", + Model: "gpt-5", + BaseURL: server.URL, + }, + AzureOpenAI: config.AzureOpenAIConfig{}, + } + + client := NewAiClient(cfg) + msg := []Message{{Role: "user", Content: "hi"}} + resp, err := client.Response(context.Background(), msg, "gpt-5") + if err != nil { + t.Fatalf("Response error: %v", err) + } + if resp != "ok from responses api" { + t.Errorf("unexpected response: %s", resp) + } +} + +func TestOpenAIResponsesEndpointWithSystemMessage(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/responses" { + t.Errorf("unexpected path: %s", r.URL.Path) + } + if r.Header.Get("Authorization") != "Bearer test-key" { + t.Errorf("missing Authorization header") + } + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(`{"output":[{"type":"message","status":"completed","content":[{"type":"text","text":"ok with system instruction"}]}],"output_text":"ok with system instruction"}`)) + })) + defer server.Close() + + cfg := &config.Config{ + OpenRouter: config.OpenRouterConfig{}, + OpenAI: config.OpenAIConfig{ + APIKey: "test-key", + Model: "gpt-5-codex", + BaseURL: server.URL, + }, + AzureOpenAI: config.AzureOpenAIConfig{}, + } + + client := NewAiClient(cfg) + msg := []Message{ + {Role: "system", Content: "You are a helpful assistant"}, + {Role: "user", Content: "hi"}, + } + resp, err := client.Response(context.Background(), msg, "gpt-5-codex") + if err != nil { + t.Fatalf("Response error: %v", err) + } + if resp != "ok with system instruction" { + t.Errorf("unexpected response: %s", resp) + } +} + +func TestDetermineAPIType(t *testing.T) { + cfg := &config.Config{ + OpenRouter: config.OpenRouterConfig{ + APIKey: "openrouter-key", + Model: "openrouter-model", + }, + OpenAI: config.OpenAIConfig{ + APIKey: "openai-key", + Model: "gpt-5-codex", + }, + AzureOpenAI: config.AzureOpenAIConfig{}, + } + + client := NewAiClient(cfg) + + // Test OpenAI API type (highest priority) - should work with any model when OpenAI key is present + apiType := client.determineAPIType("gpt-5-codex") + if apiType != "responses" { + t.Errorf("expected 'responses', got %s", apiType) + } + + // Test that OpenAI is selected regardless of model when API key is present + apiType = client.determineAPIType("any-model") + if apiType != "responses" { + t.Errorf("expected 'responses' for any model when OpenAI key is present, got %s", apiType) + } + + // Test Azure API type + cfg.OpenAI.APIKey = "" + 
cfg.AzureOpenAI.APIKey = "azure-key" + client = NewAiClient(cfg) + apiType = client.determineAPIType("any-model") + if apiType != "azure" { + t.Errorf("expected 'azure', got %s", apiType) + } + + // Test OpenRouter API type (default) + cfg.AzureOpenAI.APIKey = "" + client = NewAiClient(cfg) + apiType = client.determineAPIType("openrouter-model") + if apiType != "openrouter" { + t.Errorf("expected 'openrouter', got %s", apiType) + } +} + +func TestSessionOverrides(t *testing.T) { + cfg := &config.Config{ + OpenRouter: config.OpenRouterConfig{ + APIKey: "original-openrouter-key", + Model: "original-openrouter-model", + }, + OpenAI: config.OpenAIConfig{ + APIKey: "original-openai-key", + Model: "original-openai-model", + }, + AzureOpenAI: config.AzureOpenAIConfig{ + APIKey: "original-azure-key", + DeploymentName: "original-deployment", + }, + } + + manager := &Manager{ + Config: cfg, + SessionOverrides: make(map[string]interface{}), + } + + // Test that original values are returned without overrides + if manager.GetOpenAIAPIKey() != "original-openai-key" { + t.Errorf("expected original OpenAI API key, got %s", manager.GetOpenAIAPIKey()) + } + if manager.GetOpenAIModel() != "original-openai-model" { + t.Errorf("expected original OpenAI model, got %s", manager.GetOpenAIModel()) + } + + // Test session overrides for OpenAI + manager.SessionOverrides["openai.api_key"] = "override-openai-key" + manager.SessionOverrides["openai.model"] = "override-openai-model" + manager.SessionOverrides["openai.base_url"] = "https://override.example.com" + + if manager.GetOpenAIAPIKey() != "override-openai-key" { + t.Errorf("expected overridden OpenAI API key, got %s", manager.GetOpenAIAPIKey()) + } + if manager.GetOpenAIModel() != "override-openai-model" { + t.Errorf("expected overridden OpenAI model, got %s", manager.GetOpenAIModel()) + } + if manager.GetOpenAIBaseURL() != "https://override.example.com" { + t.Errorf("expected overridden OpenAI base URL, got %s", 
manager.GetOpenAIBaseURL()) + } + + // Test session overrides for Azure + manager.SessionOverrides["azure_openai.api_key"] = "override-azure-key" + manager.SessionOverrides["azure_openai.deployment_name"] = "override-deployment" + + if manager.GetAzureOpenAIAPIKey() != "override-azure-key" { + t.Errorf("expected overridden Azure API key, got %s", manager.GetAzureOpenAIAPIKey()) + } + if manager.GetAzureOpenAIDeploymentName() != "override-deployment" { + t.Errorf("expected overridden Azure deployment, got %s", manager.GetAzureOpenAIDeploymentName()) + } + + // Test that GetModel() respects session overrides + // With OpenAI override + if manager.GetModel() != "override-openai-model" { + t.Errorf("expected overridden OpenAI model from GetModel(), got %s", manager.GetModel()) + } + + // Test clearing OpenAI config entirely to fall back to Azure + originalOpenAIKey := manager.Config.OpenAI.APIKey + manager.Config.OpenAI.APIKey = "" // Clear original OpenAI API key + delete(manager.SessionOverrides, "openai.api_key") + if manager.GetModel() != "override-deployment" { + t.Errorf("expected overridden Azure deployment from GetModel(), got %s", manager.GetModel()) + } + + // Clear Azure config entirely to fall back to OpenRouter + originalAzureKey := manager.Config.AzureOpenAI.APIKey + manager.Config.AzureOpenAI.APIKey = "" // Clear original Azure API key + delete(manager.SessionOverrides, "azure_openai.api_key") + if manager.GetModel() != "original-openrouter-model" { + t.Errorf("expected original OpenRouter model from GetModel(), got %s", manager.GetModel()) + } + + // Restore original config for other tests + manager.Config.OpenAI.APIKey = originalOpenAIKey + manager.Config.AzureOpenAI.APIKey = originalAzureKey +} diff --git a/internal/config_helpers.go b/internal/config_helpers.go index f0aa5d2..ba2876b 100644 --- a/internal/config_helpers.go +++ b/internal/config_helpers.go @@ -15,6 +15,13 @@ var AllowedConfigKeys = []string{ "paste_multiline_confirm", "exec_confirm", 
"openrouter.model", + "openai.api_key", + "openai.model", + "openai.base_url", + "azure_openai.api_key", + "azure_openai.deployment_name", + "azure_openai.api_base", + "azure_openai.api_version", } // GetMaxCaptureLines returns the max capture lines value with session override if present @@ -83,6 +90,83 @@ func (m *Manager) GetOpenRouterModel() string { return m.Config.OpenRouter.Model } +// GetOpenAIModel returns the OpenAI model value with session override if present +func (m *Manager) GetOpenAIModel() string { + if override, exists := m.SessionOverrides["openai.model"]; exists { + if val, ok := override.(string); ok { + return val + } + } + return m.Config.OpenAI.Model +} + +// GetOpenAIAPIKey returns the OpenAI API key value with session override if present +func (m *Manager) GetOpenAIAPIKey() string { + if override, exists := m.SessionOverrides["openai.api_key"]; exists { + if val, ok := override.(string); ok { + return val + } + } + return m.Config.OpenAI.APIKey +} + +// GetOpenAIBaseURL returns the OpenAI base URL value with session override if present +func (m *Manager) GetOpenAIBaseURL() string { + if override, exists := m.SessionOverrides["openai.base_url"]; exists { + if val, ok := override.(string); ok { + return val + } + } + return m.Config.OpenAI.BaseURL +} + +// GetAzureOpenAIAPIKey returns the Azure OpenAI API key value with session override if present +func (m *Manager) GetAzureOpenAIAPIKey() string { + if override, exists := m.SessionOverrides["azure_openai.api_key"]; exists { + if val, ok := override.(string); ok { + return val + } + } + return m.Config.AzureOpenAI.APIKey +} + +// GetAzureOpenAIDeploymentName returns the Azure OpenAI deployment name value with session override if present +func (m *Manager) GetAzureOpenAIDeploymentName() string { + if override, exists := m.SessionOverrides["azure_openai.deployment_name"]; exists { + if val, ok := override.(string); ok { + return val + } + } + return m.Config.AzureOpenAI.DeploymentName +} + +// 
GetModel returns the appropriate model based on configuration priority +// Priority: OpenAI > Azure > OpenRouter +func (m *Manager) GetModel() string { + // If OpenAI is configured, use OpenAI model + if m.GetOpenAIAPIKey() != "" { + model := m.GetOpenAIModel() + if model != "" { + return model + } + // Default model for OpenAI if not specified + return "gpt-5-codex" + } + + // If Azure is configured, use Azure deployment name + if m.GetAzureOpenAIAPIKey() != "" { + deployment := m.GetAzureOpenAIDeploymentName() + if deployment != "" { + return deployment + } + // Default deployment for Azure if not specified + return "gpt-4o" + } + + // Default to OpenRouter + return m.GetOpenRouterModel() +} + // FormatConfig returns a nicely formatted string of all config values with session overrides applied func (m *Manager) FormatConfig() string { var result strings.Builder diff --git a/internal/manager.go b/internal/manager.go index 2ebc27b..9041d36 100644 --- a/internal/manager.go +++ b/internal/manager.go @@ -50,8 +50,8 @@ type Manager struct { // NewManager creates a new manager agent func NewManager(cfg *config.Config) (*Manager, error) { - if cfg.OpenRouter.APIKey == "" && cfg.AzureOpenAI.APIKey == "" { - fmt.Println("An API key is required. Set OpenRouter or Azure OpenAI credentials in the config file or environment variables.") + if cfg.OpenRouter.APIKey == "" && cfg.AzureOpenAI.APIKey == "" && cfg.OpenAI.APIKey == "" { + fmt.Println("An API key is required. 
Set OpenAI, OpenRouter, or Azure OpenAI credentials in the config file or environment variables.") return nil, fmt.Errorf("API key required") } diff --git a/internal/process_message.go b/internal/process_message.go index 02d499b..cbc1cb2 100644 --- a/internal/process_message.go +++ b/internal/process_message.go @@ -54,7 +54,7 @@ func (m *Manager) ProcessUserMessage(ctx context.Context, message string) bool { sending := append(history, currentMessage) - response, err := m.AiClient.GetResponseFromChatMessages(ctx, sending, m.GetOpenRouterModel()) + response, err := m.AiClient.GetResponseFromChatMessages(ctx, sending, m.GetModel()) if err != nil { s.Stop() m.Status = ""