diff --git a/lib/active_agent/generation_provider/anthropic_provider.rb b/lib/active_agent/generation_provider/anthropic_provider.rb
index dfc9ff6d..9610f724 100644
--- a/lib/active_agent/generation_provider/anthropic_provider.rb
+++ b/lib/active_agent/generation_provider/anthropic_provider.rb
@@ -36,9 +36,12 @@ def generate(prompt)
       end
 
       def chat_prompt(parameters: prompt_parameters)
-        parameters[:stream] = provider_stream if prompt.options[:stream] || config["stream"]
+        if prompt.options[:stream] || config["stream"]
+          parameters[:stream] = provider_stream
+          @streaming_request_params = parameters
+        end
 
-        chat_response(@client.messages(parameters: parameters))
+        chat_response(@client.messages(parameters: parameters), parameters)
       end
 
       protected
@@ -120,7 +123,7 @@ def convert_role(role)
         end
       end
 
-      def chat_response(response)
+      def chat_response(response, request_params = nil)
         return @response if prompt.options[:stream]
 
         content = response["content"].first["text"]
@@ -137,7 +140,8 @@ def chat_response(response)
         @response = ActiveAgent::GenerationProvider::Response.new(
           prompt: prompt,
           message: message,
-          raw_response: response
+          raw_response: response,
+          raw_request: request_params
         )
       end
 
diff --git a/lib/active_agent/generation_provider/open_ai_provider.rb b/lib/active_agent/generation_provider/open_ai_provider.rb
index ac36f1bb..76a38d99 100644
--- a/lib/active_agent/generation_provider/open_ai_provider.rb
+++ b/lib/active_agent/generation_provider/open_ai_provider.rb
@@ -69,7 +69,12 @@ def process_stream_chunk(chunk, message, agent_stream)
         elsif chunk.dig("choices", 0, "delta", "tool_calls") && chunk.dig("choices", 0, "delta", "role")
           message = handle_message(chunk.dig("choices", 0, "delta"))
           prompt.messages << message
-          @response = ActiveAgent::GenerationProvider::Response.new(prompt:, message:)
+          @response = ActiveAgent::GenerationProvider::Response.new(
+            prompt:,
+            message:,
+            raw_response: chunk,
+            raw_request: @streaming_request_params
+          )
         end
 
         if chunk.dig("choices", 0, "finish_reason")
@@ -92,7 +97,7 @@ def format_image_content(message)
       # The format_tools method comes from ToolManagement module
       # The provider_messages method comes from MessageFormatting module
 
-      def chat_response(response)
+      def chat_response(response, request_params = nil)
         return @response if prompt.options[:stream]
         message_json = response.dig("choices", 0, "message")
         message_json["id"] = response.dig("id") if message_json["id"].blank?
@@ -100,10 +105,15 @@ def chat_response(response)
 
         update_context(prompt: prompt, message: message, response: response)
 
-        @response = ActiveAgent::GenerationProvider::Response.new(prompt: prompt, message: message, raw_response: response)
+        @response = ActiveAgent::GenerationProvider::Response.new(
+          prompt: prompt,
+          message: message,
+          raw_response: response,
+          raw_request: request_params
+        )
       end
 
-      def responses_response(response)
+      def responses_response(response, request_params = nil)
         message_json = response["output"].find { |output_item| output_item["type"] == "message" }
         message_json["id"] = response.dig("id") if message_json["id"].blank?
 
@@ -116,7 +126,12 @@ def responses_response(response)
           content_type: prompt.output_schema.present? ? "application/json" : "text/plain",
         )
 
-        @response = ActiveAgent::GenerationProvider::Response.new(prompt: prompt, message: message, raw_response: response)
+        @response = ActiveAgent::GenerationProvider::Response.new(
+          prompt: prompt,
+          message: message,
+          raw_response: response,
+          raw_request: request_params
+        )
       end
 
       def handle_message(message_json)
@@ -133,13 +148,16 @@ def handle_message(message_json)
       # handle_actions is now provided by ToolManagement module
 
       def chat_prompt(parameters: prompt_parameters)
-        parameters[:stream] = provider_stream if prompt.options[:stream] || config["stream"]
-        chat_response(@client.chat(parameters: parameters))
+        if prompt.options[:stream] || config["stream"]
+          parameters[:stream] = provider_stream
+          @streaming_request_params = parameters
+        end
+        chat_response(@client.chat(parameters: parameters), parameters)
       end
 
       def responses_prompt(parameters: responses_parameters)
         # parameters[:stream] = provider_stream if prompt.options[:stream] || config["stream"]
-        responses_response(@client.responses.create(parameters: parameters))
+        responses_response(@client.responses.create(parameters: parameters), parameters)
       end
 
       def responses_parameters(model: @prompt.options[:model] || @model_name, messages: @prompt.messages, temperature: @prompt.options[:temperature] || @config["temperature"] || 0.7, tools: @prompt.actions, structured_output: @prompt.output_schema)
@@ -158,14 +176,20 @@ def embeddings_parameters(input: prompt.message.content, model: "text-embedding-
         }
       end
 
-      def embeddings_response(response)
+      def embeddings_response(response, request_params = nil)
         message = ActiveAgent::ActionPrompt::Message.new(content: response.dig("data", 0, "embedding"), role: "assistant")
 
-        @response = ActiveAgent::GenerationProvider::Response.new(prompt: prompt, message: message, raw_response: response)
+        @response = ActiveAgent::GenerationProvider::Response.new(
+          prompt: prompt,
+          message: message,
+          raw_response: response,
+          raw_request: request_params
+        )
       end
 
       def embeddings_prompt(parameters:)
-        embeddings_response(@client.embeddings(parameters: embeddings_parameters))
+        params = embeddings_parameters
+        embeddings_response(@client.embeddings(parameters: params), params)
       end
     end
   end
diff --git a/lib/active_agent/generation_provider/open_router_provider.rb b/lib/active_agent/generation_provider/open_router_provider.rb
index fae73d73..8ecd8419 100644
--- a/lib/active_agent/generation_provider/open_router_provider.rb
+++ b/lib/active_agent/generation_provider/open_router_provider.rb
@@ -38,7 +38,7 @@ def initialize(config)
         uri_base: "https://openrouter.ai/api/v1",
         access_token: @access_token,
         log_errors: Rails.env.development?,
-        default_headers: openrouter_headers
+        extra_headers: openrouter_headers
       )
     end
 
diff --git a/lib/active_agent/generation_provider/response.rb b/lib/active_agent/generation_provider/response.rb
index 2cec82fb..fa0dff16 100644
--- a/lib/active_agent/generation_provider/response.rb
+++ b/lib/active_agent/generation_provider/response.rb
@@ -3,13 +3,14 @@ module ActiveAgent
   module GenerationProvider
     class Response
-      attr_reader :message, :prompt, :raw_response
+      attr_reader :message, :prompt, :raw_response, :raw_request
       attr_accessor :metadata
 
-      def initialize(prompt:, message: nil, raw_response: nil, metadata: nil)
+      def initialize(prompt:, message: nil, raw_response: nil, raw_request: nil, metadata: nil)
         @prompt = prompt
         @message = message || prompt.message
         @raw_response = raw_response
+        @raw_request = sanitize_request(raw_request)
         @metadata = metadata || {}
       end
 
@@ -17,14 +18,9 @@ def initialize(prompt:, message: nil, raw_response: nil, metadata: nil)
 
       def usage
         return nil unless @raw_response
 
-        # OpenAI/OpenRouter format
+        # Most providers store usage in the same format
         if @raw_response.is_a?(Hash) && @raw_response["usage"]
           @raw_response["usage"]
-        # Anthropic format
-        elsif @raw_response.is_a?(Hash) && @raw_response["usage"]
-          @raw_response["usage"]
-        else
-          nil
         end
       end
 
@@ -40,6 +36,40 @@ def completion_tokens
       def total_tokens
         usage&.dig("total_tokens")
       end
+
+      private
+
+      def sanitize_request(request)
+        return nil if request.nil?
+        return request unless request.is_a?(Hash)
+
+        # Deep clone the request to avoid modifying the original
+        sanitized = request.deep_dup
+
+        # Sanitize any string values in the request
+        sanitize_hash_values(sanitized)
+      end
+
+      def sanitize_hash_values(hash)
+        hash.each do |key, value|
+          case value
+          when String
+            # Use ActiveAgent's sanitize_credentials to replace sensitive data
+            hash[key] = ActiveAgent.sanitize_credentials(value)
+          when Hash
+            sanitize_hash_values(value)
+          when Array
+            value.each_with_index do |item, index|
+              if item.is_a?(String)
+                value[index] = ActiveAgent.sanitize_credentials(item)
+              elsif item.is_a?(Hash)
+                sanitize_hash_values(item)
+              end
+            end
+          end
+        end
+        hash
+      end
     end
   end
 end
diff --git a/test/dummy/config/credentials.yml.enc b/test/dummy/config/credentials.yml.enc
index 280f5940..924c149c 100644
--- a/test/dummy/config/credentials.yml.enc
+++ b/test/dummy/config/credentials.yml.enc
@@ -1 +1 @@
-7RXYXaZ+9Xqohw1oMp++FYX/bKiNI9tBusC5oBNAPdj12WYZmBn+d4GZk1whE0gqwJjscVDk0dJgIXt3sZlqDHIe7pd8n4EQsnZ1mPXk2R5QVyEPuiIfISBhx1skCgqXI0ga9HBGalGoUxQLtEO2rbCYea7YCfOnwFqLsZ9ZD8ciiL7hLY3jHafhRo7CuRYcBpzOlaZLlB574nLphtxgsL0xxAi8t7bdueLpDegAxSHmpZNzWmMkNcC7W9UmCVlyieP1jCAhFkuS5JMG3WpPYo1Ft6PsvYf8rcFJEhr8s/L75B6MFZFW/45YdHRWBron9CJTNxMIdfzY3E30Bb2zoL3juv0BRbiXC3PkkB1HT+cyxRPR8XYASVHLOLH0enFE+839OFI5edOFDtMQsPoO8BSbcnji45Xc3ISORDInOBEWtJ88vSDU2S9ufY79szgWydVBYZuOW5g8ayFGG4gWHusnivRLPfjigkL8/44tb7Fmuh2vdewNma+4c1Yvj7xFMO8K4cba0xsysdPYUaCxh1Ys6tk9NZ4Y7A1+QwRFxSSHXJtuP5JsTs5vvZxc5BEPXOxv81dwmAObmSPZBGodP2XqfRb8ludCFUW/v7g24ZZx1SVTzfk7LW6bhF6+oQMkUckeXeoJrTEZBlDUuOXbrtmd+CudZukXAz8JIIhJx2ZRFNkI/yl88Usmo3vIV1topRzm0kKTdXXg0q2/VO+uOY0rF6ZW/eCYyttrcl2KSEFLTtUneKsVJczizY4L+wqe5b+tpOUHzv/EuVbsbiXZU77S/bcbWGLbb6juJgtMSUkBzAYeY5jPwdLQ2l5Vn5yCd8e4FmHn/l3cIXjrtyvjxulUnm/PkZRb0YggC8795CL/DDpftCAZqmCKV34ypn+LMhqHh1fKYuWbnuBvX0PaIXb42u2fbCrFUvAa1KRCK6OKp7j6j7H1i9eZofpQZEhOGBYOOZaZV17LVcCOO4WuskNR6UxM/BnmJ3oTMRw+wEi+KEFVKvYWDTQeepnk6p6GjVCjbr3w2VDXtpjWb3vLWh/QMwHGpPzNltmroZZDAsEsxXXZQ+3nsp4tQyeLO8w8xE1vu1lKgcFxHaQ4IX/bXSaUTOJ0OovUEL7JjY63Sr9+f9KLTWbbgQ/QZrBwoGfwB364ylOt1Vfojqz6ATasdcgQXe38L548mmEDBrJ1idfj0mqNznzYpzB7/waLK7SG92pEF6b67uRxXG73PZfCVrRGXHKwu2g27J1+bUT26Ojpa0qzHEIEEUabufJSxNKKEAXqgv/DCkXyq18YrZx+NA==--85+CXyjvfbFJi6BM--CHQakeLJ4Z0qHE/8QWnxVQ==
\ No newline at end of file
+j1oFzRL/Z5P7MYpUZlH1tiicXPeonq+0LknaUFGHhsrIGUkk7WXGSQN89AeAehInTbXvEA7C7QZsn0xbj7xdYRAb7BVoJk1MsREM7TkfWmXnvQN66jomSsu/rO9StAoVg1PvfUANDl1ubwrtfuNRydLKCR4+vb1ZZKYneFXXBc5mDHvEPKtEnMypMeUC4LanqY2iqPTtPFLBF/3M20ofctm0s5yRcfVMQO+9KwKvZ/nkNne1LLNSbOmM3243vw+1CqO4Kw54qDF1idMamdU3tqoS64CCtcVmUid7LuYdeK2rYOk2wASNgTaascIBdsl6ZdTXq2ODj4b7WJrrRaA1JYPccwQYB7dFaKT0pkI6vvBjA1LZuFxl+VTWSpjmZCKkw0rv99ckfbV1hdkRAOHB5o3g4ceksOuFDpgJaACptioie9IB96FTwxEIaigYt2GkVFDlqt8F9N+z98cg2FW3xxWR9373hBY7i+huU2i9f8fSxvmMrmqsBJ2B64UmDYSLfYEaOevfZ1EuEznJFHAomum2zmxT5yOzNrZFlstoC793KdpFSnQugBzpa1ULWuKrl5eybdiD97L/5U0nsFEltoyr+8gDxaZpIxgrOxTyGOm8khfaPQJlia0hNvuzM5VvglpiIOY+BsFhe5xBenP0Q068g65fbXiUqVk1mXz8yW8GyGKQs3MAQj9h6td9z1f3Bn77BpRFh4pElkn07jmF8wyLQLKSrJxq37gcDmu03wwFLQ32dBOAQbbikIoNcZE0eVneZARzohdzffZzk/60g3fUNrzi0d/UXQZt0PKuBrGeQfWxkllQUEH7UqTYCzRM7PD57weSAmD/8m3N6sFOFIwsdQ/17PwTi04gdPW9YDZbVXrtw4dmw+pRPqs8bORHu+seKSIuYpBZoZuq9go0QMqKW4lVNU8eI5V3u8MJ5K9i92cQZg5Y5zpWiNpgQQhTFAq66Wv1yzqV4hGQV+OGGdQt1KHSWEsmScdUPk33jRfy8POw/iMt2NvFLcCgdsoVnm1zDz9CnKnVzJQ6opGPoIg/ncIu2ErcICGrikeV8JFgBxXpb1K2tOxkyaV8t6DELJrkYcXQ1hHy2c2AfzwvIyYB+wRZuVL7m0EngbgGDVdykgzD+XR5aGLlwc2fXZb47GkF6rrsrNXJlc7FCIkpPAkVmIy3RoSGNri0DwwKHTY3Kf57N2oTP95Ae7nPPVs99WMSy4eL2Wooy9gxM0RfxaCYirakM2OmLYFZ6LMkmXWCaeZcoiPbdQojjDhrY3PRyQx2OuwevxQzazaAFwadqZoWMSkg2LaSz/8eMa5IwM5lfoorF9ZTO3iTXyDAcHCXc+IzKg9DJ6oMWnF22VSTUQu38XswCeUquy9jkN8hSFPwE34Qhx3CesKzmIuEm0m8w+oZWigDnU2/iGf/xwv3zsR9hafoedDuv8oh6FA017VWTel/v7HZHYgcLZMB7V1Kj3ZBdn0O7n6eVd+fpFGAhClK/N1vIsgCcmVwJYuqKcWYUapQ0CQN4U031oF4KknwHcWHTkVrtzLlfh1RKXaZ8prVr/QX/fKlCrm6V5Lvfml1PA==--AgUCVbFPGIA1BP05--YI0wFyd8L6DcXLi7cDU6Cg==
\ No newline at end of file
diff --git a/test/generation_provider/anthropic_raw_request_test.rb b/test/generation_provider/anthropic_raw_request_test.rb
new file mode 100644
index 00000000..57a2ea7a
--- /dev/null
+++ b/test/generation_provider/anthropic_raw_request_test.rb
@@ -0,0 +1,155 @@
+require "test_helper"
+require "active_agent/generation_provider/anthropic_provider"
+require "active_agent/action_prompt/prompt"
+
+module ActiveAgent
+  module GenerationProvider
+    class AnthropicRawRequestTest < ActiveSupport::TestCase
+      setup do
+        @config = {
+          "api_key" => "test-key",
+          "model" => "claude-3-opus-20240229"
+        }
+        @provider = AnthropicProvider.new(@config)
+
+        @prompt = ActiveAgent::ActionPrompt::Prompt.new(
+          messages: [
+            ActiveAgent::ActionPrompt::Message.new(
+              content: "Hello, Claude!",
+              role: "user"
+            )
+          ],
+          actions: [],
+          options: {},
+          output_schema: nil
+        )
+      end
+
+      test "chat_response includes raw_request when provided" do
+        mock_response = {
+          "id" => "msg-123",
+          "content" => [
+            {
+              "type" => "text",
+              "text" => "Hello! I'm Claude. How can I assist you today?"
+            }
+          ],
+          "stop_reason" => "end_turn",
+          "usage" => {
+            "input_tokens" => 10,
+            "output_tokens" => 12
+          }
+        }
+
+        request_params = {
+          model: "claude-3-opus-20240229",
+          messages: [ { role: "user", content: "Hello, Claude!" } ],
+          max_tokens: 1024,
+          temperature: 0.7
+        }
+
+        @provider.instance_variable_set(:@prompt, @prompt)
+        response = @provider.send(:chat_response, mock_response, request_params)
+
+        assert_not_nil response
+        assert_equal request_params, response.raw_request
+        assert_equal mock_response, response.raw_response
+      end
+
+      test "chat_response with tool use includes raw_request" do
+        mock_response = {
+          "id" => "msg-456",
+          "content" => [
+            {
+              "type" => "text",
+              "text" => "I'll help you with that calculation."
+            },
+            {
+              "type" => "tool_use",
+              "id" => "tool-789",
+              "name" => "calculator",
+              "input" => { "expression" => "2 + 2" }
+            }
+          ],
+          "stop_reason" => "tool_use"
+        }
+
+        request_params = {
+          model: "claude-3-opus-20240229",
+          messages: [ { role: "user", content: "What is 2 + 2?" } ],
+          tools: [
+            {
+              name: "calculator",
+              description: "Performs calculations",
+              input_schema: {
+                type: "object",
+                properties: {
+                  expression: { type: "string" }
+                }
+              }
+            }
+          ],
+          max_tokens: 1024
+        }
+
+        @provider.instance_variable_set(:@prompt, @prompt)
+        response = @provider.send(:chat_response, mock_response, request_params)
+
+        assert_not_nil response
+        assert_equal request_params, response.raw_request
+        assert_equal mock_response, response.raw_response
+        assert response.message.action_requested
+      end
+
+      test "streaming request params are captured" do
+        request_params = {
+          model: "claude-3-opus-20240229",
+          messages: [ { role: "user", content: "Stream test" } ],
+          stream: true,
+          max_tokens: 1024
+        }
+
+        @provider.instance_variable_set(:@prompt, @prompt)
+
+        # Simulate setting streaming params like in chat_prompt
+        @provider.instance_variable_set(:@streaming_request_params, request_params)
+
+        assert_equal request_params, @provider.instance_variable_get(:@streaming_request_params)
+      end
+
+      test "response includes metadata alongside raw_request and raw_response" do
+        mock_response = {
+          "id" => "msg-meta-123",
+          "content" => [
+            {
+              "type" => "text",
+              "text" => "Response with metadata"
+            }
+          ],
+          "stop_reason" => "end_turn",
+          "model" => "claude-3-opus-20240229",
+          "usage" => {
+            "input_tokens" => 5,
+            "output_tokens" => 4
+          }
+        }
+
+        request_params = {
+          model: "claude-3-opus-20240229",
+          messages: [ { role: "user", content: "Test" } ],
+          max_tokens: 100
+        }
+
+        @provider.instance_variable_set(:@prompt, @prompt)
+        response = @provider.send(:chat_response, mock_response, request_params)
+
+        assert_not_nil response
+        assert_equal request_params, response.raw_request
+        assert_equal mock_response, response.raw_response
+
+        # Response should also have metadata
+        assert_instance_of Hash, response.metadata
+      end
+    end
+  end
+end
diff --git a/test/generation_provider/open_router_provider_test.rb b/test/generation_provider/open_router_provider_test.rb
index 78782e4d..ce18f944 100644
--- a/test/generation_provider/open_router_provider_test.rb
+++ b/test/generation_provider/open_router_provider_test.rb
@@ -71,6 +71,79 @@ class OpenRouterProviderTest < ActiveSupport::TestCase
     assert_not_nil client
     # The client should be configured with OpenRouter base URL
     assert_equal "https://openrouter.ai/api/v1", client.instance_variable_get(:@uri_base)
+
+    # Verify extra headers are set
+    extra_headers = client.instance_variable_get(:@extra_headers)
+    assert_not_nil extra_headers
+    assert_equal "TestApp", extra_headers["X-Title"]
+    assert_equal "https://test.app", extra_headers["HTTP-Referer"]
+  end
+
+  test "uses default app name when not configured" do
+    config = @base_config.dup
+    config.delete("app_name")
+
+    provider = OpenRouterProvider.new(config)
+    client = provider.instance_variable_get(:@client)
+    extra_headers = client.instance_variable_get(:@extra_headers)
+
+    # Should use Rails app name or "ActiveAgent" as default
+    assert_not_nil extra_headers["X-Title"]
+    assert_includes [ "ActiveAgent", "Dummy" ], extra_headers["X-Title"]
+  end
+
+  test "uses default site URL from Rails config when not provided" do
+    config = @base_config.dup
+    config.delete("site_url")
+
+    provider = OpenRouterProvider.new(config)
+    client = provider.instance_variable_get(:@client)
+    extra_headers = client.instance_variable_get(:@extra_headers)
+
+    # Should either be localhost or no HTTP-Referer header
+    referer = extra_headers["HTTP-Referer"]
+    assert(referer.nil? || referer.include?("localhost") || referer.include?("example.com"))
+  end
+
+  test "headers are present when both app_name and site_url are configured" do
+    provider = OpenRouterProvider.new(@base_config)
+    headers = provider.send(:openrouter_headers)
+
+    assert_equal "TestApp", headers["X-Title"]
+    assert_equal "https://test.app", headers["HTTP-Referer"]
+  end
+
+  test "headers handle nil app_name gracefully" do
+    config = @base_config.merge("app_name" => nil)
+    provider = OpenRouterProvider.new(config)
+    headers = provider.send(:openrouter_headers)
+
+    # Should still have a header, using default
+    assert_not_nil headers["X-Title"]
+  end
+
+  test "headers handle nil site_url gracefully" do
+    config = @base_config.merge("site_url" => nil)
+    provider = OpenRouterProvider.new(config)
+    headers = provider.send(:openrouter_headers)
+
+    # HTTP-Referer might be nil or use default
+    # The key should exist but value might be nil
+    assert headers.key?("HTTP-Referer")
+  end
+
+  test "headers are passed to OpenAI client on initialization" do
+    provider = OpenRouterProvider.new(@base_config)
+    client = provider.instance_variable_get(:@client)
+
+    # The OpenAI::Client should receive the extra_headers
+    assert_not_nil client
+    extra_headers = client.instance_variable_get(:@extra_headers)
+
+    assert_equal({
+      "X-Title" => "TestApp",
+      "HTTP-Referer" => "https://test.app"
+    }, extra_headers)
   end
 
   test "builds OpenRouter-specific parameters with fallbacks" do
diff --git a/test/generation_provider/openai_raw_request_test.rb b/test/generation_provider/openai_raw_request_test.rb
new file mode 100644
index 00000000..b1bee239
--- /dev/null
+++ b/test/generation_provider/openai_raw_request_test.rb
@@ -0,0 +1,147 @@
+require "test_helper"
+require "active_agent/generation_provider/open_ai_provider"
+require "active_agent/action_prompt/prompt"
+
+module ActiveAgent
+  module GenerationProvider
+    class OpenAIRawRequestTest < ActiveSupport::TestCase
+      setup do
+        @config = {
+          "api_key" => "test-key",
+          "model" => "gpt-4"
+        }
+        @provider = OpenAIProvider.new(@config)
+
+        @prompt = ActiveAgent::ActionPrompt::Prompt.new(
+          messages: [
+            ActiveAgent::ActionPrompt::Message.new(
+              content: "Hello, world!",
+              role: "user"
+            )
+          ],
+          actions: [],
+          options: {},
+          output_schema: nil
+        )
+      end
+
+      test "chat_response includes raw_request when provided" do
+        mock_response = {
+          "id" => "chatcmpl-123",
+          "choices" => [
+            {
+              "message" => {
+                "role" => "assistant",
+                "content" => "Hello! How can I help you today?"
+              }
+            }
+          ]
+        }
+
+        request_params = {
+          model: "gpt-4",
+          messages: [ { role: "user", content: "Hello, world!" } ],
+          temperature: 0.7
+        }
+
+        @provider.instance_variable_set(:@prompt, @prompt)
+        response = @provider.send(:chat_response, mock_response, request_params)
+
+        assert_not_nil response
+        # Note: raw_request should be sanitized, but since our test key isn't in the
+        # sanitizers list, it should remain unchanged in this test
+        assert_equal request_params, response.raw_request
+        assert_equal mock_response, response.raw_response
+      end
+
+      test "chat_response sanitizes API keys in raw_request" do
+        # Setup sanitizers with our test key
+        original_config = ActiveAgent.config
+        test_config = { "openai" => { "api_key" => "test-key" } }
+        ActiveAgent.instance_variable_set(:@config, test_config)
+        ActiveAgent.sanitizers_reset!
+
+        mock_response = {
+          "id" => "chatcmpl-456",
+          "choices" => [
+            {
+              "message" => {
+                "role" => "assistant",
+                "content" => "Response"
+              }
+            }
+          ]
+        }
+
+        request_params = {
+          model: "gpt-4",
+          api_key: "test-key",
+          headers: { "Authorization" => "Bearer test-key" },
+          messages: [ { role: "user", content: "Message with key: test-key" } ]
+        }
+
+        @provider.instance_variable_set(:@prompt, @prompt)
+        response = @provider.send(:chat_response, mock_response, request_params)
+
+        # API key should be sanitized in raw_request
+        assert_equal "", response.raw_request[:api_key]
+        assert_equal "Bearer ", response.raw_request[:headers]["Authorization"]
+        assert_equal "Message with key: ",
+          response.raw_request[:messages][0][:content]
+
+        # Restore original config
+        ActiveAgent.instance_variable_set(:@config, original_config)
+        ActiveAgent.sanitizers_reset!
+      end
+
+      test "embeddings_response includes raw_request when provided" do
+        mock_response = {
+          "data" => [
+            {
+              "embedding" => [ 0.1, 0.2, 0.3 ]
+            }
+          ]
+        }
+
+        request_params = {
+          model: "text-embedding-ada-002",
+          input: "Hello, world!"
+        }
+
+        @provider.instance_variable_set(:@prompt, @prompt)
+        response = @provider.send(:embeddings_response, mock_response, request_params)
+
+        assert_not_nil response
+        assert_equal request_params, response.raw_request
+        assert_equal mock_response, response.raw_response
+      end
+
+      test "responses_response includes raw_request when provided" do
+        mock_response = {
+          "id" => "resp-123",
+          "output" => [
+            {
+              "type" => "message",
+              "id" => "msg-123",
+              "content" => [ { "text" => "Hello response" } ],
+              "role" => "assistant",
+              "finish_reason" => "stop"
+            }
+          ]
+        }
+
+        request_params = {
+          model: "gpt-4",
+          input: { messages: [ { role: "user", content: "Hello" } ] }
+        }
+
+        @provider.instance_variable_set(:@prompt, @prompt)
+        response = @provider.send(:responses_response, mock_response, request_params)
+
+        assert_not_nil response
+        assert_equal request_params, response.raw_request
+        assert_equal mock_response, response.raw_response
+      end
+    end
+  end
+end
diff --git a/test/generation_provider/openrouter_raw_request_test.rb b/test/generation_provider/openrouter_raw_request_test.rb
new file mode 100644
index 00000000..10e66756
--- /dev/null
+++ b/test/generation_provider/openrouter_raw_request_test.rb
@@ -0,0 +1,99 @@
+require "test_helper"
+require "active_agent/generation_provider/open_router_provider"
+require "active_agent/action_prompt/prompt"
+
+module ActiveAgent
+  module GenerationProvider
+    class OpenRouterRawRequestTest < ActiveSupport::TestCase
+      setup do
+        @config = {
+          "api_key" => "test-key",
+          "model" => "openai/gpt-4",
+          "app_name" => "TestApp",
+          "site_url" => "https://test.app"
+        }
+        @provider = OpenRouterProvider.new(@config)
+
+        @prompt = ActiveAgent::ActionPrompt::Prompt.new(
+          messages: [
+            ActiveAgent::ActionPrompt::Message.new(
+              content: "Hello, OpenRouter!",
+              role: "user"
+            )
+          ],
+          actions: [],
+          options: {},
+          output_schema: nil
+        )
+      end
+
+      test "inherits raw_request handling from OpenAI provider" do
+        mock_response = {
+          "id" => "gen-123",
+          "choices" => [
+            {
+              "message" => {
+                "role" => "assistant",
+                "content" => "Hello from OpenRouter!"
+              }
+            }
+          ],
+          "model" => "openai/gpt-4"
+        }
+
+        request_params = {
+          model: "openai/gpt-4",
+          messages: [ { role: "user", content: "Hello, OpenRouter!" } ],
+          temperature: 0.7,
+          provider: {
+            data_collection: "allow",
+            allow_fallbacks: true
+          }
+        }
+
+        @provider.instance_variable_set(:@prompt, @prompt)
+        response = @provider.send(:chat_response, mock_response, request_params)
+
+        assert_not_nil response
+        assert_equal request_params, response.raw_request
+        assert_equal mock_response, response.raw_response
+        assert_instance_of ActiveAgent::GenerationProvider::Response, response
+      end
+
+      test "raw_request includes OpenRouter-specific parameters" do
+        mock_response = {
+          "id" => "gen-456",
+          "choices" => [
+            {
+              "message" => {
+                "role" => "assistant",
+                "content" => "Response with fallback"
+              }
+            }
+          ],
+          "model" => "anthropic/claude-3-opus"
+        }
+
+        request_params = {
+          model: "openai/gpt-4",
+          models: [ "openai/gpt-4", "anthropic/claude-3-opus" ],
+          route: "fallback",
+          messages: [ { role: "user", content: "Test with fallbacks" } ],
+          provider: {
+            order: [ "OpenAI", "Anthropic" ],
+            data_collection: "deny",
+            allow_fallbacks: true
+          }
+        }
+
+        @provider.instance_variable_set(:@prompt, @prompt)
+        response = @provider.send(:chat_response, mock_response, request_params)
+
+        assert_not_nil response.raw_request
+        assert_equal [ "openai/gpt-4", "anthropic/claude-3-opus" ], response.raw_request[:models]
+        assert_equal "fallback", response.raw_request[:route]
+        assert_equal "deny", response.raw_request[:provider][:data_collection]
+      end
+    end
+  end
+end
diff --git a/test/generation_provider/response_sanitization_test.rb b/test/generation_provider/response_sanitization_test.rb
new file mode 100644
index 00000000..fe1dbdd4
--- /dev/null
+++ b/test/generation_provider/response_sanitization_test.rb
@@ -0,0 +1,179 @@
+require "test_helper"
+require "active_agent/generation_provider/response"
+require "active_agent/action_prompt/prompt"
+require "active_agent/action_prompt/message"
+
+module ActiveAgent
+  module GenerationProvider
+    class ResponseSanitizationTest < ActiveSupport::TestCase
+      setup do
+        @prompt = ActiveAgent::ActionPrompt::Prompt.new(
+          messages: [],
+          actions: [],
+          options: {},
+          output_schema: nil
+        )
+
+        @message = ActiveAgent::ActionPrompt::Message.new(
+          content: "Test response",
+          role: "assistant"
+        )
+
+        # Mock the ActiveAgent config to set up sanitizers
+        @original_config = ActiveAgent.config
+        test_config = {
+          "openai" => { "api_key" => "sk-test123secret" },
+          "anthropic" => { "access_token" => "ant-test456token" }
+        }
+        ActiveAgent.instance_variable_set(:@config, test_config)
+        ActiveAgent.sanitizers_reset!
+      end
+
+      teardown do
+        ActiveAgent.instance_variable_set(:@config, @original_config)
+        ActiveAgent.sanitizers_reset!
+      end
+
+      test "sanitizes API keys in raw_request" do
+        raw_request = {
+          model: "gpt-4",
+          messages: [ { role: "user", content: "Hello" } ],
+          api_key: "sk-test123secret",
+          headers: {
+            "Authorization" => "Bearer sk-test123secret"
+          }
+        }
+
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          raw_request: raw_request
+        )
+
+        # The API key should be replaced with a placeholder
+        assert_equal "", response.raw_request[:api_key]
+        assert_equal "Bearer ", response.raw_request[:headers]["Authorization"]
+
+        # Other fields should remain unchanged
+        assert_equal "gpt-4", response.raw_request[:model]
+        assert_equal [ { role: "user", content: "Hello" } ], response.raw_request[:messages]
+      end
+
+      test "sanitizes access tokens in nested structures" do
+        raw_request = {
+          model: "claude-3",
+          config: {
+            auth: {
+              token: "ant-test456token",
+              type: "bearer"
+            }
+          },
+          headers: {
+            "X-API-Key" => "ant-test456token"
+          }
+        }
+
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          raw_request: raw_request
+        )
+
+        # The access token should be replaced in nested hashes
+        assert_equal "", response.raw_request[:config][:auth][:token]
+        assert_equal "", response.raw_request[:headers]["X-API-Key"]
+        assert_equal "bearer", response.raw_request[:config][:auth][:type]
+      end
+
+      test "sanitizes credentials in arrays" do
+        raw_request = {
+          model: "gpt-4",
+          messages: [
+            { role: "system", content: "You have API key: sk-test123secret" },
+            { role: "user", content: "What's my token ant-test456token?" }
+          ],
+          tools: [
+            { name: "api_call", api_key: "sk-test123secret" }
+          ]
+        }
+
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          raw_request: raw_request
+        )
+
+        # Credentials should be sanitized in array elements
+        assert_equal "You have API key: ",
+          response.raw_request[:messages][0][:content]
+        assert_equal "What's my token ?",
+          response.raw_request[:messages][1][:content]
+        assert_equal "", response.raw_request[:tools][0][:api_key]
+      end
+
+      test "handles nil raw_request gracefully" do
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          raw_request: nil
+        )
+
+        assert_nil response.raw_request
+      end
+
+      test "handles non-hash raw_request gracefully" do
+        # If for some reason raw_request is not a hash (unlikely but defensive)
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          raw_request: "string_request"
+        )
+
+        assert_equal "string_request", response.raw_request
+      end
+
+      test "does not modify original request object" do
+        original_request = {
+          model: "gpt-4",
+          api_key: "sk-test123secret",
+          messages: [ { role: "user", content: "Hello with key sk-test123secret" } ]
+        }
+
+        # Keep a copy of the original to verify it wasn't modified
+        original_copy = original_request.deep_dup
+
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          raw_request: original_request
+        )
+
+        # Original should remain unchanged
+        assert_equal original_copy, original_request
+        assert_equal "sk-test123secret", original_request[:api_key]
+
+        # But response.raw_request should be sanitized
+        assert_equal "", response.raw_request[:api_key]
+      end
+
+      test "sanitizes multiple different credentials" do
+        raw_request = {
+          openai_key: "sk-test123secret",
+          anthropic_key: "ant-test456token",
+          combined: "Keys: sk-test123secret and ant-test456token"
+        }
+
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          raw_request: raw_request
+        )
+
+        assert_equal "", response.raw_request[:openai_key]
+        assert_equal "", response.raw_request[:anthropic_key]
+        assert_equal "Keys: and ",
+          response.raw_request[:combined]
+      end
+    end
+  end
+end
diff --git a/test/generation_provider/response_test.rb b/test/generation_provider/response_test.rb
new file mode 100644
index 00000000..3d3b285a
--- /dev/null
+++ b/test/generation_provider/response_test.rb
@@ -0,0 +1,121 @@
+require "test_helper"
+require "active_agent/generation_provider/response"
+require "active_agent/action_prompt/prompt"
+require "active_agent/action_prompt/message"
+
+module ActiveAgent
+  module GenerationProvider
+    class ResponseTest < ActiveSupport::TestCase
+      setup do
+        @prompt = ActiveAgent::ActionPrompt::Prompt.new(
+          messages: [],
+          actions: [],
+          options: {},
+          output_schema: nil
+        )
+
+        @message = ActiveAgent::ActionPrompt::Message.new(
+          content: "Test response",
+          role: "assistant"
+        )
+      end
+
+      test "initializes with raw_request parameter" do
+        raw_request = {
+          model: "gpt-4",
+          messages: [ { role: "user", content: "Hello" } ],
+          temperature: 0.7
+        }
+
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          raw_request: raw_request
+        )
+
+        assert_equal raw_request, response.raw_request
+        assert_equal @prompt, response.prompt
+        assert_equal @message, response.message
+      end
+
+      test "raw_request is optional and defaults to nil" do
+        response = Response.new(
+          prompt: @prompt,
+          message: @message
+        )
+
+        assert_nil response.raw_request
+      end
+
+      test "stores both raw_request and raw_response" do
+        raw_request = {
+          model: "gpt-4",
+          messages: [ { role: "user", content: "Hello" } ]
+        }
+
+        raw_response = {
+          "id" => "chatcmpl-123",
+          "choices" => [
+            {
+              "message" => {
+                "role" => "assistant",
+                "content" => "Hello! How can I help you?"
+              }
+            }
+          ],
+          "usage" => {
+            "prompt_tokens" => 10,
+            "completion_tokens" => 8,
+            "total_tokens" => 18
+          }
+        }
+
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          raw_request: raw_request,
+          raw_response: raw_response
+        )
+
+        assert_equal raw_request, response.raw_request
+        assert_equal raw_response, response.raw_response
+      end
+
+      test "usage helper methods work with raw_response" do
+        raw_response = {
+          "usage" => {
+            "prompt_tokens" => 100,
+            "completion_tokens" => 50,
+            "total_tokens" => 150
+          }
+        }
+
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          raw_response: raw_response
+        )
+
+        assert_equal 100, response.prompt_tokens
+        assert_equal 50, response.completion_tokens
+        assert_equal 150, response.total_tokens
+        assert_equal raw_response["usage"], response.usage
+      end
+
+      test "metadata can be set and accessed" do
+        response = Response.new(
+          prompt: @prompt,
+          message: @message,
+          metadata: { provider: "OpenAI", model_used: "gpt-4" }
+        )
+
+        assert_equal "OpenAI", response.metadata[:provider]
+        assert_equal "gpt-4", response.metadata[:model_used]
+
+        # Metadata is mutable
+        response.metadata[:trace_id] = "trace-123"
+        assert_equal "trace-123", response.metadata[:trace_id]
+      end
+    end
+  end
+end