Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
49 changes: 45 additions & 4 deletions lib/ruby_llm/provider.rb
Original file line number Diff line number Diff line change
Expand Up @@ -94,20 +94,61 @@ def remote?
self.class.remote?
end

# Parses an error message out of a provider API error response.
#
# Supports two error formats:
# 1. OpenAI standard: {"error": {"message": "...", "type": "...", "code": "..."}}
# 2. Simple format:   {"error": "error message"} (LM Studio, some local providers)
#
# A JSON array of such objects is also handled by joining the individual
# messages with '. '. A non-JSON body is returned as its string form.
#
# @param response [Faraday::Response] The HTTP response
# @return [String, nil] The error message, or nil if none can be extracted
#
# @example OpenAI format
#   response = double(body: '{"error": {"message": "Rate limit exceeded"}}')
#   parse_error(response) #=> "Rate limit exceeded"
#
# @example Simple format (LM Studio, some local providers)
#   response = double(body: '{"error": "Token limit exceeded"}')
#   parse_error(response) #=> "Token limit exceeded"
def parse_error(response)
  return if response.body.empty?

  body = try_parse_json(response.body)
  case body
  when Hash
    extract_error_message(body)
  when Array
    # Drop non-hash entries and entries without a usable message, then
    # join the rest into a single sentence-like string.
    body.filter_map { |part| part.is_a?(Hash) ? extract_error_message(part) : nil }
        .join('. ')
  else
    # try_parse_json returned the raw body (not valid JSON) — surface it as-is.
    body.to_s
  end
rescue StandardError => e
  # Parsing an error body must never raise; log and fall back to nil.
  RubyLLM.logger.debug "Error parsing response: #{e.message}"
  nil
end

# Extracts the message from a single {"error": ...} hash.
#
# Handles both formats:
# - {"error": {"message": "..."}} (OpenAI standard)
# - {"error": "message"}          (LM Studio, some providers)
#
# @param hash [Hash] a parsed JSON object that may contain an "error" key
# @return [String, nil] the message, or nil when "error" is absent/null/false
def extract_error_message(hash)
  error_value = hash['error']
  return nil unless error_value

  case error_value
  when Hash then error_value['message']
  when String then error_value
  else error_value.to_s # e.g. {"error": 500} or {"error": true}
  end
end

def format_messages(messages)
Expand Down
219 changes: 219 additions & 0 deletions spec/ruby_llm/provider_spec.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,219 @@
# frozen_string_literal: true

require 'spec_helper'

# Unit tests for RubyLLM::Provider#parse_error. The method must understand
# both the OpenAI nested format ({"error": {"message": ...}}) and the flat
# format ({"error": "..."}) used by LM Studio and some local providers,
# as well as arrays of errors, malformed JSON, and assorted edge cases.
RSpec.describe RubyLLM::Provider do
  # Create a concrete test provider since Provider is abstract
  let(:test_provider_class) do
    Class.new(described_class) do
      def api_base
        'http://test.example.com'
      end

      class << self
        def capabilities
          []
        end
      end
    end
  end

  let(:config) { RubyLLM::Configuration.new }
  let(:provider) { test_provider_class.new(config) }

  describe '#parse_error' do
    context 'with OpenAI-style error format (nested Hash)' do
      it 'extracts message from nested hash' do
        response = double(body: '{"error": {"message": "Rate limit exceeded"}}')
        expect(provider.parse_error(response)).to eq('Rate limit exceeded')
      end

      it 'handles error with additional fields' do
        response = double(
          body: '{"error": {"message": "Invalid request", "type": "invalid_request_error", "code": "invalid_api_key"}}'
        )
        expect(provider.parse_error(response)).to eq('Invalid request')
      end
    end

    context 'with LM Studio-style error format (String)' do
      it 'extracts message from string' do
        response = double(body: '{"error": "Token limit exceeded"}')
        expect(provider.parse_error(response)).to eq('Token limit exceeded')
      end

      it 'handles context length error' do
        response = double(
          body: '{"error": "The number of tokens to keep from the initial prompt is greater than the context length"}'
        )
        expect(provider.parse_error(response)).to eq(
          'The number of tokens to keep from the initial prompt is greater than the context length'
        )
      end

      it 'handles model not loaded error' do
        response = double(body: '{"error": "No model loaded"}')
        expect(provider.parse_error(response)).to eq('No model loaded')
      end
    end

    # Absent/null error fields must yield nil, never raise.
    context 'with empty or nil error value' do
      it 'returns nil when error field is null' do
        response = double(body: '{"error": null}')
        expect(provider.parse_error(response)).to be_nil
      end

      it 'returns nil when error field is missing' do
        response = double(body: '{"status": "error"}')
        expect(provider.parse_error(response)).to be_nil
      end

      it 'returns nil when body is empty' do
        response = double(body: '')
        expect(provider.parse_error(response)).to be_nil
      end
    end

    # Non-Hash/String error values are coerced with to_s.
    context 'with non-standard error types' do
      it 'converts integer error to string' do
        response = double(body: '{"error": 500}')
        expect(provider.parse_error(response)).to eq('500')
      end

      it 'converts symbol-like error to string' do
        # When JSON contains numeric or boolean values
        response = double(body: '{"error": true}')
        expect(provider.parse_error(response)).to eq('true')
      end

      it 'handles direct string body (not JSON)' do
        response = double(body: 'Internal Server Error')
        expect(provider.parse_error(response)).to eq('Internal Server Error')
      end
    end

    # Array bodies: each valid entry's message is extracted and the
    # results are joined with '. '; invalid entries are skipped.
    context 'with array of errors' do
      it 'handles array of OpenAI-style errors' do
        response = double(
          body: '[{"error": {"message": "Error 1"}}, {"error": {"message": "Error 2"}}]'
        )
        expect(provider.parse_error(response)).to eq('Error 1. Error 2')
      end

      it 'handles array of simple string errors' do
        response = double(
          body: '[{"error": "Error 1"}, {"error": "Error 2"}]'
        )
        expect(provider.parse_error(response)).to eq('Error 1. Error 2')
      end

      it 'handles mixed format errors in array' do
        response = double(
          body: '[{"error": {"message": "Hash error"}}, {"error": "String error"}]'
        )
        expect(provider.parse_error(response)).to eq('Hash error. String error')
      end

      it 'skips non-hash items in array' do
        response = double(
          body: '[{"error": "Valid"}, "invalid", {"error": "Also valid"}]'
        )
        expect(provider.parse_error(response)).to eq('Valid. Also valid')
      end

      it 'skips items without error field in array' do
        response = double(
          body: '[{"error": "Valid"}, {"status": "no error field"}, {"error": "Also valid"}]'
        )
        expect(provider.parse_error(response)).to eq('Valid. Also valid')
      end

      it 'returns empty string when array has no valid errors' do
        response = double(body: '[{"status": "ok"}, {"result": "success"}]')
        expect(provider.parse_error(response)).to eq('')
      end
    end

    # Invalid JSON falls back to the raw body string.
    context 'with malformed JSON' do
      it 'returns the raw string when JSON is invalid' do
        response = double(body: '{invalid json}')
        expect(provider.parse_error(response)).to eq('{invalid json}')
      end

      it 'handles partial JSON gracefully' do
        response = double(body: '{"error": "incomplete"')
        expect(provider.parse_error(response)).to eq('{"error": "incomplete"')
      end
    end

    # The StandardError rescue must log at debug level and return nil.
    context 'when parsing raises exception' do
      it 'logs error and returns nil' do
        # Stub logger to verify debug call
        allow(RubyLLM.logger).to receive(:debug)

        # Create a response that will cause an exception during parsing
        response = double(body: '{"error": {"message": "test"}}')
        allow(response).to receive(:body).and_raise(StandardError, 'Unexpected error')

        result = provider.parse_error(response)

        expect(result).to be_nil
        expect(RubyLLM.logger).to have_received(:debug).with(/Error parsing response/)
      end
    end

    context 'edge cases and security' do
      it 'does not execute code in error strings' do
        # Ensure error message is treated as data, not code
        response = double(body: '{"error": "`rm -rf /`"}')
        expect(provider.parse_error(response)).to eq('`rm -rf /`')
      end

      it 'handles very long error messages' do
        long_message = 'Error: ' + ('x' * 10_000)
        response = double(body: "{\"error\": \"#{long_message}\"}")
        expect(provider.parse_error(response)).to eq(long_message)
      end

      it 'handles unicode in error messages' do
        response = double(body: '{"error": "错误:无效的请求"}')
        expect(provider.parse_error(response)).to eq('错误:无效的请求')
      end

      it 'handles special characters in error messages' do
        response = double(body: '{"error": "Error: \"quotes\", \'apostrophes\', & symbols"}')
        expect(provider.parse_error(response)).to eq("Error: \"quotes\", 'apostrophes', & symbols")
      end
    end

    # Representative payloads captured from real providers.
    context 'real-world provider error formats' do
      it 'handles typical OpenAI rate limit error' do
        response = double(
          body: '{"error": {"message": "Rate limit reached for requests", "type": "tokens", "param": null, "code": "rate_limit_exceeded"}}'
        )
        expect(provider.parse_error(response)).to eq('Rate limit reached for requests')
      end

      it 'handles typical Anthropic error' do
        response = double(
          body: '{"error": {"type": "invalid_request_error", "message": "Invalid API key"}}'
        )
        expect(provider.parse_error(response)).to eq('Invalid API key')
      end

      it 'handles typical LM Studio context length error' do
        response = double(
          body: '{"error": "The number of tokens to keep from the initial prompt is greater than the context length"}'
        )
        expect(provider.parse_error(response)).to eq(
          'The number of tokens to keep from the initial prompt is greater than the context length'
        )
      end

      it 'handles Ollama-style error (typically Hash but can vary)' do
        response = double(body: '{"error": "model not found"}')
        expect(provider.parse_error(response)).to eq('model not found')
      end
    end
  end
end