This repository was archived by the owner on Jul 22, 2025. It is now read-only.

FIX: support spaces within arguments for Open AI #499

Merged
merged 2 commits on Feb 29, 2024
9 changes: 5 additions & 4 deletions lib/completions/endpoints/open_ai.rb
@@ -136,14 +136,12 @@ def prepare_request(payload)

def extract_completion_from(response_raw)
parsed = JSON.parse(response_raw, symbolize_names: true).dig(:choices, 0)

# half a line sent here
return if !parsed

response_h = @streaming_mode ? parsed.dig(:delta) : parsed.dig(:message)

@has_function_call ||= response_h.dig(:tool_calls).present?

@has_function_call ? response_h.dig(:tool_calls, 0) : response_h.dig(:content)
end

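# Illustrative note (not part of this PR): the payload shapes handled above follow
# OpenAI's chat completions API. Streaming chunks carry a :delta, blocking responses
# carry a :message, and tool calls stream the :arguments string in fragments, e.g.:
#
#   { choices: [{ delta: { content: "partial text" } }] }
#   { choices: [{ message: { content: "full text" } }] }
#   { choices: [{ delta: { tool_calls: [{ function: { name: "google", arguments: "{\"query" } }] } }] }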
@@ -172,8 +170,11 @@ def add_to_buffer(function_buffer, _response_data, partial)
function_buffer.at("tool_name").content = f_name if f_name
function_buffer.at("tool_id").content = partial[:id] if partial[:id]

if partial.dig(:function, :arguments).present?
@args_buffer << partial.dig(:function, :arguments)
args = partial.dig(:function, :arguments)

# allow for SPACE within arguments
if args && args != ""
@args_buffer << args

begin
json_args = JSON.parse(@args_buffer, symbolize_names: true)
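The guard change above is the heart of the fix: under ActiveSupport, a whitespace-only string counts as blank, so the old present? check silently dropped argument fragments that contained nothing but a space. A minimal sketch of the difference, assuming ActiveSupport's blank?/present? extensions and fragments like the ones the specs below stream:

require "active_support/core_ext/object/blank"

# Argument fragments as OpenAI streams them; the lone " " carries the space in "Adabas 9.1".
fragments = ["{\"", "query", "\":\"", "Ad", "ab", "as", " ", "9", ".", "1", "\"}"]

old_buffer = +""
new_buffer = +""

fragments.each do |args|
  old_buffer << args if args.present?      # old guard: " ".present? is false, so the space is lost
  new_buffer << args if args && args != "" # new guard: any non-empty fragment is kept
end

old_buffer # => "{\"query\":\"Adabas9.1\"}"
new_buffer # => "{\"query\":\"Adabas 9.1\"}"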
88 changes: 79 additions & 9 deletions spec/lib/completions/endpoints/open_ai_spec.rb
@@ -53,6 +53,13 @@ def stream_line(delta, finish_reason: nil, tool_call: false)
}.to_json
end

def stub_raw(chunks)
WebMock.stub_request(:post, "https://api.openai.com/v1/chat/completions").to_return(
status: 200,
body: chunks,
)
end

def stub_streamed_response(prompt, deltas, tool_call: false)
chunks =
deltas.each_with_index.map do |_, index|
@@ -69,6 +76,8 @@ def stub_streamed_response(prompt, deltas, tool_call: false)
.stub_request(:post, "https://api.openai.com/v1/chat/completions")
.with(body: request_body(prompt, stream: true, tool_call: tool_call))
.to_return(status: 200, body: chunks)

yield if block_given?
end

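# Illustrative note (not code from this PR): the new "yield if block_given?" lets a spec
# run the completion while the streamed stub is in place, in the same style the other
# examples use, e.g.:
#
#   open_ai_mock.stub_streamed_response(prompt, deltas, tool_call: true) do
#     endpoint.perform_completion!(compliance.dialect, user) { |partial| partials << partial }
#   end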
def tool_deltas
@@ -168,14 +177,16 @@ def request_body(prompt, stream: false, tool_call: false)
end

it "will automatically recover from a bad payload" do
called = false

# this should not happen, but lets ensure nothing bad happens
# the row with test1 is invalid json
raw_data = <<~TEXT.strip
d|a|t|a|:| |{|"choices":[{"delta":{"content":"test,"}}]}

data: {"choices":[{"delta":{"content":"test1,"}}]
data: {"choices":[{"delta":{"content":"test|1| |,"}}]

data: {"choices":[{"delta":|{"content":"test2,"}}]}
data: {"choices":[{"delta":|{"content":"test2 ,"}}]}

data: {"choices":[{"delta":{"content":"test3,"}}]|}

@@ -187,23 +198,82 @@ def request_body(prompt, stream: false, tool_call: false)
chunks = raw_data.split("|")

open_ai_mock.with_chunk_array_support do
open_ai_mock.stub_streamed_response(compliance.dialect.translate, chunks) do
partials = []
open_ai_mock.stub_raw(chunks)

endpoint.perform_completion!(compliance.dialect, user) do |partial|
partials << partial
end
partials = []

expect(partials.join).to eq("test,test1,test2,test3,test4")
end
endpoint.perform_completion!(compliance.dialect, user) { |partial| partials << partial }

called = true
expect(partials.join).to eq("test,test2 ,test3,test4")
end
expect(called).to be(true)
end
end
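# Illustrative note (not from the PR): the "|" characters in raw_data mark where the
# stubbed response body is cut into separate chunks, so the endpoint must reassemble
# "data:" lines that arrive split across reads, e.g.:
#
#   "d|a|t|a|:| |{".split("|") # => ["d", "a", "t", "a", ":", " ", "{"]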

context "with tools" do
it "returns a function invocation" do
compliance.streaming_mode_tools(open_ai_mock)
end

it "properly handles spaces in tools payload" do
raw_data = <<~TEXT.strip
data: {"choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"func_id","type":"function","function":{"name":"google","arguments":""}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"arguments": "{\\""}}]}}]}

data: {"ch|oices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"arguments": "query"}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"arguments": "\\":\\""}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"arguments": "Ad"}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"arguments": "a|b"}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"arguments": "as"}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"arguments": |"| "}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"arguments": "9"}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"arguments": "."}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"argume|nts": "1"}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": [{"index": 0, "function": {"arguments": "\\"}"}}]}}]}

data: {"choices": [{"index": 0, "delta": {"tool_calls": []}}]}

data: [D|ONE]
TEXT

chunks = raw_data.split("|")

open_ai_mock.with_chunk_array_support do
open_ai_mock.stub_raw(chunks)
partials = []

endpoint.perform_completion!(compliance.dialect, user) do |partial, x, y|
partials << partial
end

expect(partials.length).to eq(1)

function_call = (<<~TXT).strip
<function_calls>
<invoke>
<tool_name>google</tool_name>
<tool_id>func_id</tool_id>
<parameters>
<query>Adabas 9.1</query>
</parameters>
</invoke>
</function_calls>
TXT

expect(partials[0].strip).to eq(function_call)
end
end
end
end
end
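Taken together, the tool-call spec drives the fixed guard end to end: every streamed fragment, including the bare space, reaches the arguments buffer, and the buffer only parses once it forms valid JSON. A rough sketch of that reassembly, assuming fragments concatenate in arrival order as in add_to_buffer:

require "json"

fragments = ["{\"", "query", "\":\"", "Ad", "ab", "as", " ", "9", ".", "1", "\"}"]

args_buffer = +""
parsed = nil

fragments.each do |fragment|
  next if fragment.nil? || fragment == "" # the fixed guard keeps whitespace-only fragments
  args_buffer << fragment

  begin
    # succeeds only once the buffered fragments form complete JSON
    parsed = JSON.parse(args_buffer, symbolize_names: true)
  rescue JSON::ParserError
    # keep buffering
  end
end

parsed # => { query: "Adabas 9.1" }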