From 22b0aecb84b9a17536948d3ced0e4ff3479308f4 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 19 Mar 2025 23:16:16 +0000
Subject: [PATCH] feat!: support `for item in stream` style iteration on
 `Stream`s

---
 README.md                                     |  2 +-
 lib/openai/base_client.rb                     |  2 +-
 lib/openai/base_stream.rb                     | 15 +++---
 lib/openai/resources/beta/threads.rb          |  9 +++-
 lib/openai/resources/beta/threads/runs.rb     | 18 ++++++-
 lib/openai/resources/chat/completions.rb      |  9 +++-
 lib/openai/resources/completions.rb           |  9 +++-
 lib/openai/resources/responses.rb             |  9 +++-
 lib/openai/stream.rb                          |  7 ++-
 rbi/lib/openai/base_stream.rbi                | 10 ++--
 rbi/lib/openai/resources/beta/threads.rbi     |  8 +++
 .../openai/resources/beta/threads/runs.rbi    | 18 +++++++
 rbi/lib/openai/resources/chat/completions.rbi |  8 +++
 rbi/lib/openai/resources/completions.rbi      |  8 +++
 rbi/lib/openai/resources/responses.rbi        |  8 +++
 sig/openai/base_stream.rbs                    |  8 +--
 test/openai/client_test.rb                    | 50 +++----------------
 .../resources/beta/threads/runs_test.rb       |  9 +---
 test/openai/resources/beta/threads_test.rb    |  2 +-
 .../openai/resources/chat/completions_test.rb |  6 +--
 test/openai/resources/completions_test.rb     |  3 +-
 test/openai/resources/responses_test.rb       |  2 +-
 22 files changed, 135 insertions(+), 85 deletions(-)

diff --git a/README.md b/README.md
index 1b269d07..ff557c2d 100644
--- a/README.md
+++ b/README.md
@@ -81,7 +81,7 @@ stream = openai.chat.completions.create_streaming(
   model: "gpt-4o"
 )
 
-stream.for_each do |completion|
+stream.each do |completion|
   puts(completion)
 end
 ```
diff --git a/lib/openai/base_client.rb b/lib/openai/base_client.rb
index fbbda8f4..4178f663 100644
--- a/lib/openai/base_client.rb
+++ b/lib/openai/base_client.rb
@@ -437,7 +437,7 @@ def request(req)
       decoded = OpenAI::Util.decode_content(response, stream: stream)
       case req
       in { stream: Class => st }
-        st.new(model: model, url: url, status: status, response: response, messages: decoded)
+        st.new(model: model, url: url, status: status, response: response, stream: decoded)
       in { page: Class => page }
         page.new(client: self, req: req, headers: response, page_data: decoded)
       else
diff --git a/lib/openai/base_stream.rb b/lib/openai/base_stream.rb
index 77e0c71a..a4dbe0bc 100644
--- a/lib/openai/base_stream.rb
+++ b/lib/openai/base_stream.rb
@@ -3,7 +3,7 @@
 module OpenAI
   # @example
   # ```ruby
-  # stream.for_each do |chunk|
+  # stream.each do |chunk|
   #   puts(chunk)
   # end
   # ```
@@ -12,7 +12,6 @@ module OpenAI
   # ```ruby
   # chunks =
   #   stream
-  #     .to_enum
   #     .lazy
   #     .select { _1.object_id.even? }
   #     .map(&:itself)
@@ -22,6 +21,8 @@ module OpenAI
   #   chunks => Array
   # ```
   module BaseStream
+    include Enumerable
+
     # @return [void]
     def close = OpenAI::Util.close_fused!(@iterator)
 
@@ -33,14 +34,14 @@ def close = OpenAI::Util.close_fused!(@iterator)
     #   @param blk [Proc]
     #
     # @return [void]
-    def for_each(&)
+    def each(&)
       unless block_given?
        raise ArgumentError.new("A block must be given to ##{__method__}")
       end
       @iterator.each(&)
     end
 
-    # @return [Enumerable]
+    # @return [Enumerator]
     def to_enum = @iterator
 
     alias_method :enum_for, :to_enum
@@ -51,13 +52,13 @@ def to_enum = @iterator
     #   @param url [URI::Generic]
     #   @param status [Integer]
     #   @param response [Net::HTTPResponse]
-    #   @param messages [Enumerable]
-    def initialize(model:, url:, status:, response:, messages:)
+    #   @param stream [Enumerable]
+    def initialize(model:, url:, status:, response:, stream:)
       @model = model
       @url = url
       @status = status
       @response = response
-      @messages = messages
+      @stream = stream
       @iterator = iterator
     end
   end
diff --git a/lib/openai/resources/beta/threads.rb b/lib/openai/resources/beta/threads.rb
index d8f2e660..27b67c15 100644
--- a/lib/openai/resources/beta/threads.rb
+++ b/lib/openai/resources/beta/threads.rb
@@ -208,7 +208,10 @@ def delete(thread_id, params = {})
       # @return [OpenAI::Models::Beta::Threads::Run]
       def create_and_run(params)
         parsed, options = OpenAI::Models::Beta::ThreadCreateAndRunParams.dump_request(params)
-        parsed.delete(:stream)
+        if parsed[:stream]
+          message = "Please use `#create_and_run_streaming` for the streaming use case."
+          raise ArgumentError.new(message)
+        end
         @client.request(
           method: :post,
           path: "threads/runs",
@@ -315,6 +318,10 @@ def create_and_run(params)
       # @return [OpenAI::Stream]
       def create_and_run_streaming(params)
         parsed, options = OpenAI::Models::Beta::ThreadCreateAndRunParams.dump_request(params)
+        unless parsed.fetch(:stream, true)
+          message = "Please use `#create_and_run` for the non-streaming use case."
+          raise ArgumentError.new(message)
+        end
         parsed.store(:stream, true)
         @client.request(
           method: :post,
diff --git a/lib/openai/resources/beta/threads/runs.rb b/lib/openai/resources/beta/threads/runs.rb
index 9f5a4401..fa61373d 100644
--- a/lib/openai/resources/beta/threads/runs.rb
+++ b/lib/openai/resources/beta/threads/runs.rb
@@ -125,7 +125,10 @@ class Runs
       # @return [OpenAI::Models::Beta::Threads::Run]
       def create(thread_id, params)
         parsed, options = OpenAI::Models::Beta::Threads::RunCreateParams.dump_request(params)
-        parsed.delete(:stream)
+        if parsed[:stream]
+          message = "Please use `#create_streaming` for the streaming use case."
+          raise ArgumentError.new(message)
+        end
         query_params = [:include]
         @client.request(
           method: :post,
@@ -254,6 +257,10 @@ def create(thread_id, params)
       # @return [OpenAI::Stream]
       def create_streaming(thread_id, params)
         parsed, options = OpenAI::Models::Beta::Threads::RunCreateParams.dump_request(params)
+        unless parsed.fetch(:stream, true)
+          message = "Please use `#create` for the non-streaming use case."
+          raise ArgumentError.new(message)
+        end
         parsed.store(:stream, true)
         query_params = [:include]
         @client.request(
@@ -410,7 +417,10 @@ def cancel(run_id, params)
       # @return [OpenAI::Models::Beta::Threads::Run]
       def submit_tool_outputs(run_id, params)
         parsed, options = OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params)
-        parsed.delete(:stream)
+        if parsed[:stream]
+          message = "Please use `#submit_tool_outputs_streaming` for the streaming use case."
+          raise ArgumentError.new(message)
+        end
         thread_id = parsed.delete(:thread_id) do
           raise ArgumentError.new("missing required path argument #{_1}")
         end
@@ -444,6 +454,10 @@ def submit_tool_outputs(run_id, params)
       # @return [OpenAI::Stream]
       def submit_tool_outputs_streaming(run_id, params)
         parsed, options = OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params)
+        unless parsed.fetch(:stream, true)
+          message = "Please use `#submit_tool_outputs` for the non-streaming use case."
+          raise ArgumentError.new(message)
+        end
         parsed.store(:stream, true)
 
         thread_id = parsed.delete(:thread_id) do
diff --git a/lib/openai/resources/chat/completions.rb b/lib/openai/resources/chat/completions.rb
index 94e76ede..52e05866 100644
--- a/lib/openai/resources/chat/completions.rb
+++ b/lib/openai/resources/chat/completions.rb
@@ -215,7 +215,10 @@ class Completions
       # @return [OpenAI::Models::Chat::ChatCompletion]
       def create(params)
         parsed, options = OpenAI::Models::Chat::CompletionCreateParams.dump_request(params)
-        parsed.delete(:stream)
+        if parsed[:stream]
+          message = "Please use `#create_streaming` for the streaming use case."
+          raise ArgumentError.new(message)
+        end
         @client.request(
           method: :post,
           path: "chat/completions",
@@ -433,6 +436,10 @@ def create(params)
       # @return [OpenAI::Stream]
       def create_streaming(params)
         parsed, options = OpenAI::Models::Chat::CompletionCreateParams.dump_request(params)
+        unless parsed.fetch(:stream, true)
+          message = "Please use `#create` for the non-streaming use case."
+          raise ArgumentError.new(message)
+        end
         parsed.store(:stream, true)
         @client.request(
           method: :post,
diff --git a/lib/openai/resources/completions.rb b/lib/openai/resources/completions.rb
index 79cc6805..2cf80ec1 100644
--- a/lib/openai/resources/completions.rb
+++ b/lib/openai/resources/completions.rb
@@ -115,7 +115,10 @@ class Completions
       # @return [OpenAI::Models::Completion]
       def create(params)
         parsed, options = OpenAI::Models::CompletionCreateParams.dump_request(params)
-        parsed.delete(:stream)
+        if parsed[:stream]
+          message = "Please use `#create_streaming` for the streaming use case."
+          raise ArgumentError.new(message)
+        end
         @client.request(
           method: :post,
           path: "completions",
@@ -237,6 +240,10 @@ def create(params)
       # @return [OpenAI::Stream]
       def create_streaming(params)
         parsed, options = OpenAI::Models::CompletionCreateParams.dump_request(params)
+        unless parsed.fetch(:stream, true)
+          message = "Please use `#create` for the non-streaming use case."
+          raise ArgumentError.new(message)
+        end
         parsed.store(:stream, true)
         @client.request(
           method: :post,
diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb
index b09ce757..738b3a39 100644
--- a/lib/openai/resources/responses.rb
+++ b/lib/openai/resources/responses.rb
@@ -129,7 +129,10 @@ class Responses
       # @return [OpenAI::Models::Responses::Response]
       def create(params)
         parsed, options = OpenAI::Models::Responses::ResponseCreateParams.dump_request(params)
-        parsed.delete(:stream)
+        if parsed[:stream]
+          message = "Please use `#create_streaming` for the streaming use case."
+          raise ArgumentError.new(message)
+        end
         @client.request(
           method: :post,
           path: "responses",
@@ -262,6 +265,10 @@ def create(params)
       # @return [OpenAI::Stream]
       def create_streaming(params)
         parsed, options = OpenAI::Models::Responses::ResponseCreateParams.dump_request(params)
+        unless parsed.fetch(:stream, true)
+          message = "Please use `#create` for the non-streaming use case."
+          raise ArgumentError.new(message)
+        end
         parsed.store(:stream, true)
         @client.request(
           method: :post,
diff --git a/lib/openai/stream.rb b/lib/openai/stream.rb
index 8e218404..e510a11d 100644
--- a/lib/openai/stream.rb
+++ b/lib/openai/stream.rb
@@ -3,7 +3,7 @@
 module OpenAI
   # @example
   # ```ruby
-  # stream.for_each do |event|
+  # stream.each do |event|
   #   puts(event)
   # end
   # ```
@@ -12,7 +12,6 @@ module OpenAI
   # ```ruby
   # events =
   #   stream
-  #     .to_enum
   #     .lazy
   #     .select { _1.object_id.even? }
   #     .map(&:itself)
@@ -29,10 +28,10 @@ class Stream
     # @return [Enumerable]
     private def iterator
       # rubocop:disable Metrics/BlockLength
-      @iterator ||= OpenAI::Util.chain_fused(@messages) do |y|
+      @iterator ||= OpenAI::Util.chain_fused(@stream) do |y|
         consume = false
 
-        @messages.each do |msg|
+        @stream.each do |msg|
           next if consume
 
           case msg
diff --git a/rbi/lib/openai/base_stream.rbi b/rbi/lib/openai/base_stream.rbi
index c5f6c58e..9b1654a6 100644
--- a/rbi/lib/openai/base_stream.rbi
+++ b/rbi/lib/openai/base_stream.rbi
@@ -2,6 +2,8 @@
 
 module OpenAI
   module BaseStream
+    include Enumerable
+
     Message = type_member(:in)
     Elem = type_member(:out)
 
@@ -15,10 +17,10 @@
     end
 
     sig { params(blk: T.proc.params(arg0: Elem).void).void }
-    def for_each(&blk)
+    def each(&blk)
     end
 
-    sig { returns(T::Enumerable[Elem]) }
+    sig { returns(T::Enumerator[Elem]) }
     def to_enum
     end
 
@@ -31,11 +33,11 @@ module OpenAI
         url: URI::Generic,
         status: Integer,
         response: Net::HTTPResponse,
-        messages: T::Enumerable[Message]
+        stream: T::Enumerable[Message]
       )
         .void
     end
-    def initialize(model:, url:, status:, response:, messages:)
+    def initialize(model:, url:, status:, response:, stream:)
     end
   end
 end
diff --git a/rbi/lib/openai/resources/beta/threads.rbi b/rbi/lib/openai/resources/beta/threads.rbi
index 2170549d..aa843bbe 100644
--- a/rbi/lib/openai/resources/beta/threads.rbi
+++ b/rbi/lib/openai/resources/beta/threads.rbi
@@ -134,6 +134,7 @@ module OpenAI
           ),
           top_p: T.nilable(Float),
           truncation_strategy: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy),
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(OpenAI::Models::Beta::Threads::Run)
@@ -227,6 +228,9 @@ module OpenAI
         # Controls for how a thread will be truncated prior to the run. Use this to
         # control the intial context window of the run.
         truncation_strategy: nil,
+        # There is no need to provide `stream:`. Instead, use `#create_and_run_streaming`
+        # or `#create_and_run` for streaming and non-streaming use cases, respectively.
+        stream: false,
         request_options: {}
       )
       end
@@ -264,6 +268,7 @@ module OpenAI
           ),
           top_p: T.nilable(Float),
           truncation_strategy: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy),
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(
@@ -386,6 +391,9 @@ module OpenAI
         # Controls for how a thread will be truncated prior to the run. Use this to
         # control the intial context window of the run.
         truncation_strategy: nil,
+        # There is no need to provide `stream:`. Instead, use `#create_and_run_streaming`
+        # or `#create_and_run` for streaming and non-streaming use cases, respectively.
+        stream: true,
         request_options: {}
       )
       end
diff --git a/rbi/lib/openai/resources/beta/threads/runs.rbi b/rbi/lib/openai/resources/beta/threads/runs.rbi
index c2a318dd..d4b1a449 100644
--- a/rbi/lib/openai/resources/beta/threads/runs.rbi
+++ b/rbi/lib/openai/resources/beta/threads/runs.rbi
@@ -45,6 +45,7 @@ module OpenAI
           ),
           top_p: T.nilable(Float),
           truncation_strategy: T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy),
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(OpenAI::Models::Beta::Threads::Run)
@@ -158,6 +159,9 @@ module OpenAI
         # Body param: Controls for how a thread will be truncated prior to the run. Use
         # this to control the intial context window of the run.
         truncation_strategy: nil,
+        # There is no need to provide `stream:`. Instead, use `#create_streaming` or
+        # `#create` for streaming and non-streaming use cases, respectively.
+        stream: false,
         request_options: {}
       )
       end
@@ -198,6 +202,7 @@ module OpenAI
           ),
           top_p: T.nilable(Float),
           truncation_strategy: T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy),
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(
@@ -340,6 +345,9 @@ module OpenAI
         # Body param: Controls for how a thread will be truncated prior to the run. Use
         # this to control the intial context window of the run.
         truncation_strategy: nil,
+        # There is no need to provide `stream:`. Instead, use `#create_streaming` or
+        # `#create` for streaming and non-streaming use cases, respectively.
+        stream: true,
         request_options: {}
       )
       end
@@ -452,6 +460,7 @@ module OpenAI
           run_id: String,
           thread_id: String,
           tool_outputs: T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput],
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(OpenAI::Models::Beta::Threads::Run)
@@ -465,6 +474,10 @@ module OpenAI
         thread_id:,
         # Body param: A list of tools for which the outputs are being submitted.
         tool_outputs:,
+        # There is no need to provide `stream:`. Instead, use
+        # `#submit_tool_outputs_streaming` or `#submit_tool_outputs` for streaming and
+        # non-streaming use cases, respectively.
+        stream: false,
         request_options: {}
       )
       end
@@ -478,6 +491,7 @@ module OpenAI
           run_id: String,
           thread_id: String,
           tool_outputs: T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput],
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(
@@ -520,6 +534,10 @@ module OpenAI
         thread_id:,
         # Body param: A list of tools for which the outputs are being submitted.
         tool_outputs:,
+        # There is no need to provide `stream:`. Instead, use
+        # `#submit_tool_outputs_streaming` or `#submit_tool_outputs` for streaming and
+        # non-streaming use cases, respectively.
+        stream: true,
         request_options: {}
       )
       end
diff --git a/rbi/lib/openai/resources/chat/completions.rbi b/rbi/lib/openai/resources/chat/completions.rbi
index 7f6a851c..7358d205 100644
--- a/rbi/lib/openai/resources/chat/completions.rbi
+++ b/rbi/lib/openai/resources/chat/completions.rbi
@@ -70,6 +70,7 @@ module OpenAI
           top_p: T.nilable(Float),
           user: String,
           web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions,
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(OpenAI::Models::Chat::ChatCompletion)
@@ -258,6 +259,9 @@ module OpenAI
         # about the
         # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
         web_search_options: nil,
+        # There is no need to provide `stream:`. Instead, use `#create_streaming` or
+        # `#create` for streaming and non-streaming use cases, respectively.
+        stream: false,
         request_options: {}
       )
       end
@@ -324,6 +328,7 @@ module OpenAI
           top_p: T.nilable(Float),
           user: String,
           web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions,
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(OpenAI::Stream[OpenAI::Models::Chat::ChatCompletionChunk])
@@ -512,6 +517,9 @@ module OpenAI
         # about the
         # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
         web_search_options: nil,
+        # There is no need to provide `stream:`. Instead, use `#create_streaming` or
+        # `#create` for streaming and non-streaming use cases, respectively.
+        stream: true,
         request_options: {}
       )
       end
diff --git a/rbi/lib/openai/resources/completions.rbi b/rbi/lib/openai/resources/completions.rbi
index 949f94ed..8e9e52d4 100644
--- a/rbi/lib/openai/resources/completions.rbi
+++ b/rbi/lib/openai/resources/completions.rbi
@@ -30,6 +30,7 @@ module OpenAI
           temperature: T.nilable(Float),
           top_p: T.nilable(Float),
           user: String,
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(OpenAI::Models::Completion)
@@ -138,6 +139,9 @@ module OpenAI
         # and detect abuse.
         # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
         user: nil,
+        # There is no need to provide `stream:`. Instead, use `#create_streaming` or
+        # `#create` for streaming and non-streaming use cases, respectively.
+        stream: false,
         request_options: {}
       )
       end
@@ -169,6 +173,7 @@ module OpenAI
           temperature: T.nilable(Float),
           top_p: T.nilable(Float),
           user: String,
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(OpenAI::Stream[OpenAI::Models::Completion])
@@ -277,6 +282,9 @@ module OpenAI
         # and detect abuse.
         # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
         user: nil,
+        # There is no need to provide `stream:`. Instead, use `#create_streaming` or
+        # `#create` for streaming and non-streaming use cases, respectively.
+        stream: true,
         request_options: {}
       )
       end
diff --git a/rbi/lib/openai/resources/responses.rbi b/rbi/lib/openai/resources/responses.rbi
index 823cc8a1..c285a853 100644
--- a/rbi/lib/openai/resources/responses.rbi
+++ b/rbi/lib/openai/resources/responses.rbi
@@ -44,6 +44,7 @@ module OpenAI
           top_p: T.nilable(Float),
           truncation: T.nilable(Symbol),
           user: String,
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(OpenAI::Models::Responses::Response)
@@ -153,6 +154,9 @@ module OpenAI
         # and detect abuse.
         # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
         user: nil,
+        # There is no need to provide `stream:`. Instead, use `#create_streaming` or
+        # `#create` for streaming and non-streaming use cases, respectively.
+        stream: false,
         request_options: {}
       )
       end
@@ -194,6 +198,7 @@ module OpenAI
           top_p: T.nilable(Float),
           truncation: T.nilable(Symbol),
           user: String,
+          stream: T.noreturn,
           request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))
         )
           .returns(
@@ -340,6 +345,9 @@ module OpenAI
         # and detect abuse.
         # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
         user: nil,
+        # There is no need to provide `stream:`. Instead, use `#create_streaming` or
+        # `#create` for streaming and non-streaming use cases, respectively.
+        stream: true,
         request_options: {}
       )
       end
diff --git a/sig/openai/base_stream.rbs b/sig/openai/base_stream.rbs
index e5d9ec89..caa21732 100644
--- a/sig/openai/base_stream.rbs
+++ b/sig/openai/base_stream.rbs
@@ -1,12 +1,14 @@
 module OpenAI
   module BaseStream[Message, Elem]
+    include Enumerable[Elem]
+
     def close: -> void
 
     private def iterator: -> Enumerable[Elem]
 
-    def for_each: { (Elem arg0) -> void } -> void
+    def each: { (Elem arg0) -> void } -> void
 
-    def to_enum: -> Enumerable[Elem]
+    def to_enum: -> Enumerator[Elem]
 
     alias enum_for to_enum
 
@@ -15,7 +17,7 @@ module OpenAI
       url: URI::Generic,
       status: Integer,
      response: top,
-      messages: Enumerable[Message]
+      stream: Enumerable[Message]
     ) -> void
   end
 end
diff --git a/test/openai/client_test.rb b/test/openai/client_test.rb
index 9a3f400f..cbb0c21d 100644
--- a/test/openai/client_test.rb
+++ b/test/openai/client_test.rb
@@ -56,11 +56,7 @@ def test_client_default_request_default_retry_attempts
     openai.requester = requester
 
     assert_raises(OpenAI::InternalServerError) do
-      openai.chat.completions.create(
-        messages: [{content: "string", role: :developer}],
-        model: :"o3-mini",
-        stream: true
-      )
+      openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"o3-mini")
     end
 
     assert_equal(3, requester.attempts.length)
@@ -72,11 +68,7 @@ def test_client_given_request_default_retry_attempts
     openai.requester = requester
 
     assert_raises(OpenAI::InternalServerError) do
-      openai.chat.completions.create(
-        messages: [{content: "string", role: :developer}],
-        model: :"o3-mini",
-        stream: true
-      )
+      openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"o3-mini")
     end
 
     assert_equal(4, requester.attempts.length)
@@ -91,7 +83,6 @@ def test_client_default_request_given_retry_attempts
       openai.chat.completions.create(
         messages: [{content: "string", role: :developer}],
         model: :"o3-mini",
-        stream: true,
         request_options: {max_retries: 3}
       )
     end
@@ -108,7 +99,6 @@ def test_client_given_request_given_retry_attempts
       openai.chat.completions.create(
         messages: [{content: "string", role: :developer}],
         model: :"o3-mini",
-        stream: true,
        request_options: {max_retries: 4}
       )
     end
@@ -122,11 +112,7 @@ def test_client_retry_after_seconds
     openai.requester = requester
 
     assert_raises(OpenAI::InternalServerError) do
-      openai.chat.completions.create(
-        messages: [{content: "string", role: :developer}],
-        model: :"o3-mini",
-        stream: true
-      )
+      openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"o3-mini")
     end
 
     assert_equal(2, requester.attempts.length)
@@ -140,11 +126,7 @@ def test_client_retry_after_date
 
     assert_raises(OpenAI::InternalServerError) do
       Thread.current.thread_variable_set(:time_now, Time.now)
-      openai.chat.completions.create(
-        messages: [{content: "string", role: :developer}],
-        model: :"o3-mini",
-        stream: true
-      )
+      openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"o3-mini")
       Thread.current.thread_variable_set(:time_now, nil)
     end
 
@@ -158,11 +140,7 @@ def test_client_retry_after_ms
     openai.requester = requester
 
     assert_raises(OpenAI::InternalServerError) do
-      openai.chat.completions.create(
-        messages: [{content: "string", role: :developer}],
-        model: :"o3-mini",
-        stream: true
-      )
+      openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"o3-mini")
     end
 
     assert_equal(2, requester.attempts.length)
@@ -175,11 +153,7 @@ def test_retry_count_header
     openai.requester = requester
 
     assert_raises(OpenAI::InternalServerError) do
-      openai.chat.completions.create(
-        messages: [{content: "string", role: :developer}],
-        model: :"o3-mini",
-        stream: true
-      )
+      openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"o3-mini")
     end
 
     retry_count_headers = requester.attempts.map { _1[:headers]["x-stainless-retry-count"] }
@@ -195,7 +169,6 @@ def test_omit_retry_count_header
       openai.chat.completions.create(
         messages: [{content: "string", role: :developer}],
         model: :"o3-mini",
-        stream: true,
         request_options: {extra_headers: {"x-stainless-retry-count" => nil}}
       )
     end
@@ -213,7 +186,6 @@ def test_overwrite_retry_count_header
       openai.chat.completions.create(
         messages: [{content: "string", role: :developer}],
         model: :"o3-mini",
-        stream: true,
         request_options: {extra_headers: {"x-stainless-retry-count" => "42"}}
       )
     end
@@ -231,7 +203,6 @@ def test_client_redirect_307
       openai.chat.completions.create(
         messages: [{content: "string", role: :developer}],
         model: :"o3-mini",
-        stream: true,
         request_options: {extra_headers: {}}
       )
     end
@@ -254,7 +225,6 @@ def test_client_redirect_303
       openai.chat.completions.create(
         messages: [{content: "string", role: :developer}],
         model: :"o3-mini",
-        stream: true,
         request_options: {extra_headers: {}}
       )
     end
@@ -274,7 +244,6 @@ def test_client_redirect_auth_keep_same_origin
       openai.chat.completions.create(
         messages: [{content: "string", role: :developer}],
         model: :"o3-mini",
-        stream: true,
         request_options: {extra_headers: {"Authorization" => "Bearer xyz"}}
       )
     end
@@ -294,7 +263,6 @@ def test_client_redirect_auth_strip_cross_origin
       openai.chat.completions.create(
         messages: [{content: "string", role: :developer}],
         model: :"o3-mini",
-        stream: true,
         request_options: {extra_headers: {"Authorization" => "Bearer xyz"}}
       )
     end
@@ -306,11 +274,7 @@ def test_default_headers
     openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key")
     requester = MockRequester.new(200, {}, {})
     openai.requester = requester
-    openai.chat.completions.create(
-      messages: [{content: "string", role: :developer}],
-      model: :"o3-mini",
-      stream: true
-    )
+    openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"o3-mini")
 
     headers = requester.attempts.first[:headers]
     refute_empty(headers["accept"])
diff --git a/test/openai/resources/beta/threads/runs_test.rb b/test/openai/resources/beta/threads/runs_test.rb
index 7c074c33..9341955e 100644
--- a/test/openai/resources/beta/threads/runs_test.rb
+++ b/test/openai/resources/beta/threads/runs_test.rb
@@ -4,7 +4,7 @@
 
 class OpenAI::Test::Resources::Beta::Threads::RunsTest < OpenAI::Test::ResourceTest
   def test_create_required_params
-    response = @openai.beta.threads.runs.create("thread_id", assistant_id: "assistant_id", stream: true)
+    response = @openai.beta.threads.runs.create("thread_id", assistant_id: "assistant_id")
 
     assert_pattern do
       response => OpenAI::Models::Beta::Threads::Run
@@ -210,12 +210,7 @@ def test_cancel_required_params
 
   def test_submit_tool_outputs_required_params
     response =
-      @openai.beta.threads.runs.submit_tool_outputs(
-        "run_id",
-        thread_id: "thread_id",
-        stream: true,
-        tool_outputs: [{}]
-      )
+      @openai.beta.threads.runs.submit_tool_outputs("run_id", thread_id: "thread_id", tool_outputs: [{}])
 
     assert_pattern do
       response => OpenAI::Models::Beta::Threads::Run
diff --git a/test/openai/resources/beta/threads_test.rb b/test/openai/resources/beta/threads_test.rb
index 4203a6f3..f7f1021f 100644
--- a/test/openai/resources/beta/threads_test.rb
+++ b/test/openai/resources/beta/threads_test.rb
@@ -74,7 +74,7 @@ def test_delete
   end
 
   def test_create_and_run_required_params
-    response = @openai.beta.threads.create_and_run(assistant_id: "assistant_id", stream: true)
+    response = @openai.beta.threads.create_and_run(assistant_id: "assistant_id")
 
     assert_pattern do
       response => OpenAI::Models::Beta::Threads::Run
diff --git a/test/openai/resources/chat/completions_test.rb b/test/openai/resources/chat/completions_test.rb
index 0d861145..ffbd392e 100644
--- a/test/openai/resources/chat/completions_test.rb
+++ b/test/openai/resources/chat/completions_test.rb
@@ -5,11 +5,7 @@
 class OpenAI::Test::Resources::Chat::CompletionsTest < OpenAI::Test::ResourceTest
   def test_create_required_params
     response =
-      @openai.chat.completions.create(
-        messages: [{content: "string", role: :developer}],
-        model: :"o3-mini",
-        stream: true
-      )
+      @openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"o3-mini")
 
     assert_pattern do
       response => OpenAI::Models::Chat::ChatCompletion
diff --git a/test/openai/resources/completions_test.rb b/test/openai/resources/completions_test.rb
index b402a581..f7d94ab8 100644
--- a/test/openai/resources/completions_test.rb
+++ b/test/openai/resources/completions_test.rb
@@ -4,8 +4,7 @@
 
 class OpenAI::Test::Resources::CompletionsTest < OpenAI::Test::ResourceTest
   def test_create_required_params
-    response =
-      @openai.completions.create(model: :"gpt-3.5-turbo-instruct", prompt: "This is a test.", stream: true)
+    response = @openai.completions.create(model: :"gpt-3.5-turbo-instruct", prompt: "This is a test.")
 
     assert_pattern do
       response => OpenAI::Models::Completion
diff --git a/test/openai/resources/responses_test.rb b/test/openai/resources/responses_test.rb
index 1f0c3d9f..d0e0157e 100644
--- a/test/openai/resources/responses_test.rb
+++ b/test/openai/resources/responses_test.rb
@@ -4,7 +4,7 @@
 
 class OpenAI::Test::Resources::ResponsesTest < OpenAI::Test::ResourceTest
   def test_create_required_params
-    response = @openai.responses.create(input: "string", model: :"gpt-4o", stream: true)
+    response = @openai.responses.create(input: "string", model: :"gpt-4o")
 
     assert_pattern do
       response => OpenAI::Models::Responses::Response
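
Usage note (reviewer addition, not part of the patch): with `Enumerable` now mixed into `BaseStream`, callers iterate a stream directly instead of going through the removed `#for_each` or an explicit `#to_enum`. A minimal sketch of the new surface, assuming a configured client; the `require` line, API-key handling, model name, and prompt are illustrative rather than prescribed by this change:

```ruby
require "openai"

# Hypothetical setup for illustration; see the README for canonical usage.
openai = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

stream = openai.chat.completions.create_streaming(
  messages: [{role: "user", content: "Say this is a test"}],
  model: "gpt-4o"
)

# `for ... in` and block iteration both dispatch to the new `#each`.
# The underlying iterator is fused (see OpenAI::Util.chain_fused), so it
# is safest to consume a given stream exactly once.
for chunk in stream
  puts(chunk)
end
```

Because `#each` now backs the `Enumerable` contract, chained forms such as `stream.lazy.select { ... }` also work without the `#to_enum` hop that the deleted doc-comment lines used to show.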