diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1d52ad84..ce5e5c7c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.34.1" + ".": "0.35.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 7ec7944d..689bfb5c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 135 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a4bb37d110a22c2888f53e21281434686a6fffa3e672a40f2503ad9bd2759063.yml -openapi_spec_hash: 2d59eefb494dff4eea8c3d008c7e2070 -config_hash: 50ee3382a63c021a9f821a935950e926 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-3c5d1593d7c6f2b38a7d78d7906041465ee9d6e9022f0651e1da194654488108.yml +openapi_spec_hash: 0a4d8ad2469823ce24a3fd94f23f1c2b +config_hash: 032995825500a503a76da119f5354905 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c5b0f3d..6cb3d434 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,28 @@ # Changelog +## 0.35.0 (2025-11-03) + +Full Changelog: [v0.34.1...v0.35.0](https://github.com/openai/openai-ruby/compare/v0.34.1...v0.35.0) + +### Features + +* **api:** Realtime API token_limits, Hybrid searching ranking options ([f7f04ea](https://github.com/openai/openai-ruby/commit/f7f04ea1816e005cfc7325f3c97b1f463aa6afe3)) +* **api:** remove InputAudio from ResponseInputContent ([e8f5e9f](https://github.com/openai/openai-ruby/commit/e8f5e9f1b51843bc015f787316fbf522a87cac52)) +* handle thread interrupts in the core HTTP client ([92e26d0](https://github.com/openai/openai-ruby/commit/92e26d0593ae6487a62d500c3e1e866252f3bdeb)) + + +### Bug Fixes + +* **api:** docs updates ([88a4a35](https://github.com/openai/openai-ruby/commit/88a4a355457b22ef9ac657ecb0e7a1a2e9bc8973)) +* text and tools use mutually exclusive issue ([#855](https://github.com/openai/openai-ruby/issues/855)) ([7d93874](https://github.com/openai/openai-ruby/commit/7d93874ff34f5efa2459211984533fe72dced9e1)) + + +### Chores + +* add license information to the gemspec file ([#222](https://github.com/openai/openai-ruby/issues/222)) ([90d3c4a](https://github.com/openai/openai-ruby/commit/90d3c4aaae8a6e2fa039e0d1ad220ea3d1051ed7)) +* **client:** send user-agent header ([3a850a9](https://github.com/openai/openai-ruby/commit/3a850a93808daf101fb086edc5511db9fa224684)) +* **internal:** codegen related update ([f6b9f90](https://github.com/openai/openai-ruby/commit/f6b9f904a95d703a0ce76185e63352e095cb35af)) + ## 0.34.1 (2025-10-20) Full Changelog: [v0.34.0...v0.34.1](https://github.com/openai/openai-ruby/compare/v0.34.0...v0.34.1) diff --git a/Gemfile b/Gemfile index 0d76364b..1be178cb 100644 --- a/Gemfile +++ b/Gemfile @@ -11,8 +11,7 @@ group :development do gem "sorbet" gem "steep" gem "syntax_tree" - # TODO: using a fork for now, the prettier below has a bug - gem "syntax_tree-rbs", github: "stainless-api/syntax_tree-rbs", branch: "main" + gem "syntax_tree-rbs", github: "ruby-syntax-tree/syntax_tree-rbs", branch: "main" gem "tapioca" end diff --git a/Gemfile.lock b/Gemfile.lock index 410cbec2..1e460623 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -1,6 +1,6 @@ GIT - remote: https://github.com/stainless-api/syntax_tree-rbs.git - revision: c30b50219918be7cfe3ef803a00b59d1e77fcada + remote: https://github.com/ruby-syntax-tree/syntax_tree-rbs.git + revision: f94bc3060682ffbd126e4d5086ffedc89073d626 branch: main specs: syntax_tree-rbs (1.0.0) @@ -11,20 +11,20 @@ GIT PATH remote: . 
specs: - openai (0.34.1) + openai (0.35.0) connection_pool GEM remote: https://rubygems.org/ specs: - activesupport (8.0.2.1) + activesupport (8.1.1) base64 - benchmark (>= 0.3) bigdecimal concurrent-ruby (~> 1.0, >= 1.3.1) connection_pool (>= 2.2.5) drb i18n (>= 1.6, < 2) + json logger (>= 1.4.2) minitest (>= 5.1) securerandom (>= 0.3) @@ -33,22 +33,22 @@ GEM addressable (2.8.7) public_suffix (>= 2.0.2, < 7.0) ast (2.4.3) - async (2.27.3) + async (2.34.0) console (~> 1.29) fiber-annotation io-event (~> 1.11) metrics (~> 0.12) - traces (~> 0.15) + traces (~> 0.18) base64 (0.3.0) - benchmark (0.4.1) - bigdecimal (3.2.2) + benchmark (0.5.0) + bigdecimal (3.3.1) concurrent-ruby (1.3.5) - connection_pool (2.5.3) - console (1.33.0) + connection_pool (2.5.4) + console (1.34.2) fiber-annotation fiber-local (~> 1.1) json - crack (1.0.0) + crack (1.0.1) bigdecimal rexml csv (3.3.5) @@ -64,20 +64,20 @@ GEM fiber-local (1.1.0) fiber-storage fiber-storage (1.0.1) - fileutils (1.7.3) - hashdiff (1.2.0) + fileutils (1.8.0) + hashdiff (1.2.1) i18n (1.14.7) concurrent-ruby (~> 1.0) io-event (1.11.2) - json (2.13.2) + json (2.15.2) language_server-protocol (3.17.0.5) lint_roller (1.1.0) listen (3.9.0) rb-fsevent (~> 0.10, >= 0.10.3) rb-inotify (~> 0.9, >= 0.9.10) logger (1.7.0) - metrics (0.13.0) - minitest (5.25.5) + metrics (0.15.0) + minitest (5.26.0) minitest-focus (1.4.0) minitest (>= 4, < 6) minitest-hooks (1.5.2) @@ -89,27 +89,27 @@ GEM mutex_m (0.3.0) netrc (0.11.0) parallel (1.27.0) - parser (3.3.9.0) + parser (3.3.10.0) ast (~> 2.4.1) racc prettier_print (1.2.1) - prism (1.4.0) + prism (1.6.0) public_suffix (6.0.2) racc (1.8.1) rainbow (3.1.1) - rake (13.3.0) + rake (13.3.1) rb-fsevent (0.11.2) rb-inotify (0.11.1) ffi (~> 1.0) - rbi (0.3.6) + rbi (0.3.7) prism (~> 1.0) rbs (>= 3.4.4) - rbs (3.9.4) + rbs (3.9.5) logger redcarpet (3.6.1) - regexp_parser (2.11.2) - rexml (3.4.1) - rubocop (1.79.2) + regexp_parser (2.11.3) + rexml (3.4.4) + rubocop (1.81.7) json (~> 2.3) language_server-protocol (~> 3.17.0.2) lint_roller (~> 1.1.0) @@ -117,23 +117,23 @@ GEM parser (>= 3.3.0.2) rainbow (>= 2.2.2, < 4.0) regexp_parser (>= 2.9.3, < 3.0) - rubocop-ast (>= 1.46.0, < 2.0) + rubocop-ast (>= 1.47.1, < 2.0) ruby-progressbar (~> 1.7) unicode-display_width (>= 2.4.0, < 4.0) - rubocop-ast (1.46.0) + rubocop-ast (1.47.1) parser (>= 3.3.7.2) prism (~> 1.4) ruby-progressbar (1.13.0) securerandom (0.4.1) - sorbet (0.5.12424) - sorbet-static (= 0.5.12424) - sorbet-runtime (0.5.12424) - sorbet-static (0.5.12424-aarch64-linux) - sorbet-static (0.5.12424-universal-darwin) - sorbet-static (0.5.12424-x86_64-linux) - sorbet-static-and-runtime (0.5.12424) - sorbet (= 0.5.12424) - sorbet-runtime (= 0.5.12424) + sorbet (0.6.12690) + sorbet-static (= 0.6.12690) + sorbet-runtime (0.6.12690) + sorbet-static (0.6.12690-aarch64-linux) + sorbet-static (0.6.12690-universal-darwin) + sorbet-static (0.6.12690-x86_64-linux) + sorbet-static-and-runtime (0.6.12690) + sorbet (= 0.6.12690) + sorbet-runtime (= 0.6.12690) spoom (1.6.3) erubi (>= 1.10.0) prism (>= 0.28.0) @@ -174,14 +174,14 @@ GEM terminal-table (4.0.0) unicode-display_width (>= 1.1.1, < 4) thor (1.4.0) - traces (0.17.0) + traces (0.18.2) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - unicode-display_width (3.1.5) - unicode-emoji (~> 4.0, >= 4.0.4) - unicode-emoji (4.0.4) - uri (1.0.3) - webmock (3.25.1) + unicode-display_width (3.2.0) + unicode-emoji (~> 4.1) + unicode-emoji (4.1.0) + uri (1.1.0) + webmock (3.26.1) addressable (>= 2.8.0) crack (>= 0.3.2) hashdiff (>= 0.4.0, < 
2.0.0) diff --git a/README.md b/README.md index 91041cd4..b5a9943c 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application ```ruby -gem "openai", "~> 0.34.1" +gem "openai", "~> 0.35.0" ``` diff --git a/lib/openai/internal/transport/base_client.rb b/lib/openai/internal/transport/base_client.rb index db53aa1c..01772d3f 100644 --- a/lib/openai/internal/transport/base_client.rb +++ b/lib/openai/internal/transport/base_client.rb @@ -201,7 +201,8 @@ def initialize( self.class::PLATFORM_HEADERS, { "accept" => "application/json", - "content-type" => "application/json" + "content-type" => "application/json", + "user-agent" => user_agent }, headers ) @@ -219,6 +220,11 @@ def initialize( # @return [Hash{String=>String}] private def auth_headers = {} + # @api private + # + # @return [String] + private def user_agent = "#{self.class.name}/Ruby #{OpenAI::VERSION}" + # @api private # # @return [String] diff --git a/lib/openai/internal/transport/pooled_net_requester.rb b/lib/openai/internal/transport/pooled_net_requester.rb index 9af98e5a..7ace605f 100644 --- a/lib/openai/internal/transport/pooled_net_requester.rb +++ b/lib/openai/internal/transport/pooled_net_requester.rb @@ -128,40 +128,48 @@ def execute(request) url, deadline = request.fetch_values(:url, :deadline) req = nil - eof = false finished = false - closing = nil # rubocop:disable Metrics/BlockLength enum = Enumerator.new do |y| next if finished with_pool(url, deadline: deadline) do |conn| - req, closing = self.class.build_request(request) do - self.class.calibrate_socket_timeout(conn, deadline) - end - - self.class.calibrate_socket_timeout(conn, deadline) - unless conn.started? - conn.keep_alive_timeout = self.class::KEEP_ALIVE_TIMEOUT - conn.start - end + eof = false + closing = nil + ::Thread.handle_interrupt(Object => :never) do + ::Thread.handle_interrupt(Object => :immediate) do + req, closing = self.class.build_request(request) do + self.class.calibrate_socket_timeout(conn, deadline) + end - self.class.calibrate_socket_timeout(conn, deadline) - conn.request(req) do |rsp| - y << [req, rsp] - break if finished - - rsp.read_body do |bytes| - y << bytes.force_encoding(Encoding::BINARY) - break if finished + self.class.calibrate_socket_timeout(conn, deadline) + unless conn.started? + conn.keep_alive_timeout = self.class::KEEP_ALIVE_TIMEOUT + conn.start + end self.class.calibrate_socket_timeout(conn, deadline) + conn.request(req) do |rsp| + y << [req, rsp] + break if finished + + rsp.read_body do |bytes| + y << bytes.force_encoding(Encoding::BINARY) + break if finished + + self.class.calibrate_socket_timeout(conn, deadline) + end + eof = true + end + end + ensure + begin + conn.finish if !eof && conn&.started? + ensure + closing&.call end - eof = true end - ensure - conn.finish if !eof && conn&.started? end rescue Timeout::Error raise OpenAI::Errors::APITimeoutError.new(url: url, request: req) @@ -174,8 +182,6 @@ def execute(request) body = OpenAI::Internal::Util.fused_enum(enum, external: true) do finished = true loop { enum.next } - ensure - closing&.call end [Integer(response.code), response, body] end diff --git a/lib/openai/models/custom_tool_input_format.rb b/lib/openai/models/custom_tool_input_format.rb index 6eacce5b..e716a67d 100644 --- a/lib/openai/models/custom_tool_input_format.rb +++ b/lib/openai/models/custom_tool_input_format.rb @@ -8,8 +8,10 @@ module CustomToolInputFormat discriminator :type + # Unconstrained free-form text. 
variant :text, -> { OpenAI::CustomToolInputFormat::Text } + # A grammar defined by the user. variant :grammar, -> { OpenAI::CustomToolInputFormat::Grammar } class Text < OpenAI::Internal::Type::BaseModel @@ -20,6 +22,8 @@ class Text < OpenAI::Internal::Type::BaseModel required :type, const: :text # @!method initialize(type: :text) + # Unconstrained free-form text. + # # @param type [Symbol, :text] Unconstrained text format. Always `text`. end @@ -43,6 +47,8 @@ class Grammar < OpenAI::Internal::Type::BaseModel required :type, const: :grammar # @!method initialize(definition:, syntax:, type: :grammar) + # A grammar defined by the user. + # # @param definition [String] The grammar definition. # # @param syntax [Symbol, OpenAI::Models::CustomToolInputFormat::Grammar::Syntax] The syntax of the grammar definition. One of `lark` or `regex`. diff --git a/lib/openai/models/image_edit_params.rb b/lib/openai/models/image_edit_params.rb index dbd938de..146b683e 100644 --- a/lib/openai/models/image_edit_params.rb +++ b/lib/openai/models/image_edit_params.rb @@ -142,7 +142,7 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s). # - # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] + # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features, es # # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind # diff --git a/lib/openai/models/realtime/realtime_session_create_request.rb b/lib/openai/models/realtime/realtime_session_create_request.rb index 7eef6d7e..9f59c470 100644 --- a/lib/openai/models/realtime/realtime_session_create_request.rb +++ b/lib/openai/models/realtime/realtime_session_create_request.rb @@ -98,8 +98,19 @@ class RealtimeSessionCreateRequest < OpenAI::Internal::Type::BaseModel optional :tracing, union: -> { OpenAI::Realtime::RealtimeTracingConfig }, nil?: true # @!attribute truncation - # Controls how the realtime conversation is truncated prior to model inference. - # The default is `auto`. + # When the number of tokens in a conversation exceeds the model's input token + # limit, the conversation will be truncated, meaning messages (starting from the + # oldest) will not be included in the model's context. A 32k context model with + # 4,096 max output tokens can only include 28,224 tokens in the context before + # truncation occurs. Clients can configure truncation behavior to truncate with a + # lower max token limit, which is an effective way to control token usage and + # cost. Truncation will reduce the number of cached tokens on the next turn + # (busting the cache), since messages are dropped from the beginning of the + # context. However, clients can also configure truncation to retain messages up to + # a fraction of the maximum context size, which will reduce the need for future + # truncations and thus improve the cache rate. Truncation can be disabled + # entirely, which means the server will never truncate but would instead return an + # error if the conversation exceeds the model's input token limit.
# # @return [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio, nil] optional :truncation, union: -> { OpenAI::Realtime::RealtimeTruncation } @@ -130,7 +141,7 @@ class RealtimeSessionCreateRequest < OpenAI::Internal::Type::BaseModel # # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeTracingConfig::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](/logs?api=traces # - # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] Controls how the realtime conversation is truncated prior to model inference. + # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] When the number of tokens in a conversation exceeds the model's input token limi # # @param type [Symbol, :realtime] The type of session to create. Always `realtime` for the Realtime API. diff --git a/lib/openai/models/realtime/realtime_session_create_response.rb b/lib/openai/models/realtime/realtime_session_create_response.rb index 5960952f..2b423b90 100644 --- a/lib/openai/models/realtime/realtime_session_create_response.rb +++ b/lib/openai/models/realtime/realtime_session_create_response.rb @@ -106,8 +106,19 @@ class RealtimeSessionCreateResponse < OpenAI::Internal::Type::BaseModel optional :tracing, union: -> { OpenAI::Realtime::RealtimeSessionCreateResponse::Tracing }, nil?: true # @!attribute truncation - # Controls how the realtime conversation is truncated prior to model inference. - # The default is `auto`. + # When the number of tokens in a conversation exceeds the model's input token + # limit, the conversation will be truncated, meaning messages (starting from the + # oldest) will not be included in the model's context. A 32k context model with + # 4,096 max output tokens can only include 28,224 tokens in the context before + # truncation occurs. Clients can configure truncation behavior to truncate with a + # lower max token limit, which is an effective way to control token usage and + # cost. Truncation will reduce the number of cached tokens on the next turn + # (busting the cache), since messages are dropped from the beginning of the + # context. However, clients can also configure truncation to retain messages up to + # a fraction of the maximum context size, which will reduce the need for future + # truncations and thus improve the cache rate. Truncation can be disabled + # entirely, which means the server will never truncate but would instead return an + # error if the conversation exceeds the model's input token limit.
# # @return [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio, nil] optional :truncation, union: -> { OpenAI::Realtime::RealtimeTruncation } @@ -141,7 +152,7 @@ class RealtimeSessionCreateResponse < OpenAI::Internal::Type::BaseModel # # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Tracing::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](/logs?api=traces # - # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] Controls how the realtime conversation is truncated prior to model inference. + # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] When the number of tokens in a conversation exceeds the model's input token limi # # @param type [Symbol, :realtime] The type of session to create. Always `realtime` for the Realtime API. @@ -990,7 +1001,7 @@ module McpToolApprovalSetting module Tracing extend OpenAI::Internal::Type::Union - # Default tracing mode for the session. + # Enables tracing and sets default values for tracing configuration options. Always `auto`. variant const: :auto # Granular configuration for tracing. diff --git a/lib/openai/models/realtime/realtime_tracing_config.rb b/lib/openai/models/realtime/realtime_tracing_config.rb index 2b5f87ff..4102fcc7 100644 --- a/lib/openai/models/realtime/realtime_tracing_config.rb +++ b/lib/openai/models/realtime/realtime_tracing_config.rb @@ -12,7 +12,7 @@ module Realtime module RealtimeTracingConfig extend OpenAI::Internal::Type::Union - # Default tracing mode for the session. + # Enables tracing and sets default values for tracing configuration options. Always `auto`. variant const: :auto # Granular configuration for tracing. diff --git a/lib/openai/models/realtime/realtime_truncation.rb b/lib/openai/models/realtime/realtime_truncation.rb index 7220e112..cb92b40b 100644 --- a/lib/openai/models/realtime/realtime_truncation.rb +++ b/lib/openai/models/realtime/realtime_truncation.rb @@ -3,8 +3,19 @@ module OpenAI module Models module Realtime - # Controls how the realtime conversation is truncated prior to model inference. - # The default is `auto`. + # When the number of tokens in a conversation exceeds the model's input token + # limit, the conversation will be truncated, meaning messages (starting from the + # oldest) will not be included in the model's context. A 32k context model with + # 4,096 max output tokens can only include 28,224 tokens in the context before + # truncation occurs. Clients can configure truncation behavior to truncate with a + # lower max token limit, which is an effective way to control token usage and + # cost. Truncation will reduce the number of cached tokens on the next turn + # (busting the cache), since messages are dropped from the beginning of the + # context. However, clients can also configure truncation to retain messages up to + # a fraction of the maximum context size, which will reduce the need for future + # truncations and thus improve the cache rate. Truncation can be disabled + # entirely, which means the server will never truncate but would instead return an + # error if the conversation exceeds the model's input token limit.
module RealtimeTruncation extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb b/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb index 43e6778e..86bf5499 100644 --- a/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb +++ b/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb @@ -5,8 +5,10 @@ module Models module Realtime class RealtimeTruncationRetentionRatio < OpenAI::Internal::Type::BaseModel # @!attribute retention_ratio - # Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when the - # conversation exceeds the input token limit. + # Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when + # the conversation exceeds the input token limit. Setting this to `0.8` means that + # messages will be dropped until 80% of the maximum allowed tokens are used. This + # helps reduce the frequency of truncations and improve cache rates. # # @return [Float] required :retention_ratio, Float @@ -17,7 +19,14 @@ class RealtimeTruncationRetentionRatio < OpenAI::Internal::Type::BaseModel # @return [Symbol, :retention_ratio] required :type, const: :retention_ratio - # @!method initialize(retention_ratio:, type: :retention_ratio) + # @!attribute token_limits + # Optional custom token limits for this truncation strategy. If not provided, the + # model's default token limits will be used. + # + # @return [OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio::TokenLimits, nil] + optional :token_limits, -> { OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits } + + # @!method initialize(retention_ratio:, token_limits: nil, type: :retention_ratio) # Some parameter documentations has been truncated, see # {OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio} for more details. # # Retain a fraction of the conversation tokens when the conversation exceeds the # input token limit. This allows you to amortize truncations across multiple # turns, which can help improve cached token usage. # - # @param retention_ratio [Float] Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when the + # @param retention_ratio [Float] Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when + # + # @param token_limits [OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio::TokenLimits] Optional custom token limits for this truncation strategy. If not provided, the + # + # @param type [Symbol, :retention_ratio] Use retention ratio truncation. + + # @see OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio#token_limits + class TokenLimits < OpenAI::Internal::Type::BaseModel + # @!attribute post_instructions + # Maximum tokens allowed in the conversation after instructions (which include + # tool definitions). For example, setting this to 5,000 would mean that truncation + # would occur when the conversation exceeds 5,000 tokens after instructions. This + # cannot be higher than the model's context window size minus the maximum output + # tokens. + # + # @return [Integer, nil] + optional :post_instructions, Integer + + # @!method initialize(post_instructions: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio::TokenLimits} for + # more details. + # + # Optional custom token limits for this truncation strategy. If not provided, the + # model's default token limits will be used.
+ # + # @param post_instructions [Integer] Maximum tokens allowed in the conversation after instructions (which including t + end end end end diff --git a/lib/openai/models/reasoning.rb b/lib/openai/models/reasoning.rb index 6a7b66c9..177da9ea 100644 --- a/lib/openai/models/reasoning.rb +++ b/lib/openai/models/reasoning.rb @@ -33,6 +33,8 @@ class Reasoning < OpenAI::Internal::Type::BaseModel # debugging and understanding the model's reasoning process. One of `auto`, # `concise`, or `detailed`. # + # `concise` is only supported for `computer-use-preview` models. + # # @return [Symbol, OpenAI::Models::Reasoning::Summary, nil] optional :summary, enum: -> { OpenAI::Reasoning::Summary }, nil?: true @@ -75,6 +77,8 @@ module GenerateSummary # debugging and understanding the model's reasoning process. One of `auto`, # `concise`, or `detailed`. # + # `concise` is only supported for `computer-use-preview` models. + # # @see OpenAI::Models::Reasoning#summary module Summary extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/responses/custom_tool.rb b/lib/openai/models/responses/custom_tool.rb index 297335bc..e5a9d29c 100644 --- a/lib/openai/models/responses/custom_tool.rb +++ b/lib/openai/models/responses/custom_tool.rb @@ -29,6 +29,9 @@ class CustomTool < OpenAI::Internal::Type::BaseModel optional :format_, union: -> { OpenAI::CustomToolInputFormat }, api_name: :format # @!method initialize(name:, description: nil, format_: nil, type: :custom) + # A custom tool that processes input using a specified format. Learn more about + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # # @param name [String] The name of the custom tool, used to identify it in tool calls. # # @param description [String] Optional description of the custom tool, used to provide more context. diff --git a/lib/openai/models/responses/easy_input_message.rb b/lib/openai/models/responses/easy_input_message.rb index 06a54f56..64f37584 100644 --- a/lib/openai/models/responses/easy_input_message.rb +++ b/lib/openai/models/responses/easy_input_message.rb @@ -8,7 +8,7 @@ class EasyInputMessage < OpenAI::Internal::Type::BaseModel # Text, image, or audio input to the model, used to generate a response. Can also # contain previous assistant responses. # - # @return [String, Array] + # @return [String, Array] required :content, union: -> { OpenAI::Responses::EasyInputMessage::Content } # @!attribute role @@ -34,7 +34,7 @@ class EasyInputMessage < OpenAI::Internal::Type::BaseModel # `assistant` role are presumed to have been generated by the model in previous # interactions. # - # @param content [String, Array] Text, image, or audio input to the model, used to generate a response. + # @param content [String, Array] Text, image, or audio input to the model, used to generate a response. # # @param role [Symbol, OpenAI::Models::Responses::EasyInputMessage::Role] The role of the message input. One of `user`, `assistant`, `system`, or # @@ -55,7 +55,7 @@ module Content variant -> { OpenAI::Responses::ResponseInputMessageContentList } # @!method self.variants - # @return [Array(String, Array)] + # @return [Array(String, Array)] end # The role of the message input. 
One of `user`, `assistant`, `system`, or diff --git a/lib/openai/models/responses/file_search_tool.rb b/lib/openai/models/responses/file_search_tool.rb index aead0521..6ac36036 100644 --- a/lib/openai/models/responses/file_search_tool.rb +++ b/lib/openai/models/responses/file_search_tool.rb @@ -71,6 +71,13 @@ module Filters # @see OpenAI::Models::Responses::FileSearchTool#ranking_options class RankingOptions < OpenAI::Internal::Type::BaseModel + # @!attribute hybrid_search + # Weights that control how reciprocal rank fusion balances semantic embedding + # matches versus sparse keyword matches when hybrid search is enabled. + # + # @return [OpenAI::Models::Responses::FileSearchTool::RankingOptions::HybridSearch, nil] + optional :hybrid_search, -> { OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch } + # @!attribute ranker # The ranker to use for the file search. # @@ -85,16 +92,41 @@ class RankingOptions < OpenAI::Internal::Type::BaseModel # @return [Float, nil] optional :score_threshold, Float - # @!method initialize(ranker: nil, score_threshold: nil) + # @!method initialize(hybrid_search: nil, ranker: nil, score_threshold: nil) # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::FileSearchTool::RankingOptions} for more details. # # Ranking options for search. # + # @param hybrid_search [OpenAI::Models::Responses::FileSearchTool::RankingOptions::HybridSearch] Weights that control how reciprocal rank fusion balances semantic embedding matc + # # @param ranker [Symbol, OpenAI::Models::Responses::FileSearchTool::RankingOptions::Ranker] The ranker to use for the file search. # # @param score_threshold [Float] The score threshold for the file search, a number between 0 and 1. Numbers close + # @see OpenAI::Models::Responses::FileSearchTool::RankingOptions#hybrid_search + class HybridSearch < OpenAI::Internal::Type::BaseModel + # @!attribute embedding_weight + # The weight of the embedding in the reciprocal ranking fusion. + # + # @return [Float] + required :embedding_weight, Float + + # @!attribute text_weight + # The weight of the text in the reciprocal ranking fusion. + # + # @return [Float] + required :text_weight, Float + + # @!method initialize(embedding_weight:, text_weight:) + # Weights that control how reciprocal rank fusion balances semantic embedding + # matches versus sparse keyword matches when hybrid search is enabled. + # + # @param embedding_weight [Float] The weight of the embedding in the reciprocal ranking fusion. + # + # @param text_weight [Float] The weight of the text in the reciprocal ranking fusion. + end + # The ranker to use for the file search. # # @see OpenAI::Models::Responses::FileSearchTool::RankingOptions#ranker diff --git a/lib/openai/models/responses/response_content.rb b/lib/openai/models/responses/response_content.rb index b07ae3a7..89e2500f 100644 --- a/lib/openai/models/responses/response_content.rb +++ b/lib/openai/models/responses/response_content.rb @@ -16,9 +16,6 @@ module ResponseContent # A file input to the model. variant -> { OpenAI::Responses::ResponseInputFile } - # An audio input to the model. - variant -> { OpenAI::Responses::ResponseInputAudio } - # A text output from the model. 
variant -> { OpenAI::Responses::ResponseOutputText } @@ -50,7 +47,7 @@ class ReasoningTextContent < OpenAI::Internal::Type::BaseModel end # @!method self.variants - # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseContent::ReasoningTextContent)] + # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseContent::ReasoningTextContent)] end end end diff --git a/lib/openai/models/responses/response_input_content.rb b/lib/openai/models/responses/response_input_content.rb index b0ccf419..59c6970e 100644 --- a/lib/openai/models/responses/response_input_content.rb +++ b/lib/openai/models/responses/response_input_content.rb @@ -18,11 +18,8 @@ module ResponseInputContent # A file input to the model. variant :input_file, -> { OpenAI::Responses::ResponseInputFile } - # An audio input to the model. - variant :input_audio, -> { OpenAI::Responses::ResponseInputAudio } - # @!method self.variants - # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio)] + # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile)] end end end diff --git a/lib/openai/models/responses/response_input_item.rb b/lib/openai/models/responses/response_input_item.rb index af672d03..0ff9bfc7 100644 --- a/lib/openai/models/responses/response_input_item.rb +++ b/lib/openai/models/responses/response_input_item.rb @@ -94,7 +94,7 @@ class Message < OpenAI::Internal::Type::BaseModel # A list of one or many input items to the model, containing different content # types. # - # @return [Array] + # @return [Array] required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] } # @!attribute role @@ -124,7 +124,7 @@ class Message < OpenAI::Internal::Type::BaseModel # hierarchy. Instructions given with the `developer` or `system` role take # precedence over instructions given with the `user` role. # - # @param content [Array] A list of one or many input items to the model, containing different content + # @param content [Array] A list of one or many input items to the model, containing different content # # @param role [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Role] The role of the message input. One of `user`, `system`, or `developer`. # diff --git a/lib/openai/models/responses/response_input_message_item.rb b/lib/openai/models/responses/response_input_message_item.rb index 5998522e..48236782 100644 --- a/lib/openai/models/responses/response_input_message_item.rb +++ b/lib/openai/models/responses/response_input_message_item.rb @@ -14,7 +14,7 @@ class ResponseInputMessageItem < OpenAI::Internal::Type::BaseModel # A list of one or many input items to the model, containing different content # types. 
# - # @return [Array] + # @return [Array] required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] } # @!attribute role @@ -42,7 +42,7 @@ class ResponseInputMessageItem < OpenAI::Internal::Type::BaseModel # # @param id [String] The unique ID of the message input. # - # @param content [Array] A list of one or many input items to the model, containing different content + # @param content [Array] A list of one or many input items to the model, containing different content # # @param role [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Role] The role of the message input. One of `user`, `system`, or `developer`. # diff --git a/lib/openai/models/responses/response_output_text.rb b/lib/openai/models/responses/response_output_text.rb index aec78ec6..acd7df97 100644 --- a/lib/openai/models/responses/response_output_text.rb +++ b/lib/openai/models/responses/response_output_text.rb @@ -11,6 +11,11 @@ class ResponseOutputText < OpenAI::Internal::Type::BaseModel required :annotations, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation] } + # @!attribute logprobs + # + # @return [Array] + required :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob] } + # @!attribute text # The text output from the model. # @@ -31,20 +36,15 @@ class ResponseOutputText < OpenAI::Internal::Type::BaseModel # @return [Symbol, :output_text] required :type, const: :output_text - # @!attribute logprobs - # - # @return [Array, nil] - optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob] } - - # @!method initialize(annotations:, text:, logprobs: nil, type: :output_text) + # @!method initialize(annotations:, logprobs:, text:, type: :output_text) # A text output from the model. # # @param annotations [Array] The annotations of the text output. # - # @param text [String] The text output from the model. - # # @param logprobs [Array] # + # @param text [String] The text output from the model. + # # @param type [Symbol, :output_text] The type of the output text. Always `output_text`. # A citation to a file. diff --git a/lib/openai/models/responses/tool.rb b/lib/openai/models/responses/tool.rb index b1247db0..97f3c7b1 100644 --- a/lib/openai/models/responses/tool.rb +++ b/lib/openai/models/responses/tool.rb @@ -30,8 +30,10 @@ module Tool # A tool that generates images using a model like `gpt-image-1`. variant :image_generation, -> { OpenAI::Responses::Tool::ImageGeneration } + # A tool that allows the model to execute shell commands in a local environment. variant :local_shell, -> { OpenAI::Responses::Tool::LocalShell } + # A custom tool that processes input using a specified format. Learn more about [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) variant :custom, -> { OpenAI::Responses::CustomTool } # Search the Internet for sources related to the prompt. 
Learn more about the @@ -388,13 +390,37 @@ class CodeInterpreterToolAuto < OpenAI::Internal::Type::BaseModel # @return [Array, nil] optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] - # @!method initialize(file_ids: nil, type: :auto) + # @!attribute memory_limit + # + # @return [Symbol, OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit, nil] + optional :memory_limit, + enum: -> { + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit + }, + nil?: true + + # @!method initialize(file_ids: nil, memory_limit: nil, type: :auto) # Configuration for a code interpreter container. Optionally specify the IDs of # the files to run the code on. # # @param file_ids [Array] An optional list of uploaded files to make available to your code. # + # @param memory_limit [Symbol, OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit, nil] + # # @param type [Symbol, :auto] Always `auto`. + + # @see OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto#memory_limit + module MemoryLimit + extend OpenAI::Internal::Type::Enum + + MEMORY_LIMIT_1G = :"1g" + MEMORY_LIMIT_4G = :"4g" + MEMORY_LIMIT_16G = :"16g" + MEMORY_LIMIT_64G = :"64g" + + # @!method self.values + # @return [Array] + end end # @!method self.variants @@ -490,7 +516,7 @@ class ImageGeneration < OpenAI::Internal::Type::BaseModel # # @param background [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background] Background type for the generated image. One of `transparent`, # - # @param input_fidelity [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::InputFidelity, nil] + # @param input_fidelity [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::InputFidelity, nil] Control how much effort the model will exert to match the style and features, es # # @param input_image_mask [OpenAI::Models::Responses::Tool::ImageGeneration::InputImageMask] Optional mask for inpainting. Contains `image_url` # @@ -650,6 +676,8 @@ class LocalShell < OpenAI::Internal::Type::BaseModel required :type, const: :local_shell # @!method initialize(type: :local_shell) + # A tool that allows the model to execute shell commands in a local environment. + # # @param type [Symbol, :local_shell] The type of the local shell tool. Always `local_shell`. end diff --git a/lib/openai/models/vector_stores/file_batch_create_params.rb b/lib/openai/models/vector_stores/file_batch_create_params.rb index 0815e0f1..d1cf1ad2 100644 --- a/lib/openai/models/vector_stores/file_batch_create_params.rb +++ b/lib/openai/models/vector_stores/file_batch_create_params.rb @@ -8,14 +8,6 @@ class FileBatchCreateParams < OpenAI::Internal::Type::BaseModel extend OpenAI::Internal::Type::RequestParameters::Converter include OpenAI::Internal::Type::RequestParameters - # @!attribute file_ids - # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. - # - # @return [Array] - required :file_ids, OpenAI::Internal::Type::ArrayOf[String] - # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. 
This can be useful # for storing additional information about the object in a structured format, and @@ -37,16 +29,36 @@ class FileBatchCreateParams < OpenAI::Internal::Type::BaseModel # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil] optional :chunking_strategy, union: -> { OpenAI::FileChunkingStrategyParam } - # @!method initialize(file_ids:, attributes: nil, chunking_strategy: nil, request_options: {}) + # @!attribute file_ids + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. If `attributes` or `chunking_strategy` are provided, they will be applied + # to all files in the batch. Mutually exclusive with `files`. + # + # @return [Array, nil] + optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] + + # @!attribute files + # A list of objects that each include a `file_id` plus optional `attributes` or + # `chunking_strategy`. Use this when you need to override metadata for specific + # files. The global `attributes` or `chunking_strategy` will be ignored and must + # be specified for each file. Mutually exclusive with `file_ids`. + # + # @return [Array, nil] + optional :files, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::VectorStores::FileBatchCreateParams::File] } + + # @!method initialize(attributes: nil, chunking_strategy: nil, file_ids: nil, files: nil, request_options: {}) # Some parameter documentations has been truncated, see # {OpenAI::Models::VectorStores::FileBatchCreateParams} for more details. # - # @param file_ids [Array] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). If not set, will use the `auto` # + # @param file_ids [Array] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # + # @param files [Array] A list of objects that each include a `file_id` plus optional `attributes` or `c + # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] module Attribute @@ -61,6 +73,60 @@ module Attribute # @!method self.variants # @return [Array(String, Float, Boolean)] end + + class File < OpenAI::Internal::Type::BaseModel + # @!attribute file_id + # A [File](https://platform.openai.com/docs/api-reference/files) ID that the + # vector store should use. Useful for tools like `file_search` that can access + # files. + # + # @return [String] + required :file_id, String + + # @!attribute attributes + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + # + # @return [Hash{Symbol=>String, Float, Boolean}, nil] + optional :attributes, + -> { + OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::FileBatchCreateParams::File::Attribute] + }, + nil?: true + + # @!attribute chunking_strategy + # The chunking strategy used to chunk the file(s). 
If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + # + # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil] + optional :chunking_strategy, union: -> { OpenAI::FileChunkingStrategyParam } + + # @!method initialize(file_id:, attributes: nil, chunking_strategy: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileBatchCreateParams::File} for more details. + # + # @param file_id [String] A [File](https://platform.openai.com/docs/api-reference/files) ID that the vecto + # + # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). If not set, will use the `auto` + + module Attribute + extend OpenAI::Internal::Type::Union + + variant String + + variant Float + + variant OpenAI::Internal::Type::Boolean + + # @!method self.variants + # @return [Array(String, Float, Boolean)] + end + end end end end diff --git a/lib/openai/models/video.rb b/lib/openai/models/video.rb index 6537b78b..e07da5ca 100644 --- a/lib/openai/models/video.rb +++ b/lib/openai/models/video.rb @@ -52,6 +52,12 @@ class Video < OpenAI::Internal::Type::BaseModel # @return [Integer] required :progress, Integer + # @!attribute prompt + # The prompt that was used to generate the video. + # + # @return [String, nil] + required :prompt, String, nil?: true + # @!attribute remixed_from_video_id # Identifier of the source video if this video is a remix. # @@ -76,7 +82,7 @@ class Video < OpenAI::Internal::Type::BaseModel # @return [Symbol, OpenAI::Models::Video::Status] required :status, enum: -> { OpenAI::Video::Status } - # @!method initialize(id:, completed_at:, created_at:, error:, expires_at:, model:, progress:, remixed_from_video_id:, seconds:, size:, status:, object: :video) + # @!method initialize(id:, completed_at:, created_at:, error:, expires_at:, model:, progress:, prompt:, remixed_from_video_id:, seconds:, size:, status:, object: :video) # Structured information describing a generated video job. # # @param id [String] Unique identifier for the video job. @@ -93,6 +99,8 @@ class Video < OpenAI::Internal::Type::BaseModel # # @param progress [Integer] Approximate completion percentage for the generation task. # + # @param prompt [String, nil] The prompt that was used to generate the video. + # # @param remixed_from_video_id [String, nil] Identifier of the source video if this video is a remix. # # @param seconds [Symbol, OpenAI::Models::VideoSeconds] Duration of the generated clip in seconds. diff --git a/lib/openai/resources/files.rb b/lib/openai/resources/files.rb index 39a6207c..4a83d55b 100644 --- a/lib/openai/resources/files.rb +++ b/lib/openai/resources/files.rb @@ -10,20 +10,19 @@ class Files # up to 512 MB, and the size of all files uploaded by one organization can be up # to 1 TB. # - # The Assistants API supports files up to 2 million tokens and of specific file - # types. See the - # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for - # details. - # - # The Fine-tuning API only supports `.jsonl` files. 
The input also has certain - # required formats for fine-tuning - # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - # models. - # - # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also - # has a specific required - # [format](https://platform.openai.com/docs/api-reference/batch/request-input). + # - The Assistants API supports files up to 2 million tokens and of specific file + # types. See the + # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) + # for details. + # - The Fine-tuning API only supports `.jsonl` files. The input also has certain + # required formats for fine-tuning + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) + # or + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # models. + # - The Batch API only supports `.jsonl` files up to 200 MB in size. The input + # also has a specific required + # [format](https://platform.openai.com/docs/api-reference/batch/request-input). # # Please [contact us](https://help.openai.com/) if you need to increase these # storage limits. diff --git a/lib/openai/resources/images.rb b/lib/openai/resources/images.rb index b1c0164c..ae463f07 100644 --- a/lib/openai/resources/images.rb +++ b/lib/openai/resources/images.rb @@ -55,7 +55,7 @@ def create_variation(params) # # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s). # - # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] + # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features, es # # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind # @@ -114,7 +114,7 @@ def edit(params) # # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s). # - # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] + # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features, es # # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind # diff --git a/lib/openai/resources/realtime/calls.rb b/lib/openai/resources/realtime/calls.rb index b25dc4d7..138d5029 100644 --- a/lib/openai/resources/realtime/calls.rb +++ b/lib/openai/resources/realtime/calls.rb @@ -34,7 +34,7 @@ class Calls # # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeTracingConfig::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](/logs?api=traces # - # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] Controls how the realtime conversation is truncated prior to model inference. 
+ # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] When the number of tokens in a conversation exceeds the model's input token limi # # @param type [Symbol, :realtime] The type of session to create. Always `realtime` for the Realtime API. # diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb index 0b4cf3a1..9172f36e 100644 --- a/lib/openai/resources/responses.rb +++ b/lib/openai/resources/responses.rb @@ -559,6 +559,10 @@ def get_structured_output_models(parsed) in {text: {format: {type: :json_schema, schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}} parsed.dig(:text, :format).store(:schema, model.to_json_schema) + else + end + + case parsed in {tools: Array => tools} # rubocop:disable Metrics/BlockLength mapped = tools.map do |tool| diff --git a/lib/openai/resources/vector_stores/file_batches.rb b/lib/openai/resources/vector_stores/file_batches.rb index 99d3e7df..7880c9ec 100644 --- a/lib/openai/resources/vector_stores/file_batches.rb +++ b/lib/openai/resources/vector_stores/file_batches.rb @@ -9,22 +9,24 @@ class FileBatches # # Create a vector store file batch. # - # @overload create(vector_store_id, file_ids:, attributes: nil, chunking_strategy: nil, request_options: {}) + # @overload create(vector_store_id, attributes: nil, chunking_strategy: nil, file_ids: nil, files: nil, request_options: {}) # # @param vector_store_id [String] The ID of the vector store for which to create a File Batch. # - # @param file_ids [Array] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). 
If not set, will use the `auto` # + # @param file_ids [Array] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # + # @param files [Array] A list of objects that each include a `file_id` plus optional `attributes` or `c + # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch] # # @see OpenAI::Models::VectorStores::FileBatchCreateParams - def create(vector_store_id, params) + def create(vector_store_id, params = {}) parsed, options = OpenAI::VectorStores::FileBatchCreateParams.dump_request(params) @client.request( method: :post, diff --git a/lib/openai/version.rb b/lib/openai/version.rb index 348060c1..91426220 100644 --- a/lib/openai/version.rb +++ b/lib/openai/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module OpenAI - VERSION = "0.34.1" + VERSION = "0.35.0" end diff --git a/openai.gemspec b/openai.gemspec index cc0a7426..b2512036 100644 --- a/openai.gemspec +++ b/openai.gemspec @@ -13,6 +13,7 @@ Gem::Specification.new do |s| s.metadata["source_code_uri"] = "https://github.com/openai/openai-ruby" s.metadata["rubygems_mfa_required"] = false.to_s s.required_ruby_version = ">= 3.2.0" + s.license = "Apache-2.0" s.files = Dir[ "lib/**/*.rb", diff --git a/rbi/openai/internal/transport/base_client.rbi b/rbi/openai/internal/transport/base_client.rbi index a15bc545..879f4b9f 100644 --- a/rbi/openai/internal/transport/base_client.rbi +++ b/rbi/openai/internal/transport/base_client.rbi @@ -184,6 +184,11 @@ module OpenAI private def auth_headers end + # @api private + sig { returns(String) } + private def user_agent + end + # @api private sig { returns(String) } private def generate_idempotency_key diff --git a/rbi/openai/internal/type/base_model.rbi b/rbi/openai/internal/type/base_model.rbi index df16ad79..21cb6dfb 100644 --- a/rbi/openai/internal/type/base_model.rbi +++ b/rbi/openai/internal/type/base_model.rbi @@ -28,7 +28,7 @@ module OpenAI # # Assumes superclass fields are totally defined before fields are accessed / # defined on subclasses. - sig { params(child: T.self_type).void } + sig { params(child: OpenAI::Internal::Type::BaseModel).void } def inherited(child) end @@ -267,9 +267,13 @@ module OpenAI # Create a new instance of a model. sig do - params(data: T.any(T::Hash[Symbol, T.anything], T.self_type)).returns( - T.attached_class - ) + params( + data: + T.any( + T::Hash[Symbol, T.anything], + OpenAI::Internal::Type::BaseModel + ) + ).returns(T.attached_class) end def self.new(data = {}) end diff --git a/rbi/openai/models/custom_tool_input_format.rbi b/rbi/openai/models/custom_tool_input_format.rbi index 079defcf..d2f57068 100644 --- a/rbi/openai/models/custom_tool_input_format.rbi +++ b/rbi/openai/models/custom_tool_input_format.rbi @@ -27,6 +27,7 @@ module OpenAI sig { returns(Symbol) } attr_accessor :type + # Unconstrained free-form text. sig { params(type: Symbol).returns(T.attached_class) } def self.new( # Unconstrained text format. Always `text`. @@ -62,6 +63,7 @@ module OpenAI sig { returns(Symbol) } attr_accessor :type + # A grammar defined by the user. 
sig do params( definition: String, diff --git a/rbi/openai/models/realtime/realtime_session_create_request.rbi b/rbi/openai/models/realtime/realtime_session_create_request.rbi index 741c73d8..d7ac4fca 100644 --- a/rbi/openai/models/realtime/realtime_session_create_request.rbi +++ b/rbi/openai/models/realtime/realtime_session_create_request.rbi @@ -210,8 +210,19 @@ module OpenAI end attr_accessor :tracing - # Controls how the realtime conversation is truncated prior to model inference. - # The default is `auto`. + # When the number of tokens in a conversation exceeds the model's input token + # limit, the conversation will be truncated, meaning messages (starting from the + # oldest) will not be included in the model's context. A 32k context model with + # 4,096 max output tokens can only include 28,224 tokens in the context before + # truncation occurs. Clients can configure truncation behavior to truncate with a + # lower max token limit, which is an effective way to control token usage and + # cost. Truncation will reduce the number of cached tokens on the next turn + # (busting the cache), since messages are dropped from the beginning of the + # context. However, clients can also configure truncation to retain messages up to + # a fraction of the maximum context size, which will reduce the need for future + # truncations and thus improve the cache rate. Truncation can be disabled + # entirely, which means the server will never truncate but would instead return an + # error if the conversation exceeds the model's input token limit. sig do returns( T.nilable( @@ -329,8 +340,19 @@ module OpenAI # `auto` will create a trace for the session with default values for the workflow # name, group id, and metadata. tracing: nil, - # Controls how the realtime conversation is truncated prior to model inference. - # The default is `auto`. + # When the number of tokens in a conversation exceeds the model's input token + # limit, the conversation will be truncated, meaning messages (starting from the + # oldest) will not be included in the model's context. A 32k context model with + # 4,096 max output tokens can only include 28,224 tokens in the context before + # truncation occurs. Clients can configure truncation behavior to truncate with a + # lower max token limit, which is an effective way to control token usage and + # cost. Truncation will reduce the number of cached tokens on the next turn + # (busting the cache), since messages are dropped from the beginning of the + # context. However, clients can also configure truncation to retain messages up to + # a fraction of the maximum context size, which will reduce the need for future + # truncations and thus improve the cache rate. Truncation can be disabled + # entirely, which means the server will never truncate but would instead return an + # error if the conversation exceeds the model's input token limit. truncation: nil, # The type of session to create. Always `realtime` for the Realtime API. type: :realtime diff --git a/rbi/openai/models/realtime/realtime_session_create_response.rbi b/rbi/openai/models/realtime/realtime_session_create_response.rbi index 73f36107..896af7c2 100644 --- a/rbi/openai/models/realtime/realtime_session_create_response.rbi +++ b/rbi/openai/models/realtime/realtime_session_create_response.rbi @@ -223,8 +223,19 @@ module OpenAI end attr_accessor :tracing - # Controls how the realtime conversation is truncated prior to model inference. - # The default is `auto`.
+ # When the number of tokens in a conversation exceeds the model's input token + # limit, the conversation be truncated, meaning messages (starting from the + # oldest) will not be included in the model's context. A 32k context model with + # 4,096 max output tokens can only include 28,224 tokens in the context before + # truncation occurs. Clients can configure truncation behavior to truncate with a + # lower max token limit, which is an effective way to control token usage and + # cost. Truncation will reduce the number of cached tokens on the next turn + # (busting the cache), since messages are dropped from the beginning of the + # context. However, clients can also configure truncation to retain messages up to + # a fraction of the maximum context size, which will reduce the need for future + # truncations and thus improve the cache rate. Truncation can be disabled + # entirely, which means the server will never truncate but would instead return an + # error if the conversation exceeds the model's input token limit. sig do returns(T.nilable(OpenAI::Realtime::RealtimeTruncation::Variants)) end @@ -341,8 +352,19 @@ module OpenAI # `auto` will create a trace for the session with default values for the workflow # name, group id, and metadata. tracing: nil, - # Controls how the realtime conversation is truncated prior to model inference. - # The default is `auto`. + # When the number of tokens in a conversation exceeds the model's input token + # limit, the conversation be truncated, meaning messages (starting from the + # oldest) will not be included in the model's context. A 32k context model with + # 4,096 max output tokens can only include 28,224 tokens in the context before + # truncation occurs. Clients can configure truncation behavior to truncate with a + # lower max token limit, which is an effective way to control token usage and + # cost. Truncation will reduce the number of cached tokens on the next turn + # (busting the cache), since messages are dropped from the beginning of the + # context. However, clients can also configure truncation to retain messages up to + # a fraction of the maximum context size, which will reduce the need for future + # truncations and thus improve the cache rate. Truncation can be disabled + # entirely, which means the server will never truncate but would instead return an + # error if the conversation exceeds the model's input token limit. truncation: nil, # The type of session to create. Always `realtime` for the Realtime API. type: :realtime diff --git a/rbi/openai/models/realtime/realtime_truncation.rbi b/rbi/openai/models/realtime/realtime_truncation.rbi index a86ecf53..901cc027 100644 --- a/rbi/openai/models/realtime/realtime_truncation.rbi +++ b/rbi/openai/models/realtime/realtime_truncation.rbi @@ -3,8 +3,19 @@ module OpenAI module Models module Realtime - # Controls how the realtime conversation is truncated prior to model inference. - # The default is `auto`. + # When the number of tokens in a conversation exceeds the model's input token + # limit, the conversation be truncated, meaning messages (starting from the + # oldest) will not be included in the model's context. A 32k context model with + # 4,096 max output tokens can only include 28,224 tokens in the context before + # truncation occurs. Clients can configure truncation behavior to truncate with a + # lower max token limit, which is an effective way to control token usage and + # cost. 
Truncation will reduce the number of cached tokens on the next turn + # (busting the cache), since messages are dropped from the beginning of the + # context. However, clients can also configure truncation to retain messages up to + # a fraction of the maximum context size, which will reduce the need for future + # truncations and thus improve the cache rate. Truncation can be disabled + # entirely, which means the server will never truncate but would instead return an + # error if the conversation exceeds the model's input token limit. module RealtimeTruncation extend OpenAI::Internal::Type::Union diff --git a/rbi/openai/models/realtime/realtime_truncation_retention_ratio.rbi b/rbi/openai/models/realtime/realtime_truncation_retention_ratio.rbi index d7929b09..c3f33cca 100644 --- a/rbi/openai/models/realtime/realtime_truncation_retention_ratio.rbi +++ b/rbi/openai/models/realtime/realtime_truncation_retention_ratio.rbi @@ -12,8 +12,10 @@ module OpenAI ) end - # Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when the - # conversation exceeds the input token limit. + # Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when + # the conversation exceeds the input token limit. Setting this to `0.8` means that + # messages will be dropped until 80% of the maximum allowed tokens are used. This + # helps reduce the frequency of truncations and improve cache rates. sig { returns(Float) } attr_accessor :retention_ratio @@ -21,24 +23,100 @@ module OpenAI sig { returns(Symbol) } attr_accessor :type + # Optional custom token limits for this truncation strategy. If not provided, the + # model's default token limits will be used. + sig do + returns( + T.nilable( + OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits + ) + ) + end + attr_reader :token_limits + + sig do + params( + token_limits: + OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits::OrHash + ).void + end + attr_writer :token_limits + # Retain a fraction of the conversation tokens when the conversation exceeds the # input token limit. This allows you to amortize truncations across multiple # turns, which can help improve cached token usage. sig do - params(retention_ratio: Float, type: Symbol).returns(T.attached_class) + params( + retention_ratio: Float, + token_limits: + OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits::OrHash, + type: Symbol + ).returns(T.attached_class) end def self.new( - # Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when the - # conversation exceeds the input token limit. + # Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when + # the conversation exceeds the input token limit. Setting this to `0.8` means that + # messages will be dropped until 80% of the maximum allowed tokens are used. This + # helps reduce the frequency of truncations and improve cache rates. retention_ratio:, + # Optional custom token limits for this truncation strategy. If not provided, the + # model's default token limits will be used. + token_limits: nil, # Use retention ratio truncation. 
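The comment above describes retention-ratio truncation and the new optional `token_limits`. A small sketch of constructing the object added in this diff; the `0.8` ratio and 5,000-token limit are illustrative values, not defaults:

```ruby
require "openai"

# Keep roughly 80% of the post-instruction conversation after a truncation, and
# start truncating once the conversation exceeds 5,000 post-instruction tokens.
truncation = OpenAI::Realtime::RealtimeTruncationRetentionRatio.new(
  retention_ratio: 0.8,
  token_limits: {post_instructions: 5_000}
)

# Equivalent plain-hash form, usable wherever a truncation setting is accepted:
truncation_hash = {
  type: :retention_ratio,
  retention_ratio: 0.8,
  token_limits: {post_instructions: 5_000}
}
```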
type: :retention_ratio ) end - sig { override.returns({ retention_ratio: Float, type: Symbol }) } + sig do + override.returns( + { + retention_ratio: Float, + type: Symbol, + token_limits: + OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits + } + ) + end def to_hash end + + class TokenLimits < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits, + OpenAI::Internal::AnyHash + ) + end + + # Maximum tokens allowed in the conversation after instructions (which including + # tool definitions). For example, setting this to 5,000 would mean that truncation + # would occur when the conversation exceeds 5,000 tokens after instructions. This + # cannot be higher than the model's context window size minus the maximum output + # tokens. + sig { returns(T.nilable(Integer)) } + attr_reader :post_instructions + + sig { params(post_instructions: Integer).void } + attr_writer :post_instructions + + # Optional custom token limits for this truncation strategy. If not provided, the + # model's default token limits will be used. + sig { params(post_instructions: Integer).returns(T.attached_class) } + def self.new( + # Maximum tokens allowed in the conversation after instructions (which including + # tool definitions). For example, setting this to 5,000 would mean that truncation + # would occur when the conversation exceeds 5,000 tokens after instructions. This + # cannot be higher than the model's context window size minus the maximum output + # tokens. + post_instructions: nil + ) + end + + sig { override.returns({ post_instructions: Integer }) } + def to_hash + end + end end end end diff --git a/rbi/openai/models/reasoning.rbi b/rbi/openai/models/reasoning.rbi index 530a4b32..7d1e6b53 100644 --- a/rbi/openai/models/reasoning.rbi +++ b/rbi/openai/models/reasoning.rbi @@ -28,6 +28,8 @@ module OpenAI # A summary of the reasoning performed by the model. This can be useful for # debugging and understanding the model's reasoning process. One of `auto`, # `concise`, or `detailed`. + # + # `concise` is only supported for `computer-use-preview` models. sig { returns(T.nilable(OpenAI::Reasoning::Summary::OrSymbol)) } attr_accessor :summary @@ -62,6 +64,8 @@ module OpenAI # A summary of the reasoning performed by the model. This can be useful for # debugging and understanding the model's reasoning process. One of `auto`, # `concise`, or `detailed`. + # + # `concise` is only supported for `computer-use-preview` models. summary: nil ) end @@ -109,6 +113,8 @@ module OpenAI # A summary of the reasoning performed by the model. This can be useful for # debugging and understanding the model's reasoning process. One of `auto`, # `concise`, or `detailed`. + # + # `concise` is only supported for `computer-use-preview` models. module Summary extend OpenAI::Internal::Type::Enum diff --git a/rbi/openai/models/responses/custom_tool.rbi b/rbi/openai/models/responses/custom_tool.rbi index 598ee419..1085f508 100644 --- a/rbi/openai/models/responses/custom_tool.rbi +++ b/rbi/openai/models/responses/custom_tool.rbi @@ -48,6 +48,8 @@ module OpenAI end attr_writer :format_ + # A custom tool that processes input using a specified format. 
Learn more about + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) sig do params( name: String, diff --git a/rbi/openai/models/responses/file_search_tool.rbi b/rbi/openai/models/responses/file_search_tool.rbi index 29ff2004..d8ab94c2 100644 --- a/rbi/openai/models/responses/file_search_tool.rbi +++ b/rbi/openai/models/responses/file_search_tool.rbi @@ -125,6 +125,25 @@ module OpenAI ) end + # Weights that control how reciprocal rank fusion balances semantic embedding + # matches versus sparse keyword matches when hybrid search is enabled. + sig do + returns( + T.nilable( + OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch + ) + ) + end + attr_reader :hybrid_search + + sig do + params( + hybrid_search: + OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch::OrHash + ).void + end + attr_writer :hybrid_search + # The ranker to use for the file search. sig do returns( @@ -155,12 +174,17 @@ module OpenAI # Ranking options for search. sig do params( + hybrid_search: + OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch::OrHash, ranker: OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::OrSymbol, score_threshold: Float ).returns(T.attached_class) end def self.new( + # Weights that control how reciprocal rank fusion balances semantic embedding + # matches versus sparse keyword matches when hybrid search is enabled. + hybrid_search: nil, # The ranker to use for the file search. ranker: nil, # The score threshold for the file search, a number between 0 and 1. Numbers @@ -173,6 +197,8 @@ module OpenAI sig do override.returns( { + hybrid_search: + OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch, ranker: OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::OrSymbol, score_threshold: Float @@ -182,6 +208,45 @@ module OpenAI def to_hash end + class HybridSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch, + OpenAI::Internal::AnyHash + ) + end + + # The weight of the embedding in the reciprocal ranking fusion. + sig { returns(Float) } + attr_accessor :embedding_weight + + # The weight of the text in the reciprocal ranking fusion. + sig { returns(Float) } + attr_accessor :text_weight + + # Weights that control how reciprocal rank fusion balances semantic embedding + # matches versus sparse keyword matches when hybrid search is enabled. + sig do + params(embedding_weight: Float, text_weight: Float).returns( + T.attached_class + ) + end + def self.new( + # The weight of the embedding in the reciprocal ranking fusion. + embedding_weight:, + # The weight of the text in the reciprocal ranking fusion. + text_weight: + ) + end + + sig do + override.returns({ embedding_weight: Float, text_weight: Float }) + end + def to_hash + end + end + # The ranker to use for the file search. 
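The `hybrid_search` weights added above plug into the file-search tool's `ranking_options`. A hedged sketch of a tool definition using them; the vector store ID and the specific weights are illustrative:

```ruby
file_search_tool = {
  type: :file_search,
  vector_store_ids: ["vs_example_123"],   # hypothetical vector store
  ranking_options: {
    ranker: :auto,
    score_threshold: 0.5,
    # How reciprocal rank fusion balances embedding vs. sparse keyword matches:
    hybrid_search: {embedding_weight: 0.7, text_weight: 0.3}
  }
}

# Passed like any other tool, e.g.:
#   client.responses.create(model: "gpt-5", input: "...", tools: [file_search_tool])
```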
module Ranker extend OpenAI::Internal::Type::Enum diff --git a/rbi/openai/models/responses/response_content.rbi b/rbi/openai/models/responses/response_content.rbi index 0f9d5bb1..1a119ccb 100644 --- a/rbi/openai/models/responses/response_content.rbi +++ b/rbi/openai/models/responses/response_content.rbi @@ -13,7 +13,6 @@ module OpenAI OpenAI::Responses::ResponseInputText, OpenAI::Responses::ResponseInputImage, OpenAI::Responses::ResponseInputFile, - OpenAI::Responses::ResponseInputAudio, OpenAI::Responses::ResponseOutputText, OpenAI::Responses::ResponseOutputRefusal, OpenAI::Responses::ResponseContent::ReasoningTextContent diff --git a/rbi/openai/models/responses/response_input_content.rbi b/rbi/openai/models/responses/response_input_content.rbi index 07754cc8..f18545cc 100644 --- a/rbi/openai/models/responses/response_input_content.rbi +++ b/rbi/openai/models/responses/response_input_content.rbi @@ -12,8 +12,7 @@ module OpenAI T.any( OpenAI::Responses::ResponseInputText, OpenAI::Responses::ResponseInputImage, - OpenAI::Responses::ResponseInputFile, - OpenAI::Responses::ResponseInputAudio + OpenAI::Responses::ResponseInputFile ) end diff --git a/rbi/openai/models/responses/response_input_item.rbi b/rbi/openai/models/responses/response_input_item.rbi index 359bc51c..a8650b01 100644 --- a/rbi/openai/models/responses/response_input_item.rbi +++ b/rbi/openai/models/responses/response_input_item.rbi @@ -55,8 +55,7 @@ module OpenAI T.any( OpenAI::Responses::ResponseInputText, OpenAI::Responses::ResponseInputImage, - OpenAI::Responses::ResponseInputFile, - OpenAI::Responses::ResponseInputAudio + OpenAI::Responses::ResponseInputFile ) ] ) @@ -118,8 +117,7 @@ module OpenAI T.any( OpenAI::Responses::ResponseInputText::OrHash, OpenAI::Responses::ResponseInputImage::OrHash, - OpenAI::Responses::ResponseInputFile::OrHash, - OpenAI::Responses::ResponseInputAudio::OrHash + OpenAI::Responses::ResponseInputFile::OrHash ) ], role: @@ -152,8 +150,7 @@ module OpenAI T.any( OpenAI::Responses::ResponseInputText, OpenAI::Responses::ResponseInputImage, - OpenAI::Responses::ResponseInputFile, - OpenAI::Responses::ResponseInputAudio + OpenAI::Responses::ResponseInputFile ) ], role: diff --git a/rbi/openai/models/responses/response_input_message_item.rbi b/rbi/openai/models/responses/response_input_message_item.rbi index fa778c0c..f36d17e6 100644 --- a/rbi/openai/models/responses/response_input_message_item.rbi +++ b/rbi/openai/models/responses/response_input_message_item.rbi @@ -75,8 +75,7 @@ module OpenAI T.any( OpenAI::Responses::ResponseInputText::OrHash, OpenAI::Responses::ResponseInputImage::OrHash, - OpenAI::Responses::ResponseInputFile::OrHash, - OpenAI::Responses::ResponseInputAudio::OrHash + OpenAI::Responses::ResponseInputFile::OrHash ) ], role: OpenAI::Responses::ResponseInputMessageItem::Role::OrSymbol, diff --git a/rbi/openai/models/responses/response_output_text.rbi b/rbi/openai/models/responses/response_output_text.rbi index 7b73351a..9369e30c 100644 --- a/rbi/openai/models/responses/response_output_text.rbi +++ b/rbi/openai/models/responses/response_output_text.rbi @@ -27,6 +27,11 @@ module OpenAI end attr_accessor :annotations + sig do + returns(T::Array[OpenAI::Responses::ResponseOutputText::Logprob]) + end + attr_accessor :logprobs + # The text output from the model. 
sig { returns(String) } attr_accessor :text @@ -39,21 +44,6 @@ module OpenAI sig { returns(Symbol) } attr_accessor :type - sig do - returns( - T.nilable(T::Array[OpenAI::Responses::ResponseOutputText::Logprob]) - ) - end - attr_reader :logprobs - - sig do - params( - logprobs: - T::Array[OpenAI::Responses::ResponseOutputText::Logprob::OrHash] - ).void - end - attr_writer :logprobs - # A text output from the model. sig do params( @@ -66,18 +56,18 @@ module OpenAI OpenAI::Responses::ResponseOutputText::Annotation::FilePath::OrHash ) ], - text: String, logprobs: T::Array[OpenAI::Responses::ResponseOutputText::Logprob::OrHash], + text: String, type: Symbol ).returns(T.attached_class) end def self.new( # The annotations of the text output. annotations:, + logprobs:, # The text output from the model. text:, - logprobs: nil, # The type of the output text. Always `output_text`. type: :output_text ) @@ -95,9 +85,10 @@ module OpenAI OpenAI::Responses::ResponseOutputText::Annotation::FilePath ) ], + logprobs: + T::Array[OpenAI::Responses::ResponseOutputText::Logprob], text: String, - type: Symbol, - logprobs: T::Array[OpenAI::Responses::ResponseOutputText::Logprob] + type: Symbol } ) end diff --git a/rbi/openai/models/responses/tool.rbi b/rbi/openai/models/responses/tool.rbi index 74fa7744..2de15c79 100644 --- a/rbi/openai/models/responses/tool.rbi +++ b/rbi/openai/models/responses/tool.rbi @@ -713,26 +713,94 @@ module OpenAI sig { params(file_ids: T::Array[String]).void } attr_writer :file_ids + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::OrSymbol + ) + ) + end + attr_accessor :memory_limit + # Configuration for a code interpreter container. Optionally specify the IDs of # the files to run the code on. sig do - params(file_ids: T::Array[String], type: Symbol).returns( - T.attached_class - ) + params( + file_ids: T::Array[String], + memory_limit: + T.nilable( + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::OrSymbol + ), + type: Symbol + ).returns(T.attached_class) end def self.new( # An optional list of uploaded files to make available to your code. file_ids: nil, + memory_limit: nil, # Always `auto`. 
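The container options above gain an optional `memory_limit` for `auto` code-interpreter containers. A minimal sketch of a tool definition using it; the file ID is hypothetical, and `memory_limit` takes one of the enum values added in this diff:

```ruby
code_interpreter_tool = {
  type: :code_interpreter,
  container: {
    type: :auto,
    file_ids: ["file-abc123"],   # hypothetical uploaded file
    memory_limit: :"4g"          # one of :"1g", :"4g", :"16g", :"64g"
  }
}
```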
type: :auto ) end sig do - override.returns({ type: Symbol, file_ids: T::Array[String] }) + override.returns( + { + type: Symbol, + file_ids: T::Array[String], + memory_limit: + T.nilable( + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::OrSymbol + ) + } + ) end def to_hash end + + module MemoryLimit + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MEMORY_LIMIT_1G = + T.let( + :"1g", + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::TaggedSymbol + ) + MEMORY_LIMIT_4G = + T.let( + :"4g", + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::TaggedSymbol + ) + MEMORY_LIMIT_16G = + T.let( + :"16g", + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::TaggedSymbol + ) + MEMORY_LIMIT_64G = + T.let( + :"64g", + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::TaggedSymbol + ] + ) + end + def self.values + end + end end sig do @@ -1335,6 +1403,7 @@ module OpenAI sig { returns(Symbol) } attr_accessor :type + # A tool that allows the model to execute shell commands in a local environment. sig { params(type: Symbol).returns(T.attached_class) } def self.new( # The type of the local shell tool. Always `local_shell`. diff --git a/rbi/openai/models/vector_stores/file_batch_create_params.rbi b/rbi/openai/models/vector_stores/file_batch_create_params.rbi index c4e42f6b..67238df6 100644 --- a/rbi/openai/models/vector_stores/file_batch_create_params.rbi +++ b/rbi/openai/models/vector_stores/file_batch_create_params.rbi @@ -15,12 +15,6 @@ module OpenAI ) end - # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. - sig { returns(T::Array[String]) } - attr_accessor :file_ids - # Set of 16 key-value pairs that can be attached to an object. This can be useful # for storing additional information about the object in a structured format, and # querying for objects via API or the dashboard. Keys are strings with a maximum @@ -63,9 +57,41 @@ module OpenAI end attr_writer :chunking_strategy + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. If `attributes` or `chunking_strategy` are provided, they will be applied + # to all files in the batch. Mutually exclusive with `files`. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + # A list of objects that each include a `file_id` plus optional `attributes` or + # `chunking_strategy`. Use this when you need to override metadata for specific + # files. The global `attributes` or `chunking_strategy` will be ignored and must + # be specified for each file. Mutually exclusive with `file_ids`. 
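`file_ids` and the new `files` parameter are mutually exclusive ways to create a vector-store file batch; `files` lets each entry carry its own metadata. A sketch assuming `OPENAI_API_KEY` is set in the environment; all IDs and attribute values are hypothetical:

```ruby
require "openai"

client = OpenAI::Client.new

# Shared configuration for every file in the batch:
client.vector_stores.file_batches.create("vs_abc123", file_ids: %w[file-111 file-222])

# Per-file configuration (mutually exclusive with `file_ids`):
client.vector_stores.file_batches.create(
  "vs_abc123",
  files: [
    {file_id: "file-111", attributes: {category: "contract"}},
    {file_id: "file-222", attributes: {category: "invoice"}}
  ]
)
```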
+ sig do + returns( + T.nilable( + T::Array[OpenAI::VectorStores::FileBatchCreateParams::File] + ) + ) + end + attr_reader :files + + sig do + params( + files: + T::Array[ + OpenAI::VectorStores::FileBatchCreateParams::File::OrHash + ] + ).void + end + attr_writer :files + sig do params( - file_ids: T::Array[String], attributes: T.nilable( T::Hash[ @@ -78,14 +104,15 @@ module OpenAI OpenAI::AutoFileChunkingStrategyParam::OrHash, OpenAI::StaticFileChunkingStrategyObjectParam::OrHash ), + file_ids: T::Array[String], + files: + T::Array[ + OpenAI::VectorStores::FileBatchCreateParams::File::OrHash + ], request_options: OpenAI::RequestOptions::OrHash ).returns(T.attached_class) end def self.new( - # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. - file_ids:, # Set of 16 key-value pairs that can be attached to an object. This can be useful # for storing additional information about the object in a structured format, and # querying for objects via API or the dashboard. Keys are strings with a maximum @@ -95,6 +122,16 @@ module OpenAI # The chunking strategy used to chunk the file(s). If not set, will use the `auto` # strategy. Only applicable if `file_ids` is non-empty. chunking_strategy: nil, + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. If `attributes` or `chunking_strategy` are provided, they will be applied + # to all files in the batch. Mutually exclusive with `files`. + file_ids: nil, + # A list of objects that each include a `file_id` plus optional `attributes` or + # `chunking_strategy`. Use this when you need to override metadata for specific + # files. The global `attributes` or `chunking_strategy` will be ignored and must + # be specified for each file. Mutually exclusive with `file_ids`. + files: nil, request_options: {} ) end @@ -102,7 +139,6 @@ module OpenAI sig do override.returns( { - file_ids: T::Array[String], attributes: T.nilable( T::Hash[ @@ -115,6 +151,9 @@ module OpenAI OpenAI::AutoFileChunkingStrategyParam, OpenAI::StaticFileChunkingStrategyObjectParam ), + file_ids: T::Array[String], + files: + T::Array[OpenAI::VectorStores::FileBatchCreateParams::File], request_options: OpenAI::RequestOptions } ) @@ -137,6 +176,136 @@ module OpenAI def self.variants end end + + class File < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileBatchCreateParams::File, + OpenAI::Internal::AnyHash + ) + end + + # A [File](https://platform.openai.com/docs/api-reference/files) ID that the + # vector store should use. Useful for tools like `file_search` that can access + # files. + sig { returns(String) } + attr_accessor :file_id + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + sig do + returns( + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileBatchCreateParams::File::Attribute::Variants + ] + ) + ) + end + attr_accessor :attributes + + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. 
Only applicable if `file_ids` is non-empty. + sig do + returns( + T.nilable( + T.any( + OpenAI::AutoFileChunkingStrategyParam, + OpenAI::StaticFileChunkingStrategyObjectParam + ) + ) + ) + end + attr_reader :chunking_strategy + + sig do + params( + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ) + ).void + end + attr_writer :chunking_strategy + + sig do + params( + file_id: String, + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileBatchCreateParams::File::Attribute::Variants + ] + ), + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # A [File](https://platform.openai.com/docs/api-reference/files) ID that the + # vector store should use. Useful for tools like `file_search` that can access + # files. + file_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes: nil, + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + chunking_strategy: nil + ) + end + + sig do + override.returns( + { + file_id: String, + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileBatchCreateParams::File::Attribute::Variants + ] + ), + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam, + OpenAI::StaticFileChunkingStrategyObjectParam + ) + } + ) + end + def to_hash + end + + module Attribute + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(String, Float, T::Boolean) } + + sig do + override.returns( + T::Array[ + OpenAI::VectorStores::FileBatchCreateParams::File::Attribute::Variants + ] + ) + end + def self.variants + end + end + end end end end diff --git a/rbi/openai/models/video.rbi b/rbi/openai/models/video.rbi index 096195d6..67b73fa7 100644 --- a/rbi/openai/models/video.rbi +++ b/rbi/openai/models/video.rbi @@ -40,6 +40,10 @@ module OpenAI sig { returns(Integer) } attr_accessor :progress + # The prompt that was used to generate the video. + sig { returns(T.nilable(String)) } + attr_accessor :prompt + # Identifier of the source video if this video is a remix. sig { returns(T.nilable(String)) } attr_accessor :remixed_from_video_id @@ -66,6 +70,7 @@ module OpenAI expires_at: T.nilable(Integer), model: OpenAI::VideoModel::OrSymbol, progress: Integer, + prompt: T.nilable(String), remixed_from_video_id: T.nilable(String), seconds: OpenAI::VideoSeconds::OrSymbol, size: OpenAI::VideoSize::OrSymbol, @@ -88,6 +93,8 @@ module OpenAI model:, # Approximate completion percentage for the generation task. progress:, + # The prompt that was used to generate the video. + prompt:, # Identifier of the source video if this video is a remix. remixed_from_video_id:, # Duration of the generated clip in seconds. 
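Videos now expose the `prompt` they were generated from. A small sketch, assuming a configured client and a hypothetical video ID; `prompt` is nullable, so guard for `nil`:

```ruby
require "openai"

client = OpenAI::Client.new
video = client.videos.retrieve("video_abc123") # hypothetical video ID
puts(video.prompt || "(prompt not available)")
```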
@@ -112,6 +119,7 @@ module OpenAI model: OpenAI::VideoModel::TaggedSymbol, object: Symbol, progress: Integer, + prompt: T.nilable(String), remixed_from_video_id: T.nilable(String), seconds: OpenAI::VideoSeconds::TaggedSymbol, size: OpenAI::VideoSize::TaggedSymbol, diff --git a/rbi/openai/resources/files.rbi b/rbi/openai/resources/files.rbi index dd7f80af..258c782a 100644 --- a/rbi/openai/resources/files.rbi +++ b/rbi/openai/resources/files.rbi @@ -7,20 +7,19 @@ module OpenAI # up to 512 MB, and the size of all files uploaded by one organization can be up # to 1 TB. # - # The Assistants API supports files up to 2 million tokens and of specific file - # types. See the - # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for - # details. - # - # The Fine-tuning API only supports `.jsonl` files. The input also has certain - # required formats for fine-tuning - # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - # models. - # - # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also - # has a specific required - # [format](https://platform.openai.com/docs/api-reference/batch/request-input). + # - The Assistants API supports files up to 2 million tokens and of specific file + # types. See the + # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) + # for details. + # - The Fine-tuning API only supports `.jsonl` files. The input also has certain + # required formats for fine-tuning + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) + # or + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # models. + # - The Batch API only supports `.jsonl` files up to 200 MB in size. The input + # also has a specific required + # [format](https://platform.openai.com/docs/api-reference/batch/request-input). # # Please [contact us](https://help.openai.com/) if you need to increase these # storage limits. diff --git a/rbi/openai/resources/realtime/calls.rbi b/rbi/openai/resources/realtime/calls.rbi index f1df77f2..4b6876ab 100644 --- a/rbi/openai/resources/realtime/calls.rbi +++ b/rbi/openai/resources/realtime/calls.rbi @@ -57,7 +57,7 @@ module OpenAI end def accept( # The identifier for the call provided in the - # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook_events/realtime/call/incoming) + # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook-events/realtime/call/incoming) # webhook. call_id, # Configuration for input and output audio. @@ -105,8 +105,19 @@ module OpenAI # `auto` will create a trace for the session with default values for the workflow # name, group id, and metadata. tracing: nil, - # Controls how the realtime conversation is truncated prior to model inference. - # The default is `auto`. + # When the number of tokens in a conversation exceeds the model's input token + # limit, the conversation be truncated, meaning messages (starting from the + # oldest) will not be included in the model's context. A 32k context model with + # 4,096 max output tokens can only include 28,224 tokens in the context before + # truncation occurs. Clients can configure truncation behavior to truncate with a + # lower max token limit, which is an effective way to control token usage and + # cost. 
Truncation will reduce the number of cached tokens on the next turn + # (busting the cache), since messages are dropped from the beginning of the + # context. However, clients can also configure truncation to retain messages up to + # a fraction of the maximum context size, which will reduce the need for future + # truncations and thus improve the cache rate. Truncation can be disabled + # entirely, which means the server will never truncate but would instead return an + # error if the conversation exceeds the model's input token limit. truncation: nil, # The type of session to create. Always `realtime` for the Realtime API. type: :realtime, @@ -123,7 +134,7 @@ module OpenAI end def hangup( # The identifier for the call. For SIP calls, use the value provided in the - # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook_events/realtime/call/incoming) + # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook-events/realtime/call/incoming) # webhook. For WebRTC sessions, reuse the call ID returned in the `Location` # header when creating the call with # [`POST /v1/realtime/calls`](https://platform.openai.com/docs/api-reference/realtime/create-call). @@ -142,7 +153,7 @@ module OpenAI end def refer( # The identifier for the call provided in the - # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook_events/realtime/call/incoming) + # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook-events/realtime/call/incoming) # webhook. call_id, # URI that should appear in the SIP Refer-To header. Supports values like @@ -162,7 +173,7 @@ module OpenAI end def reject( # The identifier for the call provided in the - # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook_events/realtime/call/incoming) + # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook-events/realtime/call/incoming) # webhook. call_id, # SIP response code to send back to the caller. Defaults to `603` (Decline) when diff --git a/rbi/openai/resources/vector_stores/file_batches.rbi b/rbi/openai/resources/vector_stores/file_batches.rbi index c6aca892..6455b94a 100644 --- a/rbi/openai/resources/vector_stores/file_batches.rbi +++ b/rbi/openai/resources/vector_stores/file_batches.rbi @@ -8,7 +8,6 @@ module OpenAI sig do params( vector_store_id: String, - file_ids: T::Array[String], attributes: T.nilable( T::Hash[ @@ -21,16 +20,17 @@ module OpenAI OpenAI::AutoFileChunkingStrategyParam::OrHash, OpenAI::StaticFileChunkingStrategyObjectParam::OrHash ), + file_ids: T::Array[String], + files: + T::Array[ + OpenAI::VectorStores::FileBatchCreateParams::File::OrHash + ], request_options: OpenAI::RequestOptions::OrHash ).returns(OpenAI::VectorStores::VectorStoreFileBatch) end def create( # The ID of the vector store for which to create a File Batch. vector_store_id, - # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. - file_ids:, # Set of 16 key-value pairs that can be attached to an object. This can be useful # for storing additional information about the object in a structured format, and # querying for objects via API or the dashboard. Keys are strings with a maximum @@ -40,6 +40,16 @@ module OpenAI # The chunking strategy used to chunk the file(s). If not set, will use the `auto` # strategy. Only applicable if `file_ids` is non-empty. 
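When using `files`, the chunking strategy can also be overridden per file. A hedged sketch: the nested `static` shape follows the general vector-store chunking-strategy parameters rather than anything spelled out in this diff, and all IDs and token counts are illustrative:

```ruby
require "openai"

client = OpenAI::Client.new

client.vector_stores.file_batches.create(
  "vs_abc123",
  files: [
    {file_id: "file-111", chunking_strategy: {type: :auto}},
    {
      file_id: "file-222",
      chunking_strategy: {
        type: :static,
        static: {max_chunk_size_tokens: 800, chunk_overlap_tokens: 400}
      }
    }
  ]
)
```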
chunking_strategy: nil, + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. If `attributes` or `chunking_strategy` are provided, they will be applied + # to all files in the batch. Mutually exclusive with `files`. + file_ids: nil, + # A list of objects that each include a `file_id` plus optional `attributes` or + # `chunking_strategy`. Use this when you need to override metadata for specific + # files. The global `attributes` or `chunking_strategy` will be ignored and must + # be specified for each file. Mutually exclusive with `file_ids`. + files: nil, request_options: {} ) end diff --git a/sig/openai/internal/transport/base_client.rbs b/sig/openai/internal/transport/base_client.rbs index db2e8ff4..c58554c4 100644 --- a/sig/openai/internal/transport/base_client.rbs +++ b/sig/openai/internal/transport/base_client.rbs @@ -87,6 +87,8 @@ module OpenAI private def auth_headers: -> ::Hash[String, String] + private def user_agent: -> String + private def generate_idempotency_key: -> String private def build_request: ( diff --git a/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs b/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs index 15d9917c..d37a54e3 100644 --- a/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs +++ b/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs @@ -2,19 +2,46 @@ module OpenAI module Models module Realtime type realtime_truncation_retention_ratio = - { retention_ratio: Float, type: :retention_ratio } + { + retention_ratio: Float, + type: :retention_ratio, + token_limits: OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits + } class RealtimeTruncationRetentionRatio < OpenAI::Internal::Type::BaseModel attr_accessor retention_ratio: Float attr_accessor type: :retention_ratio + attr_reader token_limits: OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits? + + def token_limits=: ( + OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits + ) -> OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits + def initialize: ( retention_ratio: Float, + ?token_limits: OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits, ?type: :retention_ratio ) -> void - def to_hash: -> { retention_ratio: Float, type: :retention_ratio } + def to_hash: -> { + retention_ratio: Float, + type: :retention_ratio, + token_limits: OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits + } + + type token_limits = { post_instructions: Integer } + + class TokenLimits < OpenAI::Internal::Type::BaseModel + attr_reader post_instructions: Integer? + + def post_instructions=: (Integer) -> Integer + + def initialize: (?post_instructions: Integer) -> void + + def to_hash: -> { post_instructions: Integer } + end end end end diff --git a/sig/openai/models/responses/file_search_tool.rbs b/sig/openai/models/responses/file_search_tool.rbs index cc1a7d01..486a0581 100644 --- a/sig/openai/models/responses/file_search_tool.rbs +++ b/sig/openai/models/responses/file_search_tool.rbs @@ -53,11 +53,18 @@ module OpenAI type ranking_options = { + hybrid_search: OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch, ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker, score_threshold: Float } class RankingOptions < OpenAI::Internal::Type::BaseModel + attr_reader hybrid_search: OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch? 
+ + def hybrid_search=: ( + OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch + ) -> OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch + attr_reader ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker? def ranker=: ( @@ -69,15 +76,32 @@ module OpenAI def score_threshold=: (Float) -> Float def initialize: ( + ?hybrid_search: OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch, ?ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker, ?score_threshold: Float ) -> void def to_hash: -> { + hybrid_search: OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch, ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker, score_threshold: Float } + type hybrid_search = { embedding_weight: Float, text_weight: Float } + + class HybridSearch < OpenAI::Internal::Type::BaseModel + attr_accessor embedding_weight: Float + + attr_accessor text_weight: Float + + def initialize: ( + embedding_weight: Float, + text_weight: Float + ) -> void + + def to_hash: -> { embedding_weight: Float, text_weight: Float } + end + type ranker = :auto | :"default-2024-11-15" module Ranker diff --git a/sig/openai/models/responses/response_content.rbs b/sig/openai/models/responses/response_content.rbs index 4df9f00a..08ef3dc5 100644 --- a/sig/openai/models/responses/response_content.rbs +++ b/sig/openai/models/responses/response_content.rbs @@ -5,7 +5,6 @@ module OpenAI OpenAI::Responses::ResponseInputText | OpenAI::Responses::ResponseInputImage | OpenAI::Responses::ResponseInputFile - | OpenAI::Responses::ResponseInputAudio | OpenAI::Responses::ResponseOutputText | OpenAI::Responses::ResponseOutputRefusal | OpenAI::Responses::ResponseContent::ReasoningTextContent diff --git a/sig/openai/models/responses/response_input_content.rbs b/sig/openai/models/responses/response_input_content.rbs index 1ec4ec96..81fa4a14 100644 --- a/sig/openai/models/responses/response_input_content.rbs +++ b/sig/openai/models/responses/response_input_content.rbs @@ -5,7 +5,6 @@ module OpenAI OpenAI::Responses::ResponseInputText | OpenAI::Responses::ResponseInputImage | OpenAI::Responses::ResponseInputFile - | OpenAI::Responses::ResponseInputAudio module ResponseInputContent extend OpenAI::Internal::Type::Union diff --git a/sig/openai/models/responses/response_output_text.rbs b/sig/openai/models/responses/response_output_text.rbs index c1ad5888..d0e9a420 100644 --- a/sig/openai/models/responses/response_output_text.rbs +++ b/sig/openai/models/responses/response_output_text.rbs @@ -4,36 +4,32 @@ module OpenAI type response_output_text = { annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation], + logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob], text: String, - type: :output_text, - logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob] + type: :output_text } class ResponseOutputText < OpenAI::Internal::Type::BaseModel attr_accessor annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation] + attr_accessor logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob] + attr_accessor text: String attr_accessor type: :output_text - attr_reader logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob]? 
- - def logprobs=: ( - ::Array[OpenAI::Responses::ResponseOutputText::Logprob] - ) -> ::Array[OpenAI::Responses::ResponseOutputText::Logprob] - def initialize: ( annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation], + logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob], text: String, - ?logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob], ?type: :output_text ) -> void def to_hash: -> { annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation], + logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob], text: String, - type: :output_text, - logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob] + type: :output_text } type annotation = diff --git a/sig/openai/models/responses/tool.rbs b/sig/openai/models/responses/tool.rbs index 27f20f94..90caad98 100644 --- a/sig/openai/models/responses/tool.rbs +++ b/sig/openai/models/responses/tool.rbs @@ -258,7 +258,11 @@ module OpenAI extend OpenAI::Internal::Type::Union type code_interpreter_tool_auto = - { type: :auto, file_ids: ::Array[String] } + { + type: :auto, + file_ids: ::Array[String], + memory_limit: OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::memory_limit? + } class CodeInterpreterToolAuto < OpenAI::Internal::Type::BaseModel attr_accessor type: :auto @@ -267,9 +271,32 @@ module OpenAI def file_ids=: (::Array[String]) -> ::Array[String] - def initialize: (?file_ids: ::Array[String], ?type: :auto) -> void + attr_accessor memory_limit: OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::memory_limit? + + def initialize: ( + ?file_ids: ::Array[String], + ?memory_limit: OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::memory_limit?, + ?type: :auto + ) -> void + + def to_hash: -> { + type: :auto, + file_ids: ::Array[String], + memory_limit: OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::memory_limit? 
+ } + + type memory_limit = :"1g" | :"4g" | :"16g" | :"64g" - def to_hash: -> { type: :auto, file_ids: ::Array[String] } + module MemoryLimit + extend OpenAI::Internal::Type::Enum + + MEMORY_LIMIT_1G: :"1g" + MEMORY_LIMIT_4G: :"4g" + MEMORY_LIMIT_16G: :"16g" + MEMORY_LIMIT_64G: :"64g" + + def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::memory_limit] + end end def self?.variants: -> ::Array[OpenAI::Models::Responses::Tool::CodeInterpreter::container] diff --git a/sig/openai/models/vector_stores/file_batch_create_params.rbs b/sig/openai/models/vector_stores/file_batch_create_params.rbs index b9eca2b5..a9044a7f 100644 --- a/sig/openai/models/vector_stores/file_batch_create_params.rbs +++ b/sig/openai/models/vector_stores/file_batch_create_params.rbs @@ -3,9 +3,10 @@ module OpenAI module VectorStores type file_batch_create_params = { - file_ids: ::Array[String], attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?, - chunking_strategy: OpenAI::Models::file_chunking_strategy_param + chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + file_ids: ::Array[String], + files: ::Array[OpenAI::VectorStores::FileBatchCreateParams::File] } & OpenAI::Internal::Type::request_parameters @@ -13,8 +14,6 @@ module OpenAI extend OpenAI::Internal::Type::RequestParameters::Converter include OpenAI::Internal::Type::RequestParameters - attr_accessor file_ids: ::Array[String] - attr_accessor attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]? attr_reader chunking_strategy: OpenAI::Models::file_chunking_strategy_param? @@ -23,17 +22,29 @@ module OpenAI OpenAI::Models::file_chunking_strategy_param ) -> OpenAI::Models::file_chunking_strategy_param + attr_reader file_ids: ::Array[String]? + + def file_ids=: (::Array[String]) -> ::Array[String] + + attr_reader files: ::Array[OpenAI::VectorStores::FileBatchCreateParams::File]? + + def files=: ( + ::Array[OpenAI::VectorStores::FileBatchCreateParams::File] + ) -> ::Array[OpenAI::VectorStores::FileBatchCreateParams::File] + def initialize: ( - file_ids: ::Array[String], ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?, ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + ?file_ids: ::Array[String], + ?files: ::Array[OpenAI::VectorStores::FileBatchCreateParams::File], ?request_options: OpenAI::request_opts ) -> void def to_hash: -> { - file_ids: ::Array[String], attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?, chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + file_ids: ::Array[String], + files: ::Array[OpenAI::VectorStores::FileBatchCreateParams::File], request_options: OpenAI::RequestOptions } @@ -44,6 +55,45 @@ module OpenAI def self?.variants: -> ::Array[OpenAI::Models::VectorStores::FileBatchCreateParams::attribute] end + + type file = + { + file_id: String, + attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::File::attribute]?, + chunking_strategy: OpenAI::Models::file_chunking_strategy_param + } + + class File < OpenAI::Internal::Type::BaseModel + attr_accessor file_id: String + + attr_accessor attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::File::attribute]? + + attr_reader chunking_strategy: OpenAI::Models::file_chunking_strategy_param? 
+ + def chunking_strategy=: ( + OpenAI::Models::file_chunking_strategy_param + ) -> OpenAI::Models::file_chunking_strategy_param + + def initialize: ( + file_id: String, + ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::File::attribute]?, + ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param + ) -> void + + def to_hash: -> { + file_id: String, + attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::File::attribute]?, + chunking_strategy: OpenAI::Models::file_chunking_strategy_param + } + + type attribute = String | Float | bool + + module Attribute + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::VectorStores::FileBatchCreateParams::File::attribute] + end + end end end end diff --git a/sig/openai/models/video.rbs b/sig/openai/models/video.rbs index 91e3be95..e3b388eb 100644 --- a/sig/openai/models/video.rbs +++ b/sig/openai/models/video.rbs @@ -10,6 +10,7 @@ module OpenAI model: OpenAI::Models::video_model, object: :video, progress: Integer, + prompt: String?, remixed_from_video_id: String?, seconds: OpenAI::Models::video_seconds, size: OpenAI::Models::video_size, @@ -33,6 +34,8 @@ module OpenAI attr_accessor progress: Integer + attr_accessor prompt: String? + attr_accessor remixed_from_video_id: String? attr_accessor seconds: OpenAI::Models::video_seconds @@ -49,6 +52,7 @@ module OpenAI expires_at: Integer?, model: OpenAI::Models::video_model, progress: Integer, + prompt: String?, remixed_from_video_id: String?, seconds: OpenAI::Models::video_seconds, size: OpenAI::Models::video_size, @@ -65,6 +69,7 @@ module OpenAI model: OpenAI::Models::video_model, object: :video, progress: Integer, + prompt: String?, remixed_from_video_id: String?, seconds: OpenAI::Models::video_seconds, size: OpenAI::Models::video_size, diff --git a/sig/openai/resources/vector_stores/file_batches.rbs b/sig/openai/resources/vector_stores/file_batches.rbs index 1228381c..0cb21016 100644 --- a/sig/openai/resources/vector_stores/file_batches.rbs +++ b/sig/openai/resources/vector_stores/file_batches.rbs @@ -4,9 +4,10 @@ module OpenAI class FileBatches def create: ( String vector_store_id, - file_ids: ::Array[String], ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?, ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + ?file_ids: ::Array[String], + ?files: ::Array[OpenAI::VectorStores::FileBatchCreateParams::File], ?request_options: OpenAI::request_opts ) -> OpenAI::VectorStores::VectorStoreFileBatch diff --git a/test/openai/internal/util_test.rb b/test/openai/internal/util_test.rb index 60fdfe9b..94148d2a 100644 --- a/test/openai/internal/util_test.rb +++ b/test/openai/internal/util_test.rb @@ -343,6 +343,29 @@ def test_rewind_closing assert_equal(0, steps) end + def test_thread_interrupts + once = 0 + que = Queue.new + enum = Enumerator.new do |y| + 10.times { y << _1 } + ensure + once = once.succ + end + + fused_1 = OpenAI::Internal::Util.fused_enum(enum, external: true) { loop { enum.next } } + fused_2 = OpenAI::Internal::Util.chain_fused(fused_1) { fused_1.each(&_1) } + fused_3 = OpenAI::Internal::Util.chain_fused(fused_2) { fused_2.each(&_1) } + + th = ::Thread.new do + que << "🐶" + fused_3.each { sleep(10) } + end + + assert_equal("🐶", que.pop) + th.kill.join + assert_equal(1, once) + end + def test_closing arr = [1, 2, 3] once = 0 diff --git a/test/openai/resources/responses/streaming_test.rb b/test/openai/resources/responses/streaming_test.rb 
index fbf1ef51..67f1263d 100644 --- a/test/openai/resources/responses/streaming_test.rb +++ b/test/openai/resources/responses/streaming_test.rb @@ -430,6 +430,75 @@ def test_structured_output_parsed_in_final_response end end + class CalendarEvent < OpenAI::BaseModel + required :name, String + required :date, String + required :location, String + end + + class LookupCalendar < OpenAI::BaseModel + required :first_name, String + required :last_name, String + end + + def test_stream_with_both_text_and_tools + stub_request(:post, "http://localhost/responses") + .to_return( + status: 200, + headers: {"Content-Type" => "text/event-stream"}, + body: text_and_tools_sse_response + ) + + stream = @client.responses.stream( + model: "gpt-4o-2024-08-06", + input: [ + {role: :system, content: "Extract event info and look up attendees."}, + {role: :user, content: "Ada Lovelace is going to a conference on Friday at the Convention Center."} + ], + text: CalendarEvent, + tools: [LookupCalendar] + ) + + events = stream.to_a + + text_done = events.find { |e| e.type == :"response.output_text.done" } + assert_pattern do + text_done => OpenAI::Streaming::ResponseTextDoneEvent[ + parsed: CalendarEvent[ + name: "Conference", + date: "Friday", + location: "Convention Center" + ] + ] + end + + function_done = events.find { |e| e.type == :"response.function_call_arguments.done" } + assert_equal('{"first_name":"Ada","last_name":"Lovelace"}', function_done.arguments) + + final_response = stream.get_final_response + + text_output = final_response.output.find { |o| o.is_a?(OpenAI::Models::Responses::ResponseOutputMessage) } + text_content = text_output.content.find { |c| c[:type] == :output_text } + assert_pattern do + text_content[:parsed] => CalendarEvent[ + name: "Conference", + date: "Friday", + location: "Convention Center" + ] + end + + tool_call = final_response.output.find { |o| o.is_a?(OpenAI::Models::Responses::ResponseFunctionToolCall) } + assert_pattern do + tool_call => OpenAI::Models::Responses::ResponseFunctionToolCall[ + name: "LookupCalendar", + parsed: LookupCalendar[ + first_name: "Ada", + last_name: "Lovelace" + ] + ] + end + end + private def function_tool_params @@ -742,4 +811,51 @@ def error_sse_response SSE end + + def text_and_tools_sse_response + <<~SSE + event: response.created + data: {"type":"response.created","sequence_number":1,"response":{"id":"resp_stream_001","object":"realtime.response","status":"in_progress","output":[],"usage":null}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":2,"response_id":"resp_stream_001","output_index":0,"item":{"id":"msg_001","object":"realtime.item","type":"message","status":"in_progress","role":"assistant","content":[]}} + + event: response.content_part.added + data: {"type":"response.content_part.added","sequence_number":3,"response_id":"resp_stream_001","item_id":"msg_001","output_index":0,"content_index":0,"part":{"type":"output_text","text":""}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":4,"response_id":"resp_stream_001","item_id":"msg_001","output_index":0,"content_index":0,"delta":"{\\"name\\":\\"Conference\\","} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":5,"response_id":"resp_stream_001","item_id":"msg_001","output_index":0,"content_index":0,"delta":"\\"date\\":\\"Friday\\",\\"location\\":\\"Convention Center\\"}"} + + event: response.output_text.done + data: 
{"type":"response.output_text.done","sequence_number":6,"response_id":"resp_stream_001","item_id":"msg_001","output_index":0,"content_index":0,"text":"{\\"name\\":\\"Conference\\",\\"date\\":\\"Friday\\",\\"location\\":\\"Convention Center\\"}"} + + event: response.content_part.done + data: {"type":"response.content_part.done","sequence_number":7,"response_id":"resp_stream_001","item_id":"msg_001","output_index":0,"content_index":0,"part":{"type":"output_text","text":"{\\"name\\":\\"Conference\\",\\"date\\":\\"Friday\\",\\"location\\":\\"Convention Center\\"}"}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":8,"response_id":"resp_stream_001","item_id":"msg_001","output_index":0,"item":{"id":"msg_001","object":"realtime.item","type":"message","status":"completed","role":"assistant","content":[{"type":"output_text","text":"{\\"name\\":\\"Conference\\",\\"date\\":\\"Friday\\",\\"location\\":\\"Convention Center\\"}"}]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":9,"response_id":"resp_stream_001","output_index":1,"item":{"id":"call_001","object":"realtime.item","type":"function_call","status":"in_progress","name":"LookupCalendar","arguments":"","call_id":"call_001"}} + + event: response.function_call_arguments.delta + data: {"type":"response.function_call_arguments.delta","sequence_number":10,"item_id":"call_001","output_index":1,"delta":"{\\"first_name\\":\\"Ada\\","} + + event: response.function_call_arguments.delta + data: {"type":"response.function_call_arguments.delta","sequence_number":11,"item_id":"call_001","output_index":1,"delta":"\\"last_name\\":\\"Lovelace\\"}"} + + event: response.function_call_arguments.done + data: {"type":"response.function_call_arguments.done","sequence_number":12,"item_id":"call_001","output_index":1,"arguments":"{\\"first_name\\":\\"Ada\\",\\"last_name\\":\\"Lovelace\\"}"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":13,"response_id":"resp_stream_001","item_id":"call_001","output_index":1,"item":{"id":"call_001","object":"realtime.item","type":"function_call","status":"completed","name":"LookupCalendar","arguments":"{\\"first_name\\":\\"Ada\\",\\"last_name\\":\\"Lovelace\\"}","call_id":"call_001"}} + + event: response.completed + data: {"type":"response.completed","sequence_number":14,"response":{"id":"resp_stream_001","object":"realtime.response","status":"completed","output":[{"id":"msg_001","object":"realtime.item","type":"message","status":"completed","role":"assistant","content":[{"type":"output_text","text":"{\\"name\\":\\"Conference\\",\\"date\\":\\"Friday\\",\\"location\\":\\"Convention Center\\"}"}]},{"id":"call_001","object":"realtime.item","type":"function_call","status":"completed","name":"LookupCalendar","arguments":"{\\"first_name\\":\\"Ada\\",\\"last_name\\":\\"Lovelace\\"}","call_id":"call_001"}],"usage":{"total_tokens":50,"input_tokens":30,"output_tokens":20}}} + + SSE + end end diff --git a/test/openai/resources/vector_stores/file_batches_test.rb b/test/openai/resources/vector_stores/file_batches_test.rb index ccaeb85e..9a80f206 100644 --- a/test/openai/resources/vector_stores/file_batches_test.rb +++ b/test/openai/resources/vector_stores/file_batches_test.rb @@ -3,8 +3,8 @@ require_relative "../../test_helper" class OpenAI::Test::Resources::VectorStores::FileBatchesTest < OpenAI::Test::ResourceTest - def test_create_required_params - response = 
@openai.vector_stores.file_batches.create("vs_abc123", file_ids: ["string"]) + def test_create + response = @openai.vector_stores.file_batches.create("vs_abc123") assert_pattern do response => OpenAI::VectorStores::VectorStoreFileBatch diff --git a/test/openai/resources/videos_test.rb b/test/openai/resources/videos_test.rb index 4ea56365..12c46f18 100644 --- a/test/openai/resources/videos_test.rb +++ b/test/openai/resources/videos_test.rb @@ -20,6 +20,7 @@ def test_create_required_params model: OpenAI::VideoModel, object: Symbol, progress: Integer, + prompt: String | nil, remixed_from_video_id: String | nil, seconds: OpenAI::VideoSeconds, size: OpenAI::VideoSize, @@ -45,6 +46,7 @@ def test_retrieve model: OpenAI::VideoModel, object: Symbol, progress: Integer, + prompt: String | nil, remixed_from_video_id: String | nil, seconds: OpenAI::VideoSeconds, size: OpenAI::VideoSize, @@ -77,6 +79,7 @@ def test_list model: OpenAI::VideoModel, object: Symbol, progress: Integer, + prompt: String | nil, remixed_from_video_id: String | nil, seconds: OpenAI::VideoSeconds, size: OpenAI::VideoSize, @@ -128,6 +131,7 @@ def test_remix_required_params model: OpenAI::VideoModel, object: Symbol, progress: Integer, + prompt: String | nil, remixed_from_video_id: String | nil, seconds: OpenAI::VideoSeconds, size: OpenAI::VideoSize,