diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 0ada041e..157f0355 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.35.2" + ".": "0.36.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index d697cee1..38f260dd 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 135 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-eeba8addf3a5f412e5ce8d22031e60c61650cee3f5d9e587a2533f6818a249ea.yml -openapi_spec_hash: 0a4d8ad2469823ce24a3fd94f23f1c2b -config_hash: 0bb1941a78ece0b610a2fbba7d74a84c +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ca24bc4d8125b5153514ce643c4e3220f25971b7d67ca384d56d493c72c0d977.yml +openapi_spec_hash: c6f048c7b3d29f4de48fde0e845ba33f +config_hash: b876221dfb213df9f0a999e75d38a65e diff --git a/CHANGELOG.md b/CHANGELOG.md index 9fb9343c..d4ca19dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 0.36.0 (2025-11-13) + +Full Changelog: [v0.35.2...v0.36.0](https://github.com/openai/openai-ruby/compare/v0.35.2...v0.36.0) + +### Features + +* **api:** gpt 5.1 ([26ece0e](https://github.com/openai/openai-ruby/commit/26ece0eb68486e40066c89f626b9a83c4f274889)) + ## 0.35.2 (2025-11-05) Full Changelog: [v0.35.1...v0.35.2](https://github.com/openai/openai-ruby/compare/v0.35.1...v0.35.2) diff --git a/Gemfile.lock b/Gemfile.lock index a8a5beff..05d32a9c 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -11,7 +11,7 @@ GIT PATH remote: . specs: - openai (0.35.2) + openai (0.36.0) connection_pool GEM diff --git a/README.md b/README.md index 54246b60..bf2b6f36 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application ```ruby -gem "openai", "~> 0.35.2" +gem "openai", "~> 0.36.0" ``` @@ -30,7 +30,10 @@ openai = OpenAI::Client.new( api_key: ENV["OPENAI_API_KEY"] # This is the default and can be omitted ) -chat_completion = openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5") +chat_completion = openai.chat.completions.create( + messages: [{role: "user", content: "Say this is a test"}], + model: :"gpt-5.1" +) puts(chat_completion) ``` @@ -42,7 +45,7 @@ We provide support for streaming responses using Server-Sent Events (SSE). 
```ruby stream = openai.responses.stream( input: "Write a haiku about OpenAI.", - model: :"gpt-5" + model: :"gpt-5.1" ) stream.each do |event| @@ -340,7 +343,7 @@ openai = OpenAI::Client.new( # Or, configure per-request: openai.chat.completions.create( messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: {max_retries: 5} ) ``` @@ -358,7 +361,7 @@ openai = OpenAI::Client.new( # Or, configure per-request: openai.chat.completions.create( messages: [{role: "user", content: "How can I list all files in a directory using Python?"}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: {timeout: 5} ) ``` @@ -393,7 +396,7 @@ Note: the `extra_` parameters of the same name overrides the documented paramete chat_completion = openai.chat.completions.create( messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: { extra_query: {my_query_parameter: value}, extra_body: {my_body_parameter: value}, @@ -441,7 +444,7 @@ You can provide typesafe request parameters like so: ```ruby openai.chat.completions.create( messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")], - model: :"gpt-5" + model: :"gpt-5.1" ) ``` @@ -449,12 +452,15 @@ Or, equivalently: ```ruby # Hashes work, but are not typesafe: -openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5") +openai.chat.completions.create( + messages: [{role: "user", content: "Say this is a test"}], + model: :"gpt-5.1" +) # You can also splat a full Params class: params = OpenAI::Chat::CompletionCreateParams.new( messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")], - model: :"gpt-5" + model: :"gpt-5.1" ) openai.chat.completions.create(**params) ``` @@ -464,11 +470,11 @@ openai.chat.completions.create(**params) Since this library does not depend on `sorbet-runtime`, it cannot provide [`T::Enum`](https://sorbet.org/docs/tenum) instances. 
Instead, we provide "tagged symbols" instead, which is always a primitive at runtime: ```ruby -# :minimal -puts(OpenAI::ReasoningEffort::MINIMAL) +# :"in-memory" +puts(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY) -# Revealed type: `T.all(OpenAI::ReasoningEffort, Symbol)` -T.reveal_type(OpenAI::ReasoningEffort::MINIMAL) +# Revealed type: `T.all(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention, Symbol)` +T.reveal_type(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY) ``` Enum parameters have a "relaxed" type, so you can either pass in enum constants or their literal value: @@ -476,13 +482,13 @@ Enum parameters have a "relaxed" type, so you can either pass in enum constants ```ruby # Using the enum constants preserves the tagged type information: openai.chat.completions.create( - reasoning_effort: OpenAI::ReasoningEffort::MINIMAL, + prompt_cache_retention: OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY, # … ) # Literal values are also permissible: openai.chat.completions.create( - reasoning_effort: :minimal, + prompt_cache_retention: :"in-memory", # … ) ``` diff --git a/lib/openai.rb b/lib/openai.rb index d3d9d205..2bc6fe0e 100644 --- a/lib/openai.rb +++ b/lib/openai.rb @@ -528,15 +528,19 @@ require_relative "openai/models/response_format_text" require_relative "openai/models/response_format_text_grammar" require_relative "openai/models/response_format_text_python" +require_relative "openai/models/responses/apply_patch_tool" require_relative "openai/models/responses/computer_tool" require_relative "openai/models/responses/custom_tool" require_relative "openai/models/responses/easy_input_message" require_relative "openai/models/responses/file_search_tool" +require_relative "openai/models/responses/function_shell_tool" require_relative "openai/models/responses/function_tool" require_relative "openai/models/responses/input_item_list_params" require_relative "openai/models/responses/input_token_count_params" require_relative "openai/models/responses/input_token_count_response" require_relative "openai/models/responses/response" +require_relative "openai/models/responses/response_apply_patch_tool_call" +require_relative "openai/models/responses/response_apply_patch_tool_call_output" require_relative "openai/models/responses/response_audio_delta_event" require_relative "openai/models/responses/response_audio_done_event" require_relative "openai/models/responses/response_audio_transcript_delta_event" @@ -576,6 +580,9 @@ require_relative "openai/models/responses/response_function_call_arguments_done_event" require_relative "openai/models/responses/response_function_call_output_item" require_relative "openai/models/responses/response_function_call_output_item_list" +require_relative "openai/models/responses/response_function_shell_call_output_content" +require_relative "openai/models/responses/response_function_shell_tool_call" +require_relative "openai/models/responses/response_function_shell_tool_call_output" require_relative "openai/models/responses/response_function_tool_call_item" require_relative "openai/models/responses/response_function_tool_call_output_item" require_relative "openai/models/responses/response_function_web_search" @@ -634,10 +641,12 @@ require_relative "openai/models/responses/response_web_search_call_searching_event" require_relative "openai/models/responses/tool" require_relative "openai/models/responses/tool_choice_allowed" +require_relative "openai/models/responses/tool_choice_apply_patch" 
require_relative "openai/models/responses/tool_choice_custom" require_relative "openai/models/responses/tool_choice_function" require_relative "openai/models/responses/tool_choice_mcp" require_relative "openai/models/responses/tool_choice_options" +require_relative "openai/models/responses/tool_choice_shell" require_relative "openai/models/responses/tool_choice_types" require_relative "openai/models/responses/web_search_preview_tool" require_relative "openai/models/responses/web_search_tool" diff --git a/lib/openai/internal/type/enum.rb b/lib/openai/internal/type/enum.rb index 70476264..227ff0f2 100644 --- a/lib/openai/internal/type/enum.rb +++ b/lib/openai/internal/type/enum.rb @@ -19,11 +19,11 @@ module Type # @example # # `chat_model` is a `OpenAI::ChatModel` # case chat_model - # when OpenAI::ChatModel::GPT_5 + # when OpenAI::ChatModel::GPT_5_1 # # ... - # when OpenAI::ChatModel::GPT_5_MINI + # when OpenAI::ChatModel::GPT_5_1_2025_11_13 # # ... - # when OpenAI::ChatModel::GPT_5_NANO + # when OpenAI::ChatModel::GPT_5_1_CODEX # # ... # else # puts(chat_model) @@ -31,11 +31,11 @@ module Type # # @example # case chat_model - # in :"gpt-5" + # in :"gpt-5.1" # # ... - # in :"gpt-5-mini" + # in :"gpt-5.1-2025-11-13" # # ... - # in :"gpt-5-nano" + # in :"gpt-5.1-codex" # # ... # else # puts(chat_model) diff --git a/lib/openai/models/batch_create_params.rb b/lib/openai/models/batch_create_params.rb index bdb51f2e..b91d1190 100644 --- a/lib/openai/models/batch_create_params.rb +++ b/lib/openai/models/batch_create_params.rb @@ -16,9 +16,10 @@ class BatchCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute endpoint # The endpoint to be used for all requests in the batch. Currently - # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - # are supported. Note that `/v1/embeddings` batches are also restricted to a - # maximum of 50,000 embedding inputs across all requests in the batch. + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, + # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also + # restricted to a maximum of 50,000 embedding inputs across all requests in the + # batch. # # @return [Symbol, OpenAI::Models::BatchCreateParams::Endpoint] required :endpoint, enum: -> { OpenAI::BatchCreateParams::Endpoint } @@ -83,9 +84,10 @@ module CompletionWindow end # The endpoint to be used for all requests in the batch. Currently - # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - # are supported. Note that `/v1/embeddings` batches are also restricted to a - # maximum of 50,000 embedding inputs across all requests in the batch. + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, + # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also + # restricted to a maximum of 50,000 embedding inputs across all requests in the + # batch. 
module Endpoint extend OpenAI::Internal::Type::Enum @@ -93,6 +95,7 @@ module Endpoint V1_CHAT_COMPLETIONS = :"/v1/chat/completions" V1_EMBEDDINGS = :"/v1/embeddings" V1_COMPLETIONS = :"/v1/completions" + V1_MODERATIONS = :"/v1/moderations" # @!method self.values # @return [Array] diff --git a/lib/openai/models/beta/assistant_create_params.rb b/lib/openai/models/beta/assistant_create_params.rb index d1903311..6f9eed7f 100644 --- a/lib/openai/models/beta/assistant_create_params.rb +++ b/lib/openai/models/beta/assistant_create_params.rb @@ -51,12 +51,16 @@ class AssistantCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true diff --git a/lib/openai/models/beta/assistant_update_params.rb b/lib/openai/models/beta/assistant_update_params.rb index 3adb9322..612660a9 100644 --- a/lib/openai/models/beta/assistant_update_params.rb +++ b/lib/openai/models/beta/assistant_update_params.rb @@ -51,12 +51,16 @@ class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. 
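Per the updated documentation above, `reasoning_effort` now accepts `none`, the default for `gpt-5.1`. A sketch of setting it explicitly on an assistant, assuming the Assistants beta accepts the `gpt-5.1` model:

```ruby
# :none disables reasoning; models released before gpt-5.1 reject it.
assistant = openai.beta.assistants.create(
  model: :"gpt-5.1",
  reasoning_effort: :none
)
```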
# # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true diff --git a/lib/openai/models/beta/threads/run_create_params.rb b/lib/openai/models/beta/threads/run_create_params.rb index 57269c0b..17b4c5ca 100644 --- a/lib/openai/models/beta/threads/run_create_params.rb +++ b/lib/openai/models/beta/threads/run_create_params.rb @@ -109,12 +109,16 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. - # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. + # + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true diff --git a/lib/openai/models/chat/completion_create_params.rb b/lib/openai/models/chat/completion_create_params.rb index bc924fb1..bb091ba7 100644 --- a/lib/openai/models/chat/completion_create_params.rb +++ b/lib/openai/models/chat/completion_create_params.rb @@ -190,15 +190,30 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @return [String, nil] optional :prompt_cache_key, String + # @!attribute prompt_cache_retention + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + # + # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::PromptCacheRetention, nil] + optional :prompt_cache_retention, + enum: -> { OpenAI::Chat::CompletionCreateParams::PromptCacheRetention }, + nil?: true + # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. 
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -368,7 +383,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil] optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions } - # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) + # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) # Some parameter documentations has been truncated, see # {OpenAI::Models::Chat::CompletionCreateParams} for more details. # @@ -406,6 +421,8 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # + # @param prompt_cache_retention [Symbol, OpenAI::Models::Chat::CompletionCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp + # # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. @@ -551,6 +568,20 @@ module Modality # @return [Array] end + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + module PromptCacheRetention + extend OpenAI::Internal::Type::Enum + + IN_MEMORY = :"in-memory" + PROMPT_CACHE_RETENTION_24H = :"24h" + + # @!method self.values + # @return [Array] + end + # An object specifying the format that the model must output. 
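The `prompt_cache_retention` parameter and its enum are defined above; a minimal sketch of opting a chat completion into extended caching:

```ruby
chat_completion = openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
  model: :"gpt-5.1",
  # :"in-memory" is the shorter-lived default policy; :"24h" extends retention.
  prompt_cache_retention: OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::PROMPT_CACHE_RETENTION_24H
)
```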
# # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/lib/openai/models/chat_model.rb b/lib/openai/models/chat_model.rb index adeb1665..2faf5e1d 100644 --- a/lib/openai/models/chat_model.rb +++ b/lib/openai/models/chat_model.rb @@ -5,6 +5,11 @@ module Models module ChatModel extend OpenAI::Internal::Type::Enum + GPT_5_1 = :"gpt-5.1" + GPT_5_1_2025_11_13 = :"gpt-5.1-2025-11-13" + GPT_5_1_CODEX = :"gpt-5.1-codex" + GPT_5_1_MINI = :"gpt-5.1-mini" + GPT_5_1_CHAT_LATEST = :"gpt-5.1-chat-latest" GPT_5 = :"gpt-5" GPT_5_MINI = :"gpt-5-mini" GPT_5_NANO = :"gpt-5-nano" diff --git a/lib/openai/models/conversations/conversation_create_params.rb b/lib/openai/models/conversations/conversation_create_params.rb index 368c1269..e05ad5b4 100644 --- a/lib/openai/models/conversations/conversation_create_params.rb +++ b/lib/openai/models/conversations/conversation_create_params.rb @@ -12,7 +12,7 @@ class ConversationCreateParams < OpenAI::Internal::Type::BaseModel # Initial items to include in the conversation context. You may add up to 20 items # at a time. # - # @return [Array, nil] + # @return [Array, nil] optional :items, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputItem] }, nil?: true @@ -32,7 +32,7 @@ class ConversationCreateParams < OpenAI::Internal::Type::BaseModel # Some parameter documentations has been truncated, see # {OpenAI::Models::Conversations::ConversationCreateParams} for more details. # - # @param items [Array, nil] Initial items to include in the conversation context. You may add up to 20 items + # @param items [Array, nil] Initial items to include in the conversation context. You may add up to 20 items # # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # diff --git a/lib/openai/models/conversations/conversation_item.rb b/lib/openai/models/conversations/conversation_item.rb index 14ee48c8..8c4e390a 100644 --- a/lib/openai/models/conversations/conversation_item.rb +++ b/lib/openai/models/conversations/conversation_item.rb @@ -54,6 +54,18 @@ module ConversationItem # The output of a local shell tool call. variant :local_shell_call_output, -> { OpenAI::Conversations::ConversationItem::LocalShellCallOutput } + # A tool call that executes one or more shell commands in a managed environment. + variant :shell_call, -> { OpenAI::Responses::ResponseFunctionShellToolCall } + + # The output of a shell tool call. + variant :shell_call_output, -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput } + + # A tool call that applies file diffs by creating, deleting, or updating files. + variant :apply_patch_call, -> { OpenAI::Responses::ResponseApplyPatchToolCall } + + # The output emitted by an apply patch tool call. + variant :apply_patch_call_output, -> { OpenAI::Responses::ResponseApplyPatchToolCallOutput } + # A list of tools available on an MCP server. 
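`ChatModel` above adds the `gpt-5.1` family, including a dated snapshot for reproducible behavior; pinning to it is a one-liner:

```ruby
# Pin to the dated gpt-5.1 snapshot rather than the moving alias.
openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
  model: OpenAI::ChatModel::GPT_5_1_2025_11_13 # :"gpt-5.1-2025-11-13"
)
```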
variant :mcp_list_tools, -> { OpenAI::Conversations::ConversationItem::McpListTools } @@ -592,7 +604,7 @@ module Status end # @!method self.variants - # @return [Array(OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput)] + # @return [Array(OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput)] end end diff --git a/lib/openai/models/conversations/conversation_item_list.rb b/lib/openai/models/conversations/conversation_item_list.rb index 4c36f797..4ea23c3f 100644 --- a/lib/openai/models/conversations/conversation_item_list.rb +++ b/lib/openai/models/conversations/conversation_item_list.rb @@ -8,7 +8,7 @@ class ConversationItemList < OpenAI::Internal::Type::BaseModel # @!attribute data # A list of conversation items. # - # @return [Array] + # @return [Array] required :data, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::ConversationItem] } # @!attribute first_id @@ -38,7 +38,7 @@ class ConversationItemList < OpenAI::Internal::Type::BaseModel # @!method initialize(data:, first_id:, has_more:, last_id:, object: :list) # A list of Conversation items. # - # @param data [Array] A list of conversation items. + # @param data [Array] A list of conversation items. # # @param first_id [String] The ID of the first item in the list. 
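Because shell and apply-patch items now join the `ConversationItem` union, listing code can branch on the new classes. A sketch, assuming `conversation_id` names an existing conversation and that the list method follows the SDK's usual positional path parameters:

```ruby
openai.conversations.items.list(conversation_id).each do |item|
  case item
  when OpenAI::Responses::ResponseFunctionShellToolCall
    puts("shell call: #{item.id}")
  when OpenAI::Responses::ResponseApplyPatchToolCall
    puts("apply_patch call: #{item.id}")
  end
end
```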
# diff --git a/lib/openai/models/conversations/item_create_params.rb b/lib/openai/models/conversations/item_create_params.rb index bebc18e0..239cd735 100644 --- a/lib/openai/models/conversations/item_create_params.rb +++ b/lib/openai/models/conversations/item_create_params.rb @@ -11,7 +11,7 @@ class ItemCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute items # The items to add to the conversation. You may add up to 20 items at a time. # - # @return [Array] + # @return [Array] required :items, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputItem] } # @!attribute include @@ -26,7 +26,7 @@ class ItemCreateParams < OpenAI::Internal::Type::BaseModel # Some parameter documentations has been truncated, see # {OpenAI::Models::Conversations::ItemCreateParams} for more details. # - # @param items [Array] The items to add to the conversation. You may add up to 20 items at a time. + # @param items [Array] The items to add to the conversation. You may add up to 20 items at a time. # # @param include [Array] Additional fields to include in the response. See the `include` # diff --git a/lib/openai/models/evals/create_eval_completions_run_data_source.rb b/lib/openai/models/evals/create_eval_completions_run_data_source.rb index 920f3fd0..62277272 100644 --- a/lib/openai/models/evals/create_eval_completions_run_data_source.rb +++ b/lib/openai/models/evals/create_eval_completions_run_data_source.rb @@ -462,12 +462,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true diff --git a/lib/openai/models/evals/run_cancel_response.rb b/lib/openai/models/evals/run_cancel_response.rb index 0b0f2b47..505e352e 100644 --- a/lib/openai/models/evals/run_cancel_response.rb +++ b/lib/openai/models/evals/run_cancel_response.rb @@ -316,12 +316,16 @@ class Responses < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. 
# - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -660,12 +664,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -709,7 +717,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # the model to call your own code. Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). # - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -733,7 +741,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. diff --git a/lib/openai/models/evals/run_create_params.rb b/lib/openai/models/evals/run_create_params.rb index f4ec6c70..2fbf70a1 100644 --- a/lib/openai/models/evals/run_create_params.rb +++ b/lib/openai/models/evals/run_create_params.rb @@ -228,12 +228,16 @@ class Responses < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. 
Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -588,12 +592,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -637,7 +645,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # the model to call your own code. Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). # - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -661,7 +669,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text] Configuration options for a text response from the model. Can be plain # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. diff --git a/lib/openai/models/evals/run_create_response.rb b/lib/openai/models/evals/run_create_response.rb index 32d9244d..755a7fc7 100644 --- a/lib/openai/models/evals/run_create_response.rb +++ b/lib/openai/models/evals/run_create_response.rb @@ -316,12 +316,16 @@ class Responses < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -660,12 +664,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -709,7 +717,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # the model to call your own code. Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). # - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -733,7 +741,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. 
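The same `reasoning_effort` change threads through the evals run data sources shown here. A rough sketch of passing `:none` in `sampling_params` — the eval ID, source shape, and file ID are all hypothetical placeholders:

```ruby
run = openai.evals.runs.create(
  "eval_abc123", # hypothetical eval ID
  data_source: {
    type: :responses,
    source: {type: :file_id, id: "file-abc123"},
    sampling_params: {reasoning_effort: :none} # newly accepted value
  }
)
```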
diff --git a/lib/openai/models/evals/run_list_response.rb b/lib/openai/models/evals/run_list_response.rb index b1947e4d..555af497 100644 --- a/lib/openai/models/evals/run_list_response.rb +++ b/lib/openai/models/evals/run_list_response.rb @@ -316,12 +316,16 @@ class Responses < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -660,12 +664,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -708,7 +716,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # the model to call your own code. Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). # - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -732,7 +740,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. 
You # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. diff --git a/lib/openai/models/evals/run_retrieve_response.rb b/lib/openai/models/evals/run_retrieve_response.rb index 9a2d8896..341d4ab5 100644 --- a/lib/openai/models/evals/run_retrieve_response.rb +++ b/lib/openai/models/evals/run_retrieve_response.rb @@ -316,12 +316,16 @@ class Responses < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -664,12 +668,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -713,7 +721,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # the model to call your own code. Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). # - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -737,7 +745,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. 
Can be plain # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. diff --git a/lib/openai/models/graders/score_model_grader.rb b/lib/openai/models/graders/score_model_grader.rb index 8a7eb20e..20e396e7 100644 --- a/lib/openai/models/graders/score_model_grader.rb +++ b/lib/openai/models/graders/score_model_grader.rb @@ -222,12 +222,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true diff --git a/lib/openai/models/reasoning.rb b/lib/openai/models/reasoning.rb index 177da9ea..1608fb08 100644 --- a/lib/openai/models/reasoning.rb +++ b/lib/openai/models/reasoning.rb @@ -6,12 +6,16 @@ class Reasoning < OpenAI::Internal::Type::BaseModel # @!attribute effort # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. - # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. + # + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. 
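On the Responses side, the same effort values flow through `OpenAI::Reasoning#effort`; a minimal sketch:

```ruby
response = openai.responses.create(
  model: :"gpt-5.1",
  input: "Summarize this release in one sentence.",
  reasoning: {effort: :low} # gpt-5.1 defaults to :none when omitted
)
puts(response.output_text)
```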
# # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true diff --git a/lib/openai/models/reasoning_effort.rb b/lib/openai/models/reasoning_effort.rb index 02a47244..335bc085 100644 --- a/lib/openai/models/reasoning_effort.rb +++ b/lib/openai/models/reasoning_effort.rb @@ -4,15 +4,20 @@ module OpenAI module Models # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. module ReasoningEffort extend OpenAI::Internal::Type::Enum + NONE = :none MINIMAL = :minimal LOW = :low MEDIUM = :medium diff --git a/lib/openai/models/responses/apply_patch_tool.rb b/lib/openai/models/responses/apply_patch_tool.rb new file mode 100644 index 00000000..ea2a7625 --- /dev/null +++ b/lib/openai/models/responses/apply_patch_tool.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ApplyPatchTool < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of the tool. Always `apply_patch`. + # + # @return [Symbol, :apply_patch] + required :type, const: :apply_patch + + # @!method initialize(type: :apply_patch) + # Allows the assistant to create, delete, or update files using unified diffs. + # + # @param type [Symbol, :apply_patch] The type of the tool. Always `apply_patch`. + end + end + end +end diff --git a/lib/openai/models/responses/function_shell_tool.rb b/lib/openai/models/responses/function_shell_tool.rb new file mode 100644 index 00000000..268de40c --- /dev/null +++ b/lib/openai/models/responses/function_shell_tool.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class FunctionShellTool < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of the shell tool. Always `shell`. + # + # @return [Symbol, :shell] + required :type, const: :shell + + # @!method initialize(type: :shell) + # A tool that allows the model to execute shell commands. + # + # @param type [Symbol, :shell] The type of the shell tool. Always `shell`. 
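The two new tool models above can be handed to a response request. A minimal sketch, with the request hash shapes assumed from the `type` constants:

```ruby
response = openai.responses.create(
  model: :"gpt-5.1",
  input: "Create CHANGES.txt describing what you ran.",
  tools: [
    {type: :shell},       # OpenAI::Responses::FunctionShellTool
    {type: :apply_patch}  # OpenAI::Responses::ApplyPatchTool
  ]
)
```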
+ end + end + end +end diff --git a/lib/openai/models/responses/input_token_count_params.rb b/lib/openai/models/responses/input_token_count_params.rb index 511ea5d7..1dad73f7 100644 --- a/lib/openai/models/responses/input_token_count_params.rb +++ b/lib/openai/models/responses/input_token_count_params.rb @@ -24,7 +24,7 @@ class InputTokenCountParams < OpenAI::Internal::Type::BaseModel # @!attribute input # Text, image, or file inputs to the model, used to generate a response # - # @return [String, Array, nil] + # @return [String, Array, nil] optional :input, union: -> { OpenAI::Responses::InputTokenCountParams::Input }, nil?: true # @!attribute instructions @@ -83,14 +83,14 @@ class InputTokenCountParams < OpenAI::Internal::Type::BaseModel # response. See the `tools` parameter to see how to specify which tools the model # can call. # - # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil] + # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell, nil] optional :tool_choice, union: -> { OpenAI::Responses::InputTokenCountParams::ToolChoice }, nil?: true # @!attribute tools # An array of tools the model may call while generating a response. You can # specify which tool to use by setting the `tool_choice` parameter. # - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }, nil?: true # @!attribute truncation @@ -109,7 +109,7 @@ class InputTokenCountParams < OpenAI::Internal::Type::BaseModel # # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are # - # @param input [String, Array, nil] Text, image, or file inputs to the model, used to generate a response + # @param input [String, Array, nil] Text, image, or file inputs to the model, used to generate a response # # @param instructions [String, nil] A system (or developer) message inserted into the model's context. # @@ -123,9 +123,9 @@ class InputTokenCountParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Responses::InputTokenCountParams::Text, nil] Configuration options for a text response from the model. 
Can be plain # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell, nil] How the model should select which tool (or tools) to use when generating # - # @param tools [Array, nil] An array of tools the model may call while generating a response. You can specif + # @param tools [Array, nil] An array of tools the model may call while generating a response. You can specif # # @param truncation [Symbol, OpenAI::Models::Responses::InputTokenCountParams::Truncation] The truncation strategy to use for the model response. - `auto`: If the input to # @@ -158,7 +158,7 @@ module Input variant -> { OpenAI::Models::Responses::InputTokenCountParams::Input::ResponseInputItemArray } # @!method self.variants - # @return [Array(String, Array)] + # @return [Array(String, Array)] # @type [OpenAI::Internal::Type::Converter] ResponseInputItemArray = @@ -259,8 +259,14 @@ module ToolChoice # Use this option to force the model to call a specific custom tool. variant -> { OpenAI::Responses::ToolChoiceCustom } + # Forces the model to call the apply_patch tool when executing a tool call. + variant -> { OpenAI::Responses::ToolChoiceApplyPatch } + + # Forces the model to call the function shell tool when a tool call is required. + variant -> { OpenAI::Responses::ToolChoiceShell } + # @!method self.variants - # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)] + # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell)] end # The truncation strategy to use for the model response. - `auto`: If the input to diff --git a/lib/openai/models/responses/response.rb b/lib/openai/models/responses/response.rb index b327e434..10b2c3fc 100644 --- a/lib/openai/models/responses/response.rb +++ b/lib/openai/models/responses/response.rb @@ -38,7 +38,7 @@ class Response < OpenAI::Internal::Type::BaseModel # response will not be carried over to the next response. This makes it simple to # swap out system (or developer) messages in new responses. 
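Since `InputTokenCountParams` now accepts the two new tool choices, token counting can account for them. A sketch, assuming the endpoint is exposed as `openai.responses.input_tokens.count`, that the returned object exposes `input_tokens`, and that hashes coerce into the union types:

```ruby
# Count input tokens for a prompt that forces the shell tool; values illustrative.
count = openai.responses.input_tokens.count(
  model: :"gpt-5.1",
  input: "List the files in the repository root.",
  tools: [{type: :shell}],
  tool_choice: {type: :shell}
)
puts(count.input_tokens)
```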
# - # @return [String, Array, nil] + # @return [String, Array, nil] required :instructions, union: -> { OpenAI::Responses::Response::Instructions }, nil?: true # @!attribute metadata @@ -77,7 +77,7 @@ class Response < OpenAI::Internal::Type::BaseModel # an `assistant` message with the content generated by the model, you might # consider using the `output_text` property where supported in SDKs. # - # @return [Array] + # @return [Array] required :output, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem] } # @!attribute parallel_tool_calls @@ -100,7 +100,7 @@ class Response < OpenAI::Internal::Type::BaseModel # response. See the `tools` parameter to see how to specify which tools the model # can call. # - # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] + # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell] required :tool_choice, union: -> { OpenAI::Responses::Response::ToolChoice } # @!attribute tools @@ -124,7 +124,7 @@ class Response < OpenAI::Internal::Type::BaseModel # [function calling](https://platform.openai.com/docs/guides/function-calling). # You can also use custom tools to call your own code. # - # @return [Array] + # @return [Array] required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -192,6 +192,17 @@ class Response < OpenAI::Internal::Type::BaseModel # @return [String, nil] optional :prompt_cache_key, String + # @!attribute prompt_cache_retention + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). 
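On the response side, `prompt_cache_retention` is echoed back as one of the enum symbols defined further down in this file (or `nil`). A quick check might look like:

```ruby
# `response` is an OpenAI::Models::Responses::Response.
case response.prompt_cache_retention
when :"24h" then puts("extended prompt caching is active")
when :"in-memory" then puts("default in-memory caching")
else puts("no retention policy reported")
end
```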
+ # + # @return [Symbol, OpenAI::Models::Responses::Response::PromptCacheRetention, nil] + optional :prompt_cache_retention, + enum: -> { OpenAI::Responses::Response::PromptCacheRetention }, + nil?: true + # @!attribute reasoning # **gpt-5 and o-series models only** # @@ -307,7 +318,7 @@ def output_text texts.join end - # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, conversation: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response) + # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, conversation: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response) # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::Response} for more details. # @@ -319,21 +330,21 @@ def output_text # # @param incomplete_details [OpenAI::Models::Responses::Response::IncompleteDetails, nil] Details about why the response is incomplete. # - # @param instructions [String, Array, nil] A system (or developer) message inserted into the model's context. + # @param instructions [String, Array, nil] A system (or developer) message inserted into the model's context. # # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI # - # @param output [Array] An array of content items generated by the model. + # @param output [Array] An array of content items generated by the model. # # @param parallel_tool_calls [Boolean] Whether to allow the model to run tool calls in parallel. # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell] How the model should select which tool (or tools) to use when generating # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. 
You # # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, # @@ -351,6 +362,8 @@ def output_text # # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # + # @param prompt_cache_retention [Symbol, OpenAI::Models::Responses::Response::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp + # # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only** # # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi @@ -417,7 +430,7 @@ module Instructions variant -> { OpenAI::Models::Responses::Response::Instructions::ResponseInputItemArray } # @!method self.variants - # @return [Array(String, Array)] + # @return [Array(String, Array)] # @type [OpenAI::Internal::Type::Converter] ResponseInputItemArray = @@ -458,8 +471,14 @@ module ToolChoice # Use this option to force the model to call a specific custom tool. variant -> { OpenAI::Responses::ToolChoiceCustom } + # Forces the model to call the apply_patch tool when executing a tool call. + variant -> { OpenAI::Responses::ToolChoiceApplyPatch } + + # Forces the model to call the function shell tool when a tool call is required. + variant -> { OpenAI::Responses::ToolChoiceShell } + # @!method self.variants - # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)] + # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell)] end # @see OpenAI::Models::Responses::Response#conversation @@ -477,6 +496,22 @@ class Conversation < OpenAI::Internal::Type::BaseModel # @param id [String] The unique ID of the conversation. end + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + # + # @see OpenAI::Models::Responses::Response#prompt_cache_retention + module PromptCacheRetention + extend OpenAI::Internal::Type::Enum + + IN_MEMORY = :"in-memory" + PROMPT_CACHE_RETENTION_24H = :"24h" + + # @!method self.values + # @return [Array] + end + # Specifies the processing type used for serving the request. # # - If set to 'auto', then the request will be processed with the service tier diff --git a/lib/openai/models/responses/response_apply_patch_tool_call.rb b/lib/openai/models/responses/response_apply_patch_tool_call.rb new file mode 100644 index 00000000..2882727c --- /dev/null +++ b/lib/openai/models/responses/response_apply_patch_tool_call.rb @@ -0,0 +1,179 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseApplyPatchToolCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the apply patch tool call. Populated when this item is returned + # via API. 
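Once parsed, these items can be picked out of a response's output by class. A sketch, assuming apply-patch calls surface in `response.output` as this model; the field names follow the attributes below:

```ruby
response.output.each do |item|
  next unless item.is_a?(OpenAI::Models::Responses::ResponseApplyPatchToolCall)

  op = item.operation
  puts("apply_patch #{item.call_id} (#{item.status}): #{op&.type} #{op&.path}")
end
```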
+ # + # @return [String] + required :id, String + + # @!attribute call_id + # The unique ID of the apply patch tool call generated by the model. + # + # @return [String] + required :call_id, String + + # @!attribute status + # The status of the apply patch tool call. One of `in_progress` or `completed`. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Status] + required :status, enum: -> { OpenAI::Responses::ResponseApplyPatchToolCall::Status } + + # @!attribute type + # The type of the item. Always `apply_patch_call`. + # + # @return [Symbol, :apply_patch_call] + required :type, const: :apply_patch_call + + # @!attribute created_by + # The ID of the entity that created this tool call. + # + # @return [String, nil] + optional :created_by, String + + # @!attribute operation + # One of the create_file, delete_file, or update_file operations applied via + # apply_patch. + # + # @return [OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::CreateFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile, nil] + optional :operation, union: -> { OpenAI::Responses::ResponseApplyPatchToolCall::Operation } + + # @!method initialize(id:, call_id:, status:, created_by: nil, operation: nil, type: :apply_patch_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseApplyPatchToolCall} for more details. + # + # A tool call that applies file diffs by creating, deleting, or updating files. + # + # @param id [String] The unique ID of the apply patch tool call. Populated when this item is returned + # + # @param call_id [String] The unique ID of the apply patch tool call generated by the model. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Status] The status of the apply patch tool call. One of `in_progress` or `completed`. + # + # @param created_by [String] The ID of the entity that created this tool call. + # + # @param operation [OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::CreateFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile] One of the create_file, delete_file, or update_file operations applied via apply + # + # @param type [Symbol, :apply_patch_call] The type of the item. Always `apply_patch_call`. + + # The status of the apply patch tool call. One of `in_progress` or `completed`. + # + # @see OpenAI::Models::Responses::ResponseApplyPatchToolCall#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + + # @!method self.values + # @return [Array] + end + + # One of the create_file, delete_file, or update_file operations applied via + # apply_patch. + # + # @see OpenAI::Models::Responses::ResponseApplyPatchToolCall#operation + module Operation + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Instruction describing how to create a file via the apply_patch tool. + variant :create_file, -> { OpenAI::Responses::ResponseApplyPatchToolCall::Operation::CreateFile } + + # Instruction describing how to delete a file via the apply_patch tool. + variant :delete_file, -> { OpenAI::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile } + + # Instruction describing how to update a file via the apply_patch tool. 
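Dispatching on the three operation variants is a natural `case`. A sketch that applies them to the local filesystem; `apply_unified_diff` is a hypothetical helper, since the SDK ships the diff text but not a patcher:

```ruby
def handle_apply_patch(op)
  case op
  when OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::CreateFile
    File.write(op.path, apply_unified_diff("", op.diff)) # new file from the diff
  when OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile
    File.delete(op.path)
  when OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile
    File.write(op.path, apply_unified_diff(File.read(op.path), op.diff))
  end
end
```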
+ variant :update_file, -> { OpenAI::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile } + + class CreateFile < OpenAI::Internal::Type::BaseModel + # @!attribute diff + # Diff to apply. + # + # @return [String] + required :diff, String + + # @!attribute path + # Path of the file to create. + # + # @return [String] + required :path, String + + # @!attribute type + # Create a new file with the provided diff. + # + # @return [Symbol, :create_file] + required :type, const: :create_file + + # @!method initialize(diff:, path:, type: :create_file) + # Instruction describing how to create a file via the apply_patch tool. + # + # @param diff [String] Diff to apply. + # + # @param path [String] Path of the file to create. + # + # @param type [Symbol, :create_file] Create a new file with the provided diff. + end + + class DeleteFile < OpenAI::Internal::Type::BaseModel + # @!attribute path + # Path of the file to delete. + # + # @return [String] + required :path, String + + # @!attribute type + # Delete the specified file. + # + # @return [Symbol, :delete_file] + required :type, const: :delete_file + + # @!method initialize(path:, type: :delete_file) + # Instruction describing how to delete a file via the apply_patch tool. + # + # @param path [String] Path of the file to delete. + # + # @param type [Symbol, :delete_file] Delete the specified file. + end + + class UpdateFile < OpenAI::Internal::Type::BaseModel + # @!attribute diff + # Diff to apply. + # + # @return [String] + required :diff, String + + # @!attribute path + # Path of the file to update. + # + # @return [String] + required :path, String + + # @!attribute type + # Update an existing file with the provided diff. + # + # @return [Symbol, :update_file] + required :type, const: :update_file + + # @!method initialize(diff:, path:, type: :update_file) + # Instruction describing how to update a file via the apply_patch tool. + # + # @param diff [String] Diff to apply. + # + # @param path [String] Path of the file to update. + # + # @param type [Symbol, :update_file] Update an existing file with the provided diff. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::CreateFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile)] + end + end + end + end +end diff --git a/lib/openai/models/responses/response_apply_patch_tool_call_output.rb b/lib/openai/models/responses/response_apply_patch_tool_call_output.rb new file mode 100644 index 00000000..2b6cbcdf --- /dev/null +++ b/lib/openai/models/responses/response_apply_patch_tool_call_output.rb @@ -0,0 +1,77 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseApplyPatchToolCallOutput < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the apply patch tool call output. Populated when this item is + # returned via API. + # + # @return [String] + required :id, String + + # @!attribute call_id + # The unique ID of the apply patch tool call generated by the model. + # + # @return [String] + required :call_id, String + + # @!attribute output + # Optional textual output returned by the apply patch tool. + # + # @return [String, nil] + required :output, String, nil?: true + + # @!attribute status + # The status of the apply patch tool call output. One of `completed` or `failed`. 
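Consumers will mostly branch on that status. For example:

```ruby
# `item` is an OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput.
if item.status == :failed
  warn("apply_patch #{item.call_id} failed: #{item.output || "no output"}")
end
```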
+ # + # @return [Symbol, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput::Status] + required :status, enum: -> { OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status } + + # @!attribute type + # The type of the item. Always `apply_patch_call_output`. + # + # @return [Symbol, :apply_patch_call_output] + required :type, const: :apply_patch_call_output + + # @!attribute created_by + # The ID of the entity that created this tool call output. + # + # @return [String, nil] + optional :created_by, String + + # @!method initialize(id:, call_id:, output:, status:, created_by: nil, type: :apply_patch_call_output) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput} for more details. + # + # The output emitted by an apply patch tool call. + # + # @param id [String] The unique ID of the apply patch tool call output. Populated when this item is r + # + # @param call_id [String] The unique ID of the apply patch tool call generated by the model. + # + # @param output [String, nil] Optional textual output returned by the apply patch tool. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput::Status] The status of the apply patch tool call output. One of `completed` or `failed`. + # + # @param created_by [String] The ID of the entity that created this tool call output. + # + # @param type [Symbol, :apply_patch_call_output] The type of the item. Always `apply_patch_call_output`. + + # The status of the apply patch tool call output. One of `completed` or `failed`. + # + # @see OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput#status + module Status + extend OpenAI::Internal::Type::Enum + + COMPLETED = :completed + FAILED = :failed + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/responses/response_create_params.rb b/lib/openai/models/responses/response_create_params.rb index a5a6ba45..c90bd4ca 100644 --- a/lib/openai/models/responses/response_create_params.rb +++ b/lib/openai/models/responses/response_create_params.rb @@ -66,7 +66,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) # - [Function calling](https://platform.openai.com/docs/guides/function-calling) # - # @return [String, Array, nil] + # @return [String, Array, nil] optional :input, union: -> { OpenAI::Responses::ResponseCreateParams::Input } # @!attribute instructions @@ -147,6 +147,17 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @return [String, nil] optional :prompt_cache_key, String + # @!attribute prompt_cache_retention + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + # + # @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::PromptCacheRetention, nil] + optional :prompt_cache_retention, + enum: -> { OpenAI::Responses::ResponseCreateParams::PromptCacheRetention }, + nil?: true + # @!attribute reasoning # **gpt-5 and o-series models only** # @@ -229,7 +240,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # response. See the `tools` parameter to see how to specify which tools the model # can call. 
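The request-side counterpart to the response attribute above; a sketch, with the cache key purely illustrative:

```ruby
response = openai.responses.create(
  model: :"gpt-5.1",
  input: "Hello!",
  prompt_cache_key: "support-bot-v2", # stable key shared by similar requests
  prompt_cache_retention: :"24h"      # keep cached prefixes for up to 24 hours
)
```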
# - # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil] + # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell, nil] optional :tool_choice, union: -> { OpenAI::Responses::ResponseCreateParams::ToolChoice } # @!attribute tools @@ -253,7 +264,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # [function calling](https://platform.openai.com/docs/guides/function-calling). # You can also use custom tools to call your own code. # - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_logprobs @@ -297,7 +308,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @return [String, nil] optional :user, String - # @!method initialize(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) + # @!method initialize(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::ResponseCreateParams} for more details. # @@ -307,7 +318,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # # @param include [Array, nil] Specify additional output data to include in the model response. Currently suppo # - # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. + # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. # # @param instructions [String, nil] A system (or developer) message inserted into the model's context. # @@ -327,6 +338,8 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # + # @param prompt_cache_retention [Symbol, OpenAI::Models::Responses::ResponseCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. 
Set to `24h` to enable extended promp + # # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only** # # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi @@ -341,9 +354,9 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell] How the model should select which tool (or tools) to use when generating # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # @@ -393,7 +406,21 @@ module Input variant -> { OpenAI::Responses::ResponseInput } # @!method self.variants - # @return [Array(String, Array)] + # @return [Array(String, Array)] + end + + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + module PromptCacheRetention + extend OpenAI::Internal::Type::Enum + + IN_MEMORY = :"in-memory" + PROMPT_CACHE_RETENTION_24H = :"24h" + + # @!method self.values + # @return [Array] end # Specifies the processing type used for serving the request. @@ -479,8 +506,14 @@ module ToolChoice # Use this option to force the model to call a specific custom tool. variant -> { OpenAI::Responses::ToolChoiceCustom } + # Forces the model to call the apply_patch tool when executing a tool call. + variant -> { OpenAI::Responses::ToolChoiceApplyPatch } + + # Forces the model to call the function shell tool when a tool call is required. + variant -> { OpenAI::Responses::ToolChoiceShell } + # @!method self.variants - # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)] + # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell)] end # The truncation strategy to use for the model response. 
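With both the tool definition and the forcing option in place, a round trip starts like this sketch; the hash forms are assumed to coerce into `ApplyPatchTool` and `ToolChoiceApplyPatch`:

```ruby
response = openai.responses.create(
  model: :"gpt-5.1",
  input: "Rename the greet method to hello in lib/main.rb.",
  tools: [{type: :apply_patch}],
  tool_choice: {type: :apply_patch} # force the model to emit an apply_patch call
)
```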
diff --git a/lib/openai/models/responses/response_function_shell_call_output_content.rb b/lib/openai/models/responses/response_function_shell_call_output_content.rb new file mode 100644 index 00000000..39f378b2 --- /dev/null +++ b/lib/openai/models/responses/response_function_shell_call_output_content.rb @@ -0,0 +1,88 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseFunctionShellCallOutputContent < OpenAI::Internal::Type::BaseModel + # @!attribute outcome + # The exit or timeout outcome associated with this chunk. + # + # @return [OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout, OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit] + required :outcome, union: -> { OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome } + + # @!attribute stderr + # Captured stderr output for this chunk of the shell call. + # + # @return [String] + required :stderr, String + + # @!attribute stdout + # Captured stdout output for this chunk of the shell call. + # + # @return [String] + required :stdout, String + + # @!method initialize(outcome:, stderr:, stdout:) + # Captured stdout and stderr for a portion of a function shell tool call output. + # + # @param outcome [OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout, OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit] The exit or timeout outcome associated with this chunk. + # + # @param stderr [String] Captured stderr output for this chunk of the shell call. + # + # @param stdout [String] Captured stdout output for this chunk of the shell call. + + # The exit or timeout outcome associated with this chunk. + # + # @see OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent#outcome + module Outcome + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Indicates that the function shell call exceeded its configured time limit. + variant :timeout, -> { OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout } + + # Indicates that the shell commands finished and returned an exit code. + variant :exit, -> { OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit } + + class Timeout < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The outcome type. Always `timeout`. + # + # @return [Symbol, :timeout] + required :type, const: :timeout + + # @!method initialize(type: :timeout) + # Indicates that the function shell call exceeded its configured time limit. + # + # @param type [Symbol, :timeout] The outcome type. Always `timeout`. + end + + class Exit < OpenAI::Internal::Type::BaseModel + # @!attribute exit_code + # The exit code returned by the shell process. + # + # @return [Integer] + required :exit_code, Integer + + # @!attribute type + # The outcome type. Always `exit`. + # + # @return [Symbol, :exit] + required :type, const: :exit + + # @!method initialize(exit_code:, type: :exit) + # Indicates that the shell commands finished and returned an exit code. + # + # @param exit_code [Integer] The exit code returned by the shell process. + # + # @param type [Symbol, :exit] The outcome type. Always `exit`. 
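Handling a chunk's outcome mirrors the union above:

```ruby
# `chunk` is an OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent.
print(chunk.stdout)
warn(chunk.stderr) unless chunk.stderr.empty?

case chunk.outcome
when OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit
  puts("shell exited with code #{chunk.outcome.exit_code}")
when OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout
  warn("shell call hit its configured time limit")
end
```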
+ end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout, OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit)] + end + end + end + end +end diff --git a/lib/openai/models/responses/response_function_shell_tool_call.rb b/lib/openai/models/responses/response_function_shell_tool_call.rb new file mode 100644 index 00000000..6222003e --- /dev/null +++ b/lib/openai/models/responses/response_function_shell_tool_call.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseFunctionShellToolCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the function shell tool call. Populated when this item is + # returned via API. + # + # @return [String] + required :id, String + + # @!attribute action + # The shell commands and limits that describe how to run the tool call. + # + # @return [OpenAI::Models::Responses::ResponseFunctionShellToolCall::Action] + required :action, -> { OpenAI::Responses::ResponseFunctionShellToolCall::Action } + + # @!attribute call_id + # The unique ID of the function shell tool call generated by the model. + # + # @return [String] + required :call_id, String + + # @!attribute status + # The status of the shell call. One of `in_progress`, `completed`, or + # `incomplete`. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseFunctionShellToolCall::Status] + required :status, enum: -> { OpenAI::Responses::ResponseFunctionShellToolCall::Status } + + # @!attribute type + # The type of the item. Always `shell_call`. + # + # @return [Symbol, :shell_call] + required :type, const: :shell_call + + # @!attribute created_by + # The ID of the entity that created this tool call. + # + # @return [String, nil] + optional :created_by, String + + # @!method initialize(id:, action:, call_id:, status:, created_by: nil, type: :shell_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionShellToolCall} for more details. + # + # A tool call that executes one or more shell commands in a managed environment. + # + # @param id [String] The unique ID of the function shell tool call. Populated when this item is retur + # + # @param action [OpenAI::Models::Responses::ResponseFunctionShellToolCall::Action] The shell commands and limits that describe how to run the tool call. + # + # @param call_id [String] The unique ID of the function shell tool call generated by the model. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionShellToolCall::Status] The status of the shell call. One of `in_progress`, `completed`, or `incomplete` + # + # @param created_by [String] The ID of the entity that created this tool call. + # + # @param type [Symbol, :shell_call] The type of the item. Always `shell_call`. + + # @see OpenAI::Models::Responses::ResponseFunctionShellToolCall#action + class Action < OpenAI::Internal::Type::BaseModel + # @!attribute commands + # + # @return [Array] + required :commands, OpenAI::Internal::Type::ArrayOf[String] + + # @!attribute max_output_length + # Optional maximum number of characters to return from each command. + # + # @return [Integer, nil] + required :max_output_length, Integer, nil?: true + + # @!attribute timeout_ms + # Optional timeout in milliseconds for the commands. 
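An `Action` is just commands plus limits, so a minimal local executor is straightforward. A sketch using only the standard library; it ignores sandboxing and only approximates `timeout_ms` (a `Timeout` block does not reliably kill the subprocess) and applies `max_output_length` per command:

```ruby
require "open3"
require "timeout"

# `action` is an OpenAI::Models::Responses::ResponseFunctionShellToolCall::Action.
results = action.commands.map do |cmd|
  stdout, stderr, status = Timeout.timeout(action.timeout_ms&.fdiv(1000)) do
    Open3.capture3(cmd)
  end
  limit = action.max_output_length
  stdout = stdout[0, limit] if limit # truncate per-command output
  {stdout: stdout, stderr: stderr, outcome: {type: :exit, exit_code: status.exitstatus}}
rescue Timeout::Error
  {stdout: "", stderr: "", outcome: {type: :timeout}}
end
```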
+ # + # @return [Integer, nil] + required :timeout_ms, Integer, nil?: true + + # @!method initialize(commands:, max_output_length:, timeout_ms:) + # The shell commands and limits that describe how to run the tool call. + # + # @param commands [Array] + # + # @param max_output_length [Integer, nil] Optional maximum number of characters to return from each command. + # + # @param timeout_ms [Integer, nil] Optional timeout in milliseconds for the commands. + end + + # The status of the shell call. One of `in_progress`, `completed`, or + # `incomplete`. + # + # @see OpenAI::Models::Responses::ResponseFunctionShellToolCall#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/responses/response_function_shell_tool_call_output.rb b/lib/openai/models/responses/response_function_shell_tool_call_output.rb new file mode 100644 index 00000000..cad9b325 --- /dev/null +++ b/lib/openai/models/responses/response_function_shell_tool_call_output.rb @@ -0,0 +1,158 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseFunctionShellToolCallOutput < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the shell call output. Populated when this item is returned via + # API. + # + # @return [String] + required :id, String + + # @!attribute call_id + # The unique ID of the shell tool call generated by the model. + # + # @return [String] + required :call_id, String + + # @!attribute max_output_length + # The maximum length of the shell command output. This is generated by the model + # and should be passed back with the raw output. + # + # @return [Integer, nil] + required :max_output_length, Integer, nil?: true + + # @!attribute output + # An array of shell call output contents + # + # @return [Array] + required :output, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output] } + + # @!attribute type + # The type of the shell call output. Always `shell_call_output`. + # + # @return [Symbol, :shell_call_output] + required :type, const: :shell_call_output + + # @!attribute created_by + # + # @return [String, nil] + optional :created_by, String + + # @!method initialize(id:, call_id:, max_output_length:, output:, created_by: nil, type: :shell_call_output) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput} for more + # details. + # + # The output of a shell tool call. + # + # @param id [String] The unique ID of the shell call output. Populated when this item is returned via + # + # @param call_id [String] The unique ID of the shell tool call generated by the model. + # + # @param max_output_length [Integer, nil] The maximum length of the shell command output. This is generated by the model a + # + # @param output [Array] An array of shell call output contents + # + # @param created_by [String] + # + # @param type [Symbol, :shell_call_output] The type of the shell call output. Always `shell_call_output`. + + class Output < OpenAI::Internal::Type::BaseModel + # @!attribute outcome + # Represents either an exit outcome (with an exit code) or a timeout outcome for a + # shell call output chunk. 
+ # + # @return [OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Timeout, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Exit] + required :outcome, union: -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome } + + # @!attribute stderr + # + # @return [String] + required :stderr, String + + # @!attribute stdout + # + # @return [String] + required :stdout, String + + # @!attribute created_by + # + # @return [String, nil] + optional :created_by, String + + # @!method initialize(outcome:, stderr:, stdout:, created_by: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output} for + # more details. + # + # The content of a shell call output. + # + # @param outcome [OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Timeout, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Exit] Represents either an exit outcome (with an exit code) or a timeout outcome for a + # + # @param stderr [String] + # + # @param stdout [String] + # + # @param created_by [String] + + # Represents either an exit outcome (with an exit code) or a timeout outcome for a + # shell call output chunk. + # + # @see OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output#outcome + module Outcome + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Indicates that the function shell call exceeded its configured time limit. + variant :timeout, -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Timeout } + + # Indicates that the shell commands finished and returned an exit code. + variant :exit, -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Exit } + + class Timeout < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The outcome type. Always `timeout`. + # + # @return [Symbol, :timeout] + required :type, const: :timeout + + # @!method initialize(type: :timeout) + # Indicates that the function shell call exceeded its configured time limit. + # + # @param type [Symbol, :timeout] The outcome type. Always `timeout`. + end + + class Exit < OpenAI::Internal::Type::BaseModel + # @!attribute exit_code + # Exit code from the shell process. + # + # @return [Integer] + required :exit_code, Integer + + # @!attribute type + # The outcome type. Always `exit`. + # + # @return [Symbol, :exit] + required :type, const: :exit + + # @!method initialize(exit_code:, type: :exit) + # Indicates that the shell commands finished and returned an exit code. + # + # @param exit_code [Integer] Exit code from the shell process. + # + # @param type [Symbol, :exit] The outcome type. Always `exit`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Timeout, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Exit)] + end + end + end + end + end +end diff --git a/lib/openai/models/responses/response_input_item.rb b/lib/openai/models/responses/response_input_item.rb index 0ff9bfc7..f13d9ba1 100644 --- a/lib/openai/models/responses/response_input_item.rb +++ b/lib/openai/models/responses/response_input_item.rb @@ -68,6 +68,18 @@ module ResponseInputItem # The output of a local shell tool call. 
variant :local_shell_call_output, -> { OpenAI::Responses::ResponseInputItem::LocalShellCallOutput } + # A tool representing a request to execute one or more shell commands. + variant :shell_call, -> { OpenAI::Responses::ResponseInputItem::ShellCall } + + # The streamed output items emitted by a function shell tool call. + variant :shell_call_output, -> { OpenAI::Responses::ResponseInputItem::ShellCallOutput } + + # A tool call representing a request to create, delete, or update files using diff patches. + variant :apply_patch_call, -> { OpenAI::Responses::ResponseInputItem::ApplyPatchCall } + + # The streamed output emitted by an apply patch tool call. + variant :apply_patch_call_output, -> { OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput } + # A list of tools available on an MCP server. variant :mcp_list_tools, -> { OpenAI::Responses::ResponseInputItem::McpListTools } @@ -595,6 +607,388 @@ module Status end end + class ShellCall < OpenAI::Internal::Type::BaseModel + # @!attribute action + # The shell commands and limits that describe how to run the tool call. + # + # @return [OpenAI::Models::Responses::ResponseInputItem::ShellCall::Action] + required :action, -> { OpenAI::Responses::ResponseInputItem::ShellCall::Action } + + # @!attribute call_id + # The unique ID of the function shell tool call generated by the model. + # + # @return [String] + required :call_id, String + + # @!attribute type + # The type of the item. Always `function_shell_call`. + # + # @return [Symbol, :shell_call] + required :type, const: :shell_call + + # @!attribute id + # The unique ID of the function shell tool call. Populated when this item is + # returned via API. + # + # @return [String, nil] + optional :id, String, nil?: true + + # @!attribute status + # The status of the shell call. One of `in_progress`, `completed`, or + # `incomplete`. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ShellCall::Status, nil] + optional :status, enum: -> { OpenAI::Responses::ResponseInputItem::ShellCall::Status }, nil?: true + + # @!method initialize(action:, call_id:, id: nil, status: nil, type: :shell_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseInputItem::ShellCall} for more details. + # + # A tool representing a request to execute one or more shell commands. + # + # @param action [OpenAI::Models::Responses::ResponseInputItem::ShellCall::Action] The shell commands and limits that describe how to run the tool call. + # + # @param call_id [String] The unique ID of the function shell tool call generated by the model. + # + # @param id [String, nil] The unique ID of the function shell tool call. Populated when this item is retur + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::ShellCall::Status, nil] The status of the shell call. One of `in_progress`, `completed`, or `incomplete` + # + # @param type [Symbol, :shell_call] The type of the item. Always `function_shell_call`. + + # @see OpenAI::Models::Responses::ResponseInputItem::ShellCall#action + class Action < OpenAI::Internal::Type::BaseModel + # @!attribute commands + # Ordered shell commands for the execution environment to run. + # + # @return [Array] + required :commands, OpenAI::Internal::Type::ArrayOf[String] + + # @!attribute max_output_length + # Maximum number of UTF-8 characters to capture from combined stdout and stderr + # output. 
+ # + # @return [Integer, nil] + optional :max_output_length, Integer, nil?: true + + # @!attribute timeout_ms + # Maximum wall-clock time in milliseconds to allow the shell commands to run. + # + # @return [Integer, nil] + optional :timeout_ms, Integer, nil?: true + + # @!method initialize(commands:, max_output_length: nil, timeout_ms: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseInputItem::ShellCall::Action} for more + # details. + # + # The shell commands and limits that describe how to run the tool call. + # + # @param commands [Array] Ordered shell commands for the execution environment to run. + # + # @param max_output_length [Integer, nil] Maximum number of UTF-8 characters to capture from combined stdout and stderr ou + # + # @param timeout_ms [Integer, nil] Maximum wall-clock time in milliseconds to allow the shell commands to run. + end + + # The status of the shell call. One of `in_progress`, `completed`, or + # `incomplete`. + # + # @see OpenAI::Models::Responses::ResponseInputItem::ShellCall#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + end + + class ShellCallOutput < OpenAI::Internal::Type::BaseModel + # @!attribute call_id + # The unique ID of the function shell tool call generated by the model. + # + # @return [String] + required :call_id, String + + # @!attribute output + # Captured chunks of stdout and stderr output, along with their associated + # outcomes. + # + # @return [Array] + required :output, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFunctionShellCallOutputContent] } + + # @!attribute type + # The type of the item. Always `function_shell_call_output`. + # + # @return [Symbol, :shell_call_output] + required :type, const: :shell_call_output + + # @!attribute id + # The unique ID of the function shell tool call output. Populated when this item + # is returned via API. + # + # @return [String, nil] + optional :id, String, nil?: true + + # @!attribute max_output_length + # The maximum number of UTF-8 characters captured for this shell call's combined + # output. + # + # @return [Integer, nil] + optional :max_output_length, Integer, nil?: true + + # @!method initialize(call_id:, output:, id: nil, max_output_length: nil, type: :shell_call_output) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput} for more + # details. + # + # The streamed output items emitted by a function shell tool call. + # + # @param call_id [String] The unique ID of the function shell tool call generated by the model. + # + # @param output [Array] Captured chunks of stdout and stderr output, along with their associated outcome + # + # @param id [String, nil] The unique ID of the function shell tool call output. Populated when this item i + # + # @param max_output_length [Integer, nil] The maximum number of UTF-8 characters captured for this shell call's combined o + # + # @param type [Symbol, :shell_call_output] The type of the item. Always `function_shell_call_output`. + end + + class ApplyPatchCall < OpenAI::Internal::Type::BaseModel + # @!attribute call_id + # The unique ID of the apply patch tool call generated by the model. 
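Captured results like the `results` array in the executor sketch above feed straight back as a `shell_call_output` input item. A sketch, assuming hash inputs coerce into this model:

```ruby
follow_up = openai.responses.create(
  model: :"gpt-5.1",
  previous_response_id: response.id, # the response that issued the shell_call
  input: [
    {
      type: :shell_call_output,
      call_id: call.call_id, # `call` is the ResponseFunctionShellToolCall item
      output: results        # [{stdout:, stderr:, outcome: {...}}, ...]
    }
  ]
)
```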
+ # + # @return [String] + required :call_id, String + + # @!attribute operation + # The specific create, delete, or update instruction for the apply_patch tool + # call. + # + # @return [OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Operation::CreateFile, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Operation::DeleteFile, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Operation::UpdateFile] + required :operation, union: -> { OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation } + + # @!attribute status + # The status of the apply patch tool call. One of `in_progress` or `completed`. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Status] + required :status, enum: -> { OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Status } + + # @!attribute type + # The type of the item. Always `apply_patch_call`. + # + # @return [Symbol, :apply_patch_call] + required :type, const: :apply_patch_call + + # @!attribute id + # The unique ID of the apply patch tool call. Populated when this item is returned + # via API. + # + # @return [String, nil] + optional :id, String, nil?: true + + # @!method initialize(call_id:, operation:, status:, id: nil, type: :apply_patch_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall} for more details. + # + # A tool call representing a request to create, delete, or update files using diff + # patches. + # + # @param call_id [String] The unique ID of the apply patch tool call generated by the model. + # + # @param operation [OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Operation::CreateFile, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Operation::DeleteFile, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Operation::UpdateFile] The specific create, delete, or update instruction for the apply_patch tool call + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Status] The status of the apply patch tool call. One of `in_progress` or `completed`. + # + # @param id [String, nil] The unique ID of the apply patch tool call. Populated when this item is returned + # + # @param type [Symbol, :apply_patch_call] The type of the item. Always `apply_patch_call`. + + # The specific create, delete, or update instruction for the apply_patch tool + # call. + # + # @see OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall#operation + module Operation + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Instruction for creating a new file via the apply_patch tool. + variant :create_file, -> { OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::CreateFile } + + # Instruction for deleting an existing file via the apply_patch tool. + variant :delete_file, -> { OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::DeleteFile } + + # Instruction for updating an existing file via the apply_patch tool. + variant :update_file, -> { OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::UpdateFile } + + class CreateFile < OpenAI::Internal::Type::BaseModel + # @!attribute diff + # Unified diff content to apply when creating the file. + # + # @return [String] + required :diff, String + + # @!attribute path + # Path of the file to create relative to the workspace root. + # + # @return [String] + required :path, String + + # @!attribute type + # The operation type. 
Always `create_file`. + # + # @return [Symbol, :create_file] + required :type, const: :create_file + + # @!method initialize(diff:, path:, type: :create_file) + # Instruction for creating a new file via the apply_patch tool. + # + # @param diff [String] Unified diff content to apply when creating the file. + # + # @param path [String] Path of the file to create relative to the workspace root. + # + # @param type [Symbol, :create_file] The operation type. Always `create_file`. + end + + class DeleteFile < OpenAI::Internal::Type::BaseModel + # @!attribute path + # Path of the file to delete relative to the workspace root. + # + # @return [String] + required :path, String + + # @!attribute type + # The operation type. Always `delete_file`. + # + # @return [Symbol, :delete_file] + required :type, const: :delete_file + + # @!method initialize(path:, type: :delete_file) + # Instruction for deleting an existing file via the apply_patch tool. + # + # @param path [String] Path of the file to delete relative to the workspace root. + # + # @param type [Symbol, :delete_file] The operation type. Always `delete_file`. + end + + class UpdateFile < OpenAI::Internal::Type::BaseModel + # @!attribute diff + # Unified diff content to apply to the existing file. + # + # @return [String] + required :diff, String + + # @!attribute path + # Path of the file to update relative to the workspace root. + # + # @return [String] + required :path, String + + # @!attribute type + # The operation type. Always `update_file`. + # + # @return [Symbol, :update_file] + required :type, const: :update_file + + # @!method initialize(diff:, path:, type: :update_file) + # Instruction for updating an existing file via the apply_patch tool. + # + # @param diff [String] Unified diff content to apply to the existing file. + # + # @param path [String] Path of the file to update relative to the workspace root. + # + # @param type [Symbol, :update_file] The operation type. Always `update_file`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Operation::CreateFile, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Operation::DeleteFile, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::Operation::UpdateFile)] + end + + # The status of the apply patch tool call. One of `in_progress` or `completed`. + # + # @see OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + + # @!method self.values + # @return [Array] + end + end + + class ApplyPatchCallOutput < OpenAI::Internal::Type::BaseModel + # @!attribute call_id + # The unique ID of the apply patch tool call generated by the model. + # + # @return [String] + required :call_id, String + + # @!attribute status + # The status of the apply patch tool call output. One of `completed` or `failed`. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput::Status] + required :status, enum: -> { OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::Status } + + # @!attribute type + # The type of the item. Always `apply_patch_call_output`. + # + # @return [Symbol, :apply_patch_call_output] + required :type, const: :apply_patch_call_output + + # @!attribute id + # The unique ID of the apply patch tool call output. Populated when this item is + # returned via API. 
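And the matching acknowledgement once the patch has been applied (or has failed) locally; a sketch with illustrative values:

```ruby
openai.responses.create(
  model: :"gpt-5.1",
  previous_response_id: response.id,
  input: [
    {
      type: :apply_patch_call_output,
      call_id: call.call_id,         # from the apply_patch_call item
      status: :completed,            # or :failed
      output: "Updated lib/main.rb"  # optional log text
    }
  ]
)
```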
+ # + # @return [String, nil] + optional :id, String, nil?: true + + # @!attribute output + # Optional human-readable log text from the apply patch tool (e.g., patch results + # or errors). + # + # @return [String, nil] + optional :output, String + + # @!method initialize(call_id:, status:, id: nil, output: nil, type: :apply_patch_call_output) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput} for more + # details. + # + # The streamed output emitted by an apply patch tool call. + # + # @param call_id [String] The unique ID of the apply patch tool call generated by the model. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput::Status] The status of the apply patch tool call output. One of `completed` or `failed`. + # + # @param id [String, nil] The unique ID of the apply patch tool call output. Populated when this item is r + # + # @param output [String] Optional human-readable log text from the apply patch tool (e.g., patch results + # + # @param type [Symbol, :apply_patch_call_output] The type of the item. Always `apply_patch_call_output`. + + # The status of the apply patch tool call output. One of `completed` or `failed`. + # + # @see OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput#status + module Status + extend OpenAI::Internal::Type::Enum + + COMPLETED = :completed + FAILED = :failed + + # @!method self.values + # @return [Array] + end + end + class McpListTools < OpenAI::Internal::Type::BaseModel # @!attribute id # The unique ID of the list. @@ -917,7 +1311,7 @@ module Type end # @!method self.variants - # @return [Array(OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference)] + # @return [Array(OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem,
OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference)] end end end diff --git a/lib/openai/models/responses/response_item.rb b/lib/openai/models/responses/response_item.rb index 47cbb16f..363da875 100644 --- a/lib/openai/models/responses/response_item.rb +++ b/lib/openai/models/responses/response_item.rb @@ -46,6 +46,18 @@ module ResponseItem # The output of a local shell tool call. variant :local_shell_call_output, -> { OpenAI::Responses::ResponseItem::LocalShellCallOutput } + # A tool call that executes one or more shell commands in a managed environment. + variant :shell_call, -> { OpenAI::Responses::ResponseFunctionShellToolCall } + + # The output of a shell tool call. + variant :shell_call_output, -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput } + + # A tool call that applies file diffs by creating, deleting, or updating files. + variant :apply_patch_call, -> { OpenAI::Responses::ResponseApplyPatchToolCall } + + # The output emitted by an apply patch tool call. + variant :apply_patch_call_output, -> { OpenAI::Responses::ResponseApplyPatchToolCallOutput } + # A list of tools available on an MCP server. 
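A sketch of how these new input item shapes compose in practice: all IDs and the diff body below are hypothetical, hashes stand in for the typesafe classes, and the field names follow the `ApplyPatchCall` / `ApplyPatchCallOutput` models defined above.

```ruby
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Hypothetical IDs and diff content; shapes mirror the models above.
items = [
  {
    type: :apply_patch_call,
    call_id: "call_123",
    status: :completed,
    operation: {type: :update_file, path: "lib/example.rb", diff: "@@ -1 +1 @@\n-old\n+new"}
  },
  {
    type: :apply_patch_call_output,
    call_id: "call_123",
    status: :completed,
    output: "Updated lib/example.rb"
  }
]

response = openai.responses.create(model: :"gpt-5.1", input: items)
```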
variant :mcp_list_tools, -> { OpenAI::Responses::ResponseItem::McpListTools } @@ -574,7 +586,7 @@ module Status end # @!method self.variants - # @return [Array(OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall)] + # @return [Array(OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall)] end end end diff --git a/lib/openai/models/responses/response_item_list.rb b/lib/openai/models/responses/response_item_list.rb index ea2b6f1b..037d1871 100644 --- a/lib/openai/models/responses/response_item_list.rb +++ b/lib/openai/models/responses/response_item_list.rb @@ -7,7 +7,7 @@ class ResponseItemList < OpenAI::Internal::Type::BaseModel # @!attribute data # A list of items used to generate this response. # - # @return [Array] + # @return [Array] required :data, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseItem] } # @!attribute first_id @@ -37,7 +37,7 @@ class ResponseItemList < OpenAI::Internal::Type::BaseModel # @!method initialize(data:, first_id:, has_more:, last_id:, object: :list) # A list of Response items. # - # @param data [Array] A list of items used to generate this response. + # @param data [Array] A list of items used to generate this response. # # @param first_id [String] The ID of the first item in the list. 
# diff --git a/lib/openai/models/responses/response_output_item.rb b/lib/openai/models/responses/response_output_item.rb index 857b16da..caefb638 100644 --- a/lib/openai/models/responses/response_output_item.rb +++ b/lib/openai/models/responses/response_output_item.rb @@ -43,6 +43,18 @@ module ResponseOutputItem # A tool call to run a command on the local shell. variant :local_shell_call, -> { OpenAI::Responses::ResponseOutputItem::LocalShellCall } + # A tool call that executes one or more shell commands in a managed environment. + variant :shell_call, -> { OpenAI::Responses::ResponseFunctionShellToolCall } + + # The output of a shell tool call. + variant :shell_call_output, -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput } + + # A tool call that applies file diffs by creating, deleting, or updating files. + variant :apply_patch_call, -> { OpenAI::Responses::ResponseApplyPatchToolCall } + + # The output emitted by an apply patch tool call. + variant :apply_patch_call_output, -> { OpenAI::Responses::ResponseApplyPatchToolCallOutput } + # An invocation of a tool on an MCP server. variant :mcp_call, -> { OpenAI::Responses::ResponseOutputItem::McpCall } @@ -468,7 +480,7 @@ class McpApprovalRequest < OpenAI::Internal::Type::BaseModel end # @!method self.variants - # @return [Array(OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall)] + # @return [Array(OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall)] end end end diff --git a/lib/openai/models/responses/response_output_item_added_event.rb b/lib/openai/models/responses/response_output_item_added_event.rb index 1ca221ee..2485cbea 100644 --- a/lib/openai/models/responses/response_output_item_added_event.rb +++ b/lib/openai/models/responses/response_output_item_added_event.rb @@ -7,7 +7,7 @@ class ResponseOutputItemAddedEvent < OpenAI::Internal::Type::BaseModel # @!attribute item # The output item that was added. 
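Since `ResponseOutputItem` now includes shell and apply_patch variants, consumers that walk `response.output` need matching branches. A minimal sketch, assuming the standalone `ResponseApplyPatchToolCall` / output models expose the same `status` and `output` fields as the input-item shapes shown earlier:

```ruby
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = openai.responses.create(model: :"gpt-5.1", input: "Fix the failing test.")

response.output.each do |item|
  case item.type
  when :shell_call, :shell_call_output
    puts "shell activity: #{item.type}"
  when :apply_patch_call
    puts "patch requested (status: #{item.status})"
  when :apply_patch_call_output
    puts "patch result: #{item.output}"
  else
    # fall through to existing handling (messages, function calls, ...)
  end
end
```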
# - # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] + # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] required :item, union: -> { OpenAI::Responses::ResponseOutputItem } # @!attribute output_index @@ -34,7 +34,7 @@ class ResponseOutputItemAddedEvent < OpenAI::Internal::Type::BaseModel # # Emitted when a new output item is added. # - # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was added. 
+ # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was added. # # @param output_index [Integer] The index of the output item that was added. # diff --git a/lib/openai/models/responses/response_output_item_done_event.rb b/lib/openai/models/responses/response_output_item_done_event.rb index f96435eb..61f3dfac 100644 --- a/lib/openai/models/responses/response_output_item_done_event.rb +++ b/lib/openai/models/responses/response_output_item_done_event.rb @@ -7,7 +7,7 @@ class ResponseOutputItemDoneEvent < OpenAI::Internal::Type::BaseModel # @!attribute item # The output item that was marked done. # - # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] + # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] required :item, union: -> { OpenAI::Responses::ResponseOutputItem } # @!attribute output_index @@ -34,7 +34,7 @@ class ResponseOutputItemDoneEvent < OpenAI::Internal::Type::BaseModel # # Emitted when an output item is marked done. 
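The same variants flow through the streaming item events. A sketch that watches for finished items, assuming the event `type` symbols mirror the wire names `response.output_item.added` and `response.output_item.done`:

```ruby
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

stream = openai.responses.stream(input: "Refactor util.rb.", model: :"gpt-5.1")

stream.each do |event|
  # Only react once an output item is marked done.
  next unless event.type == :"response.output_item.done"

  puts "item ##{event.output_index} finished: #{event.item.type}"
end
```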
# - # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was marked done. + # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was marked done. # # @param output_index [Integer] The index of the output item that was marked done. # diff --git a/lib/openai/models/responses/tool.rb b/lib/openai/models/responses/tool.rb index 97f3c7b1..b06771b0 100644 --- a/lib/openai/models/responses/tool.rb +++ b/lib/openai/models/responses/tool.rb @@ -33,9 +33,15 @@ module Tool # A tool that allows the model to execute shell commands in a local environment. variant :local_shell, -> { OpenAI::Responses::Tool::LocalShell } + # A tool that allows the model to execute shell commands. + variant :shell, -> { OpenAI::Responses::FunctionShellTool } + # A custom tool that processes input using a specified format. Learn more about [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) variant :custom, -> { OpenAI::Responses::CustomTool } + # Allows the assistant to create, delete, or update files using unified diffs. + variant :apply_patch, -> { OpenAI::Responses::ApplyPatchTool } + # Search the Internet for sources related to the prompt. Learn more about the # [web search tool](https://platform.openai.com/docs/guides/tools-web-search). 
variant -> { OpenAI::Responses::WebSearchTool } @@ -682,7 +688,7 @@ class LocalShell < OpenAI::Internal::Type::BaseModel end # @!method self.variants - # @return [Array(OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool)] + # @return [Array(OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool)] end end end diff --git a/lib/openai/models/responses/tool_choice_apply_patch.rb b/lib/openai/models/responses/tool_choice_apply_patch.rb new file mode 100644 index 00000000..bab518df --- /dev/null +++ b/lib/openai/models/responses/tool_choice_apply_patch.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ToolChoiceApplyPatch < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The tool to call. Always `apply_patch`. + # + # @return [Symbol, :apply_patch] + required :type, const: :apply_patch + + # @!method initialize(type: :apply_patch) + # Forces the model to call the apply_patch tool when executing a tool call. + # + # @param type [Symbol, :apply_patch] The tool to call. Always `apply_patch`. + end + end + end +end diff --git a/lib/openai/models/responses/tool_choice_shell.rb b/lib/openai/models/responses/tool_choice_shell.rb new file mode 100644 index 00000000..4b5b2b46 --- /dev/null +++ b/lib/openai/models/responses/tool_choice_shell.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ToolChoiceShell < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The tool to call. Always `shell`. + # + # @return [Symbol, :shell] + required :type, const: :shell + + # @!method initialize(type: :shell) + # Forces the model to call the function shell tool when a tool call is required. + # + # @param type [Symbol, :shell] The tool to call. Always `shell`. + end + end + end +end diff --git a/lib/openai/resources/chat/completions.rb b/lib/openai/resources/chat/completions.rb index 33034b22..ed833d5c 100644 --- a/lib/openai/resources/chat/completions.rb +++ b/lib/openai/resources/chat/completions.rb @@ -30,7 +30,7 @@ class Completions # unsupported parameters in reasoning models, # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). 
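Combining the new `Tool` variants with the new tool-choice classes, a request that offers both tools and forces the patch tool might look like the following. This assumes, as the hunks above suggest, that the shell and apply_patch tools need only their `type` discriminator:

```ruby
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = openai.responses.create(
  model: :"gpt-5.1",
  input: "Apply the attached fix to README.md.",
  tools: [{type: :shell}, {type: :apply_patch}],      # hashes stand in for the tool classes
  tool_choice: OpenAI::Responses::ToolChoiceApplyPatch.new # `type:` defaults to :apply_patch
)
```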
# - # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) + # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) # # @param messages [Array] A list of messages comprising the conversation so far. Depending on the # @@ -66,6 +66,8 @@ class Completions # # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # + # @param prompt_cache_retention [Symbol, OpenAI::Models::Chat::CompletionCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp + # # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. @@ -275,7 +277,7 @@ def stream(params) # unsupported parameters in reasoning models, # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). # - # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) + # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) # # @param messages [Array] A list of messages comprising the conversation so far. 
Depending on the # @@ -311,6 +313,8 @@ def stream(params) # # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # + # @param prompt_cache_retention [Symbol, OpenAI::Models::Chat::CompletionCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp + # # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. diff --git a/lib/openai/resources/conversations.rb b/lib/openai/resources/conversations.rb index 05c9823e..5c280476 100644 --- a/lib/openai/resources/conversations.rb +++ b/lib/openai/resources/conversations.rb @@ -13,7 +13,7 @@ class Conversations # # @overload create(items: nil, metadata: nil, request_options: {}) # - # @param items [Array, nil] Initial items to include in the conversation context. You may add up to 20 items + # @param items [Array, nil] Initial items to include in the conversation context. You may add up to 20 items # # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # diff --git a/lib/openai/resources/conversations/items.rb b/lib/openai/resources/conversations/items.rb index aabf3cd7..8ea98a29 100644 --- a/lib/openai/resources/conversations/items.rb +++ b/lib/openai/resources/conversations/items.rb @@ -13,7 +13,7 @@ class Items # # @param conversation_id [String] Path param: The ID of the conversation to add the item to. # - # @param items [Array] Body param: The items to add to the conversation. You may add up to 20 items at + # @param items [Array] Body param: The items to add to the conversation. You may add up to 20 items at # # @param include [Array] Query param: Additional fields to include in the response. 
See the `include` # @@ -50,7 +50,7 @@ def create(conversation_id, params) # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput] + # @return [OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput] # # @see OpenAI::Models::Conversations::ItemRetrieveParams def retrieve(item_id, params) @@ -87,7 +87,7 @@ def retrieve(item_id, params) # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Internal::ConversationCursorPage] + # @return [OpenAI::Internal::ConversationCursorPage] # # @see OpenAI::Models::Conversations::ItemListParams def list(conversation_id, params = {}) diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb index 9172f36e..b0b91c14 100644 --- a/lib/openai/resources/responses.rb +++ b/lib/openai/resources/responses.rb @@ -26,7 +26,7 @@ class Responses # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use # your own data as input for the model's response. 
# - # @overload create(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) + # @overload create(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) # # @param background [Boolean, nil] Whether to run the model response in the background. # @@ -34,7 +34,7 @@ class Responses # # @param include [Array, nil] Specify additional output data to include in the model response. Currently suppo # - # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. + # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. # # @param instructions [String, nil] A system (or developer) message inserted into the model's context. # @@ -54,6 +54,8 @@ class Responses # # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # + # @param prompt_cache_retention [Symbol, OpenAI::Models::Responses::ResponseCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp + # # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only** # # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi @@ -68,9 +70,9 @@ class Responses # # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell] How the model should select which tool (or tools) to use when generating # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. 
You # # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # @@ -125,7 +127,7 @@ def create(params = {}) # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use # your own data as input for the model's response. # - # @overload stream(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) + # @overload stream(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) # # @param background [Boolean, nil] Whether to run the model response in the background. # @@ -133,7 +135,7 @@ def create(params = {}) # # @param include [Array, nil] Specify additional output data to include in the model response. Currently suppo # - # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. + # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. # # @param instructions [String, nil] A system (or developer) message inserted into the model's context. # @@ -271,6 +273,8 @@ def stream(params) # # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # + # @param prompt_cache_retention [Symbol, OpenAI::Models::Responses::ResponseCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp + # # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only** # # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi @@ -285,9 +289,9 @@ def stream(params) # # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. 
Can be plain # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell] How the model should select which tool (or tools) to use when generating # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # diff --git a/lib/openai/resources/responses/input_items.rb b/lib/openai/resources/responses/input_items.rb index 551f4f38..c3c671b8 100644 --- a/lib/openai/resources/responses/input_items.rb +++ b/lib/openai/resources/responses/input_items.rb @@ -23,7 +23,7 @@ class InputItems # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Internal::CursorPage] + # @return [OpenAI::Internal::CursorPage] # # @see OpenAI::Models::Responses::InputItemListParams def list(response_id, params = {}) diff --git a/lib/openai/resources/responses/input_tokens.rb b/lib/openai/resources/responses/input_tokens.rb index a88056a4..3ea5b08d 100644 --- a/lib/openai/resources/responses/input_tokens.rb +++ b/lib/openai/resources/responses/input_tokens.rb @@ -13,7 +13,7 @@ class InputTokens # # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are # - # @param input [String, Array, nil] Text, image, or file inputs to the model, used to generate a response + # @param input [String, Array, nil] Text, image, or file inputs to the model, used to generate a response # # @param instructions [String, nil] A system (or developer) message inserted into the model's context. # @@ -27,9 +27,9 @@ class InputTokens # # @param text [OpenAI::Models::Responses::InputTokenCountParams::Text, nil] Configuration options for a text response from the model. 
Can be plain # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell, nil] How the model should select which tool (or tools) to use when generating # - # @param tools [Array, nil] An array of tools the model may call while generating a response. You can specif + # @param tools [Array, nil] An array of tools the model may call while generating a response. You can specif # # @param truncation [Symbol, OpenAI::Models::Responses::InputTokenCountParams::Truncation] The truncation strategy to use for the model response. - `auto`: If the input to # diff --git a/lib/openai/version.rb b/lib/openai/version.rb index 711a6446..81eb5ec6 100644 --- a/lib/openai/version.rb +++ b/lib/openai/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module OpenAI - VERSION = "0.35.2" + VERSION = "0.36.0" end diff --git a/rbi/openai/models/batch_create_params.rbi b/rbi/openai/models/batch_create_params.rbi index c15c791a..35374184 100644 --- a/rbi/openai/models/batch_create_params.rbi +++ b/rbi/openai/models/batch_create_params.rbi @@ -17,9 +17,10 @@ module OpenAI attr_accessor :completion_window # The endpoint to be used for all requests in the batch. Currently - # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - # are supported. Note that `/v1/embeddings` batches are also restricted to a - # maximum of 50,000 embedding inputs across all requests in the batch. + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, + # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also + # restricted to a maximum of 50,000 embedding inputs across all requests in the + # batch. sig { returns(OpenAI::BatchCreateParams::Endpoint::OrSymbol) } attr_accessor :endpoint @@ -74,9 +75,10 @@ module OpenAI # is supported. completion_window:, # The endpoint to be used for all requests in the batch. Currently - # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - # are supported. Note that `/v1/embeddings` batches are also restricted to a - # maximum of 50,000 embedding inputs across all requests in the batch. + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, + # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also + # restricted to a maximum of 50,000 embedding inputs across all requests in the + # batch. endpoint:, # The ID of an uploaded file that contains requests for the new batch. # @@ -145,9 +147,10 @@ module OpenAI end # The endpoint to be used for all requests in the batch. Currently - # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - # are supported. Note that `/v1/embeddings` batches are also restricted to a - # maximum of 50,000 embedding inputs across all requests in the batch. 
+ # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, + # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also + # restricted to a maximum of 50,000 embedding inputs across all requests in the + # batch. module Endpoint extend OpenAI::Internal::Type::Enum @@ -175,6 +178,11 @@ module OpenAI :"/v1/completions", OpenAI::BatchCreateParams::Endpoint::TaggedSymbol ) + V1_MODERATIONS = + T.let( + :"/v1/moderations", + OpenAI::BatchCreateParams::Endpoint::TaggedSymbol + ) sig do override.returns( diff --git a/rbi/openai/models/beta/assistant_create_params.rbi b/rbi/openai/models/beta/assistant_create_params.rbi index 034e3437..b102f86e 100644 --- a/rbi/openai/models/beta/assistant_create_params.rbi +++ b/rbi/openai/models/beta/assistant_create_params.rbi @@ -47,12 +47,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -212,12 +216,16 @@ module OpenAI name: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/rbi/openai/models/beta/assistant_update_params.rbi b/rbi/openai/models/beta/assistant_update_params.rbi index f07f224c..1febe2c4 100644 --- a/rbi/openai/models/beta/assistant_update_params.rbi +++ b/rbi/openai/models/beta/assistant_update_params.rbi @@ -67,12 +67,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -236,12 +240,16 @@ module OpenAI name: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/rbi/openai/models/beta/threads/run_create_params.rbi b/rbi/openai/models/beta/threads/run_create_params.rbi index ddcb19d4..b5eec248 100644 --- a/rbi/openai/models/beta/threads/run_create_params.rbi +++ b/rbi/openai/models/beta/threads/run_create_params.rbi @@ -113,12 +113,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. 
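The practical effect of the reworked `reasoning_effort` documentation: `none` becomes a valid value, but only on `gpt-5.1`. For example:

```ruby
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

chat_completion = openai.chat.completions.create(
  messages: [{role: "user", content: "Summarize our release notes in one line."}],
  model: :"gpt-5.1",
  reasoning_effort: :none # valid for gpt-5.1; models before gpt-5.1 reject `none`
)
```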
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -334,12 +338,16 @@ module OpenAI parallel_tool_calls: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/rbi/openai/models/chat/completion_create_params.rbi b/rbi/openai/models/chat/completion_create_params.rbi index e934b1e2..b92e12d0 100644 --- a/rbi/openai/models/chat/completion_create_params.rbi +++ b/rbi/openai/models/chat/completion_create_params.rbi @@ -225,14 +225,31 @@ module OpenAI sig { params(prompt_cache_key: String).void } attr_writer :prompt_cache_key + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + sig do + returns( + T.nilable( + OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::OrSymbol + ) + ) + end + attr_accessor :prompt_cache_retention + # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. 
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -515,6 +532,10 @@ module OpenAI T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash), presence_penalty: T.nilable(Float), prompt_cache_key: String, + prompt_cache_retention: + T.nilable( + OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::OrSymbol + ), reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), response_format: T.any( @@ -665,14 +686,23 @@ module OpenAI # hit rates. Replaces the `user` field. # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). prompt_cache_key: nil, + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + prompt_cache_retention: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # An object specifying the format that the model must output. # @@ -815,6 +845,10 @@ module OpenAI T.nilable(OpenAI::Chat::ChatCompletionPredictionContent), presence_penalty: T.nilable(Float), prompt_cache_key: String, + prompt_cache_retention: + T.nilable( + OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::OrSymbol + ), reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), response_format: T.any( @@ -1062,6 +1096,44 @@ module OpenAI end end + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + module PromptCacheRetention + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::CompletionCreateParams::PromptCacheRetention + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_MEMORY = + T.let( + :"in-memory", + OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::TaggedSymbol + ) + PROMPT_CACHE_RETENTION_24H = + T.let( + :"24h", + OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::TaggedSymbol + ] + ) + end + def self.values + end + end + # An object specifying the format that the model must output. 
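With the `PromptCacheRetention` enum in place, opting into extended caching is a one-parameter change; per the earlier hunks, the Responses surface accepts the same parameter. A sketch with a hypothetical cache key:

```ruby
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

chat_completion = openai.chat.completions.create(
  messages: [{role: "user", content: "Draft a welcome email."}],
  model: :"gpt-5.1",
  prompt_cache_key: "my-app:welcome-flow", # hypothetical grouping key
  prompt_cache_retention: :"24h"           # the other enum value is :"in-memory"
)
```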
# # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/rbi/openai/models/chat_model.rbi b/rbi/openai/models/chat_model.rbi index 7ba1f29a..5cbcce0d 100644 --- a/rbi/openai/models/chat_model.rbi +++ b/rbi/openai/models/chat_model.rbi @@ -8,6 +8,13 @@ module OpenAI TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ChatModel) } OrSymbol = T.type_alias { T.any(Symbol, String) } + GPT_5_1 = T.let(:"gpt-5.1", OpenAI::ChatModel::TaggedSymbol) + GPT_5_1_2025_11_13 = + T.let(:"gpt-5.1-2025-11-13", OpenAI::ChatModel::TaggedSymbol) + GPT_5_1_CODEX = T.let(:"gpt-5.1-codex", OpenAI::ChatModel::TaggedSymbol) + GPT_5_1_MINI = T.let(:"gpt-5.1-mini", OpenAI::ChatModel::TaggedSymbol) + GPT_5_1_CHAT_LATEST = + T.let(:"gpt-5.1-chat-latest", OpenAI::ChatModel::TaggedSymbol) GPT_5 = T.let(:"gpt-5", OpenAI::ChatModel::TaggedSymbol) GPT_5_MINI = T.let(:"gpt-5-mini", OpenAI::ChatModel::TaggedSymbol) GPT_5_NANO = T.let(:"gpt-5-nano", OpenAI::ChatModel::TaggedSymbol) diff --git a/rbi/openai/models/conversations/conversation_create_params.rbi b/rbi/openai/models/conversations/conversation_create_params.rbi index cf8283d6..20f9e584 100644 --- a/rbi/openai/models/conversations/conversation_create_params.rbi +++ b/rbi/openai/models/conversations/conversation_create_params.rbi @@ -36,6 +36,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall, OpenAI::Responses::ResponseInputItem::LocalShellCall, OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Responses::ResponseInputItem::ShellCall, + OpenAI::Responses::ResponseInputItem::ShellCallOutput, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Responses::ResponseInputItem::McpListTools, OpenAI::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Responses::ResponseInputItem::McpApprovalResponse, @@ -79,6 +83,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash, OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::ShellCall::OrHash, + OpenAI::Responses::ResponseInputItem::ShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::OrHash, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::OrHash, OpenAI::Responses::ResponseInputItem::McpListTools::OrHash, OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash, OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash, @@ -129,6 +137,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall, OpenAI::Responses::ResponseInputItem::LocalShellCall, OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Responses::ResponseInputItem::ShellCall, + OpenAI::Responses::ResponseInputItem::ShellCallOutput, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Responses::ResponseInputItem::McpListTools, OpenAI::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Responses::ResponseInputItem::McpApprovalResponse, diff --git a/rbi/openai/models/conversations/conversation_item.rbi b/rbi/openai/models/conversations/conversation_item.rbi index 870922f8..11a6f515 100644 --- a/rbi/openai/models/conversations/conversation_item.rbi +++ b/rbi/openai/models/conversations/conversation_item.rbi @@ -26,6 +26,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall, 
OpenAI::Conversations::ConversationItem::LocalShellCall, OpenAI::Conversations::ConversationItem::LocalShellCallOutput, + OpenAI::Responses::ResponseFunctionShellToolCall, + OpenAI::Responses::ResponseFunctionShellToolCallOutput, + OpenAI::Responses::ResponseApplyPatchToolCall, + OpenAI::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Conversations::ConversationItem::McpListTools, OpenAI::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Conversations::ConversationItem::McpApprovalResponse, diff --git a/rbi/openai/models/conversations/conversation_item_list.rbi b/rbi/openai/models/conversations/conversation_item_list.rbi index f8931151..d11156c1 100644 --- a/rbi/openai/models/conversations/conversation_item_list.rbi +++ b/rbi/openai/models/conversations/conversation_item_list.rbi @@ -54,6 +54,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, OpenAI::Conversations::ConversationItem::LocalShellCall::OrHash, OpenAI::Conversations::ConversationItem::LocalShellCallOutput::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCall::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCallOutput::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCall::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCallOutput::OrHash, OpenAI::Conversations::ConversationItem::McpListTools::OrHash, OpenAI::Conversations::ConversationItem::McpApprovalRequest::OrHash, OpenAI::Conversations::ConversationItem::McpApprovalResponse::OrHash, diff --git a/rbi/openai/models/conversations/item_create_params.rbi b/rbi/openai/models/conversations/item_create_params.rbi index 2c68cbe9..d615e7f3 100644 --- a/rbi/openai/models/conversations/item_create_params.rbi +++ b/rbi/openai/models/conversations/item_create_params.rbi @@ -34,6 +34,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall, OpenAI::Responses::ResponseInputItem::LocalShellCall, OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Responses::ResponseInputItem::ShellCall, + OpenAI::Responses::ResponseInputItem::ShellCallOutput, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Responses::ResponseInputItem::McpListTools, OpenAI::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Responses::ResponseInputItem::McpApprovalResponse, @@ -83,6 +87,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash, OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::ShellCall::OrHash, + OpenAI::Responses::ResponseInputItem::ShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::OrHash, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::OrHash, OpenAI::Responses::ResponseInputItem::McpListTools::OrHash, OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash, OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash, @@ -127,6 +135,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall, OpenAI::Responses::ResponseInputItem::LocalShellCall, OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Responses::ResponseInputItem::ShellCall, + OpenAI::Responses::ResponseInputItem::ShellCallOutput, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Responses::ResponseInputItem::McpListTools, 
OpenAI::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Responses::ResponseInputItem::McpApprovalResponse, diff --git a/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi b/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi index 7515578d..593acbc8 100644 --- a/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +++ b/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi @@ -889,12 +889,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -992,12 +996,16 @@ module OpenAI max_completion_tokens: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # An object specifying the format that the model must output. # diff --git a/rbi/openai/models/evals/run_cancel_response.rbi b/rbi/openai/models/evals/run_cancel_response.rbi index 9f4e862f..55090d55 100644 --- a/rbi/openai/models/evals/run_cancel_response.rbi +++ b/rbi/openai/models/evals/run_cancel_response.rbi @@ -512,12 +512,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. 
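The expanded effort values flow through eval sampling params as well. A hedged sketch of creating a run that pins `gpt-5.1` to `none` effort (the eval ID and source are placeholders; field names follow the sampling-params signatures in this file):

```ruby
run = openai.evals.runs.create(
  "eval_abc123",  # hypothetical eval ID
  name: "gpt-5.1-no-reasoning",
  data_source: {
    type: :completions,
    source: {type: :stored_completions},
    model: :"gpt-5.1",
    sampling_params: {reasoning_effort: :none, seed: 42}
  }
)
```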
# - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. sig do returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) end @@ -574,12 +578,16 @@ module OpenAI model: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Sampling temperature. This is a query parameter used to select responses. temperature: nil, @@ -1123,12 +1131,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. 
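On the Responses API, the same effort values are set through the nested `reasoning` object (see the `rbi/openai/models/reasoning.rbi` hunk later in this diff) rather than a top-level `reasoning_effort` keyword. A hedged sketch with an illustrative prompt:

```ruby
response = openai.responses.create(
  model: :"gpt-5.1",
  input: "Outline the steps to parse a CSV in Ruby.",
  reasoning: {effort: :none}  # OpenAI::Reasoning#effort; `none` performs no reasoning
)
```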
sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) } attr_accessor :reasoning_effort @@ -1199,7 +1211,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -1234,7 +1248,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -1247,12 +1263,16 @@ module OpenAI max_completion_tokens: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, diff --git a/rbi/openai/models/evals/run_create_params.rbi b/rbi/openai/models/evals/run_create_params.rbi index 9ba78fc7..ffc97de9 100644 --- a/rbi/openai/models/evals/run_create_params.rbi +++ b/rbi/openai/models/evals/run_create_params.rbi @@ -422,12 +422,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. 
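The `FunctionShellTool` and `ApplyPatchTool` entries threaded through these tool unions correspond to the two new tool classes added later in this diff. In hash form, a hedged sketch of declaring both on a response (the prompt and file path are illustrative):

```ruby
response = openai.responses.create(
  model: :"gpt-5.1",
  input: "Fix the failing spec in spec/widget_spec.rb.",
  tools: [
    {type: :shell},       # FunctionShellTool: the model may run shell commands
    {type: :apply_patch}  # ApplyPatchTool: the model may create/update/delete files via diffs
  ]
)
```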
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -482,12 +486,16 @@ module OpenAI model: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Sampling temperature. This is a query parameter used to select responses. temperature: nil, @@ -1081,12 +1089,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. 
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -1152,7 +1164,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::FunctionShellTool, OpenAI::Responses::CustomTool, + OpenAI::Responses::ApplyPatchTool, OpenAI::Responses::WebSearchTool, OpenAI::Responses::WebSearchPreviewTool ) @@ -1174,7 +1188,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -1209,7 +1225,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -1222,12 +1240,16 @@ module OpenAI max_completion_tokens: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, @@ -1279,7 +1301,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::FunctionShellTool, OpenAI::Responses::CustomTool, + OpenAI::Responses::ApplyPatchTool, OpenAI::Responses::WebSearchTool, OpenAI::Responses::WebSearchPreviewTool ) diff --git a/rbi/openai/models/evals/run_create_response.rbi b/rbi/openai/models/evals/run_create_response.rbi index e60c4240..52a9f808 100644 --- a/rbi/openai/models/evals/run_create_response.rbi +++ b/rbi/openai/models/evals/run_create_response.rbi @@ -512,12 +512,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. 
# - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. sig do returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) end @@ -574,12 +578,16 @@ module OpenAI model: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Sampling temperature. This is a query parameter used to select responses. temperature: nil, @@ -1123,12 +1131,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. 
sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) } attr_accessor :reasoning_effort @@ -1199,7 +1211,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -1234,7 +1248,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -1247,12 +1263,16 @@ module OpenAI max_completion_tokens: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, diff --git a/rbi/openai/models/evals/run_list_response.rbi b/rbi/openai/models/evals/run_list_response.rbi index 0ec5ecc2..8f5ff7ec 100644 --- a/rbi/openai/models/evals/run_list_response.rbi +++ b/rbi/openai/models/evals/run_list_response.rbi @@ -508,12 +508,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. 
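Equivalently, with the typed tool classes these unions reference (both default their `type` field, so no arguments are needed):

```ruby
openai.responses.create(
  model: :"gpt-5.1",
  input: "Fix the failing spec in spec/widget_spec.rb.",  # illustrative prompt
  tools: [
    OpenAI::Responses::FunctionShellTool.new,
    OpenAI::Responses::ApplyPatchTool.new
  ]
)
```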
sig do returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) end @@ -570,12 +574,16 @@ module OpenAI model: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Sampling temperature. This is a query parameter used to select responses. temperature: nil, @@ -1119,12 +1127,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) } attr_accessor :reasoning_effort @@ -1195,7 +1207,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -1230,7 +1244,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -1243,12 +1259,16 @@ module OpenAI max_completion_tokens: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. 
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, diff --git a/rbi/openai/models/evals/run_retrieve_response.rbi b/rbi/openai/models/evals/run_retrieve_response.rbi index 5bd74162..24a760bc 100644 --- a/rbi/openai/models/evals/run_retrieve_response.rbi +++ b/rbi/openai/models/evals/run_retrieve_response.rbi @@ -514,12 +514,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. sig do returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) end @@ -576,12 +580,16 @@ module OpenAI model: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Sampling temperature. This is a query parameter used to select responses. temperature: nil, @@ -1125,12 +1133,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) } attr_accessor :reasoning_effort @@ -1201,7 +1213,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -1236,7 +1250,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -1249,12 +1265,16 @@ module OpenAI max_completion_tokens: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, diff --git a/rbi/openai/models/graders/score_model_grader.rbi b/rbi/openai/models/graders/score_model_grader.rbi index 6c8ddd69..124de333 100644 --- a/rbi/openai/models/graders/score_model_grader.rbi +++ b/rbi/openai/models/graders/score_model_grader.rbi @@ -396,12 +396,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. 
Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -432,12 +436,16 @@ module OpenAI max_completions_tokens: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, diff --git a/rbi/openai/models/reasoning.rbi b/rbi/openai/models/reasoning.rbi index 7d1e6b53..08438176 100644 --- a/rbi/openai/models/reasoning.rbi +++ b/rbi/openai/models/reasoning.rbi @@ -8,12 +8,16 @@ module OpenAI # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :effort @@ -48,12 +52,16 @@ module OpenAI def self.new( # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. 
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. effort: nil, # **Deprecated:** use `summary` instead. # diff --git a/rbi/openai/models/reasoning_effort.rbi b/rbi/openai/models/reasoning_effort.rbi index 83ef54a6..99f32e7d 100644 --- a/rbi/openai/models/reasoning_effort.rbi +++ b/rbi/openai/models/reasoning_effort.rbi @@ -4,18 +4,23 @@ module OpenAI module Models # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. module ReasoningEffort extend OpenAI::Internal::Type::Enum TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ReasoningEffort) } OrSymbol = T.type_alias { T.any(Symbol, String) } + NONE = T.let(:none, OpenAI::ReasoningEffort::TaggedSymbol) MINIMAL = T.let(:minimal, OpenAI::ReasoningEffort::TaggedSymbol) LOW = T.let(:low, OpenAI::ReasoningEffort::TaggedSymbol) MEDIUM = T.let(:medium, OpenAI::ReasoningEffort::TaggedSymbol) diff --git a/rbi/openai/models/responses/apply_patch_tool.rbi b/rbi/openai/models/responses/apply_patch_tool.rbi new file mode 100644 index 00000000..a47da9a1 --- /dev/null +++ b/rbi/openai/models/responses/apply_patch_tool.rbi @@ -0,0 +1,30 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ApplyPatchTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::ApplyPatchTool, OpenAI::Internal::AnyHash) + end + + # The type of the tool. Always `apply_patch`. + sig { returns(Symbol) } + attr_accessor :type + + # Allows the assistant to create, delete, or update files using unified diffs. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of the tool. Always `apply_patch`. 
+ type: :apply_patch + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/function_shell_tool.rbi b/rbi/openai/models/responses/function_shell_tool.rbi new file mode 100644 index 00000000..5ca05e9a --- /dev/null +++ b/rbi/openai/models/responses/function_shell_tool.rbi @@ -0,0 +1,33 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class FunctionShellTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::FunctionShellTool, + OpenAI::Internal::AnyHash + ) + end + + # The type of the shell tool. Always `shell`. + sig { returns(Symbol) } + attr_accessor :type + + # A tool that allows the model to execute shell commands. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of the shell tool. Always `shell`. + type: :shell + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/input_token_count_params.rbi b/rbi/openai/models/responses/input_token_count_params.rbi index aa37bac8..3b65a389 100644 --- a/rbi/openai/models/responses/input_token_count_params.rbi +++ b/rbi/openai/models/responses/input_token_count_params.rbi @@ -100,7 +100,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, OpenAI::Responses::ToolChoiceMcp, - OpenAI::Responses::ToolChoiceCustom + OpenAI::Responses::ToolChoiceCustom, + OpenAI::Responses::ToolChoiceApplyPatch, + OpenAI::Responses::ToolChoiceShell ) ) ) @@ -121,7 +123,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::FunctionShellTool, OpenAI::Responses::CustomTool, + OpenAI::Responses::ApplyPatchTool, OpenAI::Responses::WebSearchTool, OpenAI::Responses::WebSearchPreviewTool ) @@ -181,7 +185,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, OpenAI::Responses::ToolChoiceMcp::OrHash, - OpenAI::Responses::ToolChoiceCustom::OrHash + OpenAI::Responses::ToolChoiceCustom::OrHash, + OpenAI::Responses::ToolChoiceApplyPatch::OrHash, + OpenAI::Responses::ToolChoiceShell::OrHash ) ), tools: @@ -195,7 +201,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -283,7 +291,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, OpenAI::Responses::ToolChoiceMcp, - OpenAI::Responses::ToolChoiceCustom + OpenAI::Responses::ToolChoiceCustom, + OpenAI::Responses::ToolChoiceApplyPatch, + OpenAI::Responses::ToolChoiceShell ) ), tools: @@ -297,7 +307,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::FunctionShellTool, OpenAI::Responses::CustomTool, + OpenAI::Responses::ApplyPatchTool, OpenAI::Responses::WebSearchTool, OpenAI::Responses::WebSearchPreviewTool ) @@ -542,7 +554,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, OpenAI::Responses::ToolChoiceMcp, - OpenAI::Responses::ToolChoiceCustom + 
OpenAI::Responses::ToolChoiceCustom, + OpenAI::Responses::ToolChoiceApplyPatch, + OpenAI::Responses::ToolChoiceShell ) end diff --git a/rbi/openai/models/responses/response.rbi b/rbi/openai/models/responses/response.rbi index 80f62655..2757c3e6 100644 --- a/rbi/openai/models/responses/response.rbi +++ b/rbi/openai/models/responses/response.rbi @@ -193,6 +193,19 @@ module OpenAI sig { params(prompt_cache_key: String).void } attr_writer :prompt_cache_key + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + sig do + returns( + T.nilable( + OpenAI::Responses::Response::PromptCacheRetention::TaggedSymbol + ) + ) + end + attr_accessor :prompt_cache_retention + # **gpt-5 and o-series models only** # # Configuration options for @@ -324,6 +337,10 @@ module OpenAI OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::OrHash, OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCall::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCallOutput::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCall::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCallOutput::OrHash, OpenAI::Responses::ResponseOutputItem::McpCall::OrHash, OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash, OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash, @@ -339,7 +356,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, OpenAI::Responses::ToolChoiceMcp::OrHash, - OpenAI::Responses::ToolChoiceCustom::OrHash + OpenAI::Responses::ToolChoiceCustom::OrHash, + OpenAI::Responses::ToolChoiceApplyPatch::OrHash, + OpenAI::Responses::ToolChoiceShell::OrHash ), tools: T::Array[ @@ -351,7 +370,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -365,6 +386,10 @@ module OpenAI previous_response_id: T.nilable(String), prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash), prompt_cache_key: String, + prompt_cache_retention: + T.nilable( + OpenAI::Responses::Response::PromptCacheRetention::OrSymbol + ), reasoning: T.nilable(OpenAI::Reasoning::OrHash), safety_identifier: String, service_tier: @@ -479,6 +504,11 @@ module OpenAI # hit rates. Replaces the `user` field. # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). prompt_cache_key: nil, + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). 
+ prompt_cache_retention: nil, # **gpt-5 and o-series models only** # # Configuration options for @@ -568,6 +598,10 @@ module OpenAI previous_response_id: T.nilable(String), prompt: T.nilable(OpenAI::Responses::ResponsePrompt), prompt_cache_key: String, + prompt_cache_retention: + T.nilable( + OpenAI::Responses::Response::PromptCacheRetention::TaggedSymbol + ), reasoning: T.nilable(OpenAI::Reasoning), safety_identifier: String, service_tier: @@ -723,7 +757,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, OpenAI::Responses::ToolChoiceMcp, - OpenAI::Responses::ToolChoiceCustom + OpenAI::Responses::ToolChoiceCustom, + OpenAI::Responses::ToolChoiceApplyPatch, + OpenAI::Responses::ToolChoiceShell ) end @@ -763,6 +799,41 @@ module OpenAI end end + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + module PromptCacheRetention + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::Response::PromptCacheRetention) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_MEMORY = + T.let( + :"in-memory", + OpenAI::Responses::Response::PromptCacheRetention::TaggedSymbol + ) + PROMPT_CACHE_RETENTION_24H = + T.let( + :"24h", + OpenAI::Responses::Response::PromptCacheRetention::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Response::PromptCacheRetention::TaggedSymbol + ] + ) + end + def self.values + end + end + # Specifies the processing type used for serving the request. # # - If set to 'auto', then the request will be processed with the service tier diff --git a/rbi/openai/models/responses/response_apply_patch_tool_call.rbi b/rbi/openai/models/responses/response_apply_patch_tool_call.rbi new file mode 100644 index 00000000..5db154ac --- /dev/null +++ b/rbi/openai/models/responses/response_apply_patch_tool_call.rbi @@ -0,0 +1,300 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseApplyPatchToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseApplyPatchToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the apply patch tool call. Populated when this item is returned + # via API. + sig { returns(String) } + attr_accessor :id + + # The unique ID of the apply patch tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The status of the apply patch tool call. One of `in_progress` or `completed`. + sig do + returns( + OpenAI::Responses::ResponseApplyPatchToolCall::Status::TaggedSymbol + ) + end + attr_accessor :status + + # The type of the item. Always `apply_patch_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The ID of the entity that created this tool call. + sig { returns(T.nilable(String)) } + attr_reader :created_by + + sig { params(created_by: String).void } + attr_writer :created_by + + # One of the create_file, delete_file, or update_file operations applied via + # apply_patch. 
+ sig do + returns( + T.nilable( + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::Variants + ) + ) + end + attr_reader :operation + + sig do + params( + operation: + T.any( + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::CreateFile::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile::OrHash + ) + ).void + end + attr_writer :operation + + # A tool call that applies file diffs by creating, deleting, or updating files. + sig do + params( + id: String, + call_id: String, + status: + OpenAI::Responses::ResponseApplyPatchToolCall::Status::OrSymbol, + created_by: String, + operation: + T.any( + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::CreateFile::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile::OrHash + ), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the apply patch tool call. Populated when this item is returned + # via API. + id:, + # The unique ID of the apply patch tool call generated by the model. + call_id:, + # The status of the apply patch tool call. One of `in_progress` or `completed`. + status:, + # The ID of the entity that created this tool call. + created_by: nil, + # One of the create_file, delete_file, or update_file operations applied via + # apply_patch. + operation: nil, + # The type of the item. Always `apply_patch_call`. + type: :apply_patch_call + ) + end + + sig do + override.returns( + { + id: String, + call_id: String, + status: + OpenAI::Responses::ResponseApplyPatchToolCall::Status::TaggedSymbol, + type: Symbol, + created_by: String, + operation: + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::Variants + } + ) + end + def to_hash + end + + # The status of the apply patch tool call. One of `in_progress` or `completed`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseApplyPatchToolCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseApplyPatchToolCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseApplyPatchToolCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseApplyPatchToolCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + + # One of the create_file, delete_file, or update_file operations applied via + # apply_patch. + module Operation + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::CreateFile, + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile, + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile + ) + end + + class CreateFile < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::CreateFile, + OpenAI::Internal::AnyHash + ) + end + + # Diff to apply. + sig { returns(String) } + attr_accessor :diff + + # Path of the file to create. + sig { returns(String) } + attr_accessor :path + + # Create a new file with the provided diff. + sig { returns(Symbol) } + attr_accessor :type + + # Instruction describing how to create a file via the apply_patch tool. 
+ sig do + params(diff: String, path: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # Diff to apply. + diff:, + # Path of the file to create. + path:, + # Create a new file with the provided diff. + type: :create_file + ) + end + + sig do + override.returns({ diff: String, path: String, type: Symbol }) + end + def to_hash + end + end + + class DeleteFile < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile, + OpenAI::Internal::AnyHash + ) + end + + # Path of the file to delete. + sig { returns(String) } + attr_accessor :path + + # Delete the specified file. + sig { returns(Symbol) } + attr_accessor :type + + # Instruction describing how to delete a file via the apply_patch tool. + sig { params(path: String, type: Symbol).returns(T.attached_class) } + def self.new( + # Path of the file to delete. + path:, + # Delete the specified file. + type: :delete_file + ) + end + + sig { override.returns({ path: String, type: Symbol }) } + def to_hash + end + end + + class UpdateFile < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile, + OpenAI::Internal::AnyHash + ) + end + + # Diff to apply. + sig { returns(String) } + attr_accessor :diff + + # Path of the file to update. + sig { returns(String) } + attr_accessor :path + + # Update an existing file with the provided diff. + sig { returns(Symbol) } + attr_accessor :type + + # Instruction describing how to update a file via the apply_patch tool. + sig do + params(diff: String, path: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # Diff to apply. + diff:, + # Path of the file to update. + path:, + # Update an existing file with the provided diff. + type: :update_file + ) + end + + sig do + override.returns({ diff: String, path: String, type: Symbol }) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi b/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi new file mode 100644 index 00000000..0d29a037 --- /dev/null +++ b/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi @@ -0,0 +1,129 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseApplyPatchToolCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseApplyPatchToolCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the apply patch tool call output. Populated when this item is + # returned via API. + sig { returns(String) } + attr_accessor :id + + # The unique ID of the apply patch tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # Optional textual output returned by the apply patch tool. + sig { returns(T.nilable(String)) } + attr_accessor :output + + # The status of the apply patch tool call output. One of `completed` or `failed`. + sig do + returns( + OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status::TaggedSymbol + ) + end + attr_accessor :status + + # The type of the item. Always `apply_patch_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The ID of the entity that created this tool call output. 
+ sig { returns(T.nilable(String)) } + attr_reader :created_by + + sig { params(created_by: String).void } + attr_writer :created_by + + # The output emitted by an apply patch tool call. + sig do + params( + id: String, + call_id: String, + output: T.nilable(String), + status: + OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status::OrSymbol, + created_by: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the apply patch tool call output. Populated when this item is + # returned via API. + id:, + # The unique ID of the apply patch tool call generated by the model. + call_id:, + # Optional textual output returned by the apply patch tool. + output:, + # The status of the apply patch tool call output. One of `completed` or `failed`. + status:, + # The ID of the entity that created this tool call output. + created_by: nil, + # The type of the item. Always `apply_patch_call_output`. + type: :apply_patch_call_output + ) + end + + sig do + override.returns( + { + id: String, + call_id: String, + output: T.nilable(String), + status: + OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status::TaggedSymbol, + type: Symbol, + created_by: String + } + ) + end + def to_hash + end + + # The status of the apply patch tool call output. One of `completed` or `failed`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_create_params.rbi b/rbi/openai/models/responses/response_create_params.rbi index 42b3aaa1..3ff54af0 100644 --- a/rbi/openai/models/responses/response_create_params.rbi +++ b/rbi/openai/models/responses/response_create_params.rbi @@ -173,6 +173,19 @@ module OpenAI sig { params(prompt_cache_key: String).void } attr_writer :prompt_cache_key + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). 
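+        # Usage sketch (assumes a configured `openai` client, as in the README):
+        #
+        #   openai.responses.create(
+        #     input: "Summarize the attached report.",
+        #     model: :"gpt-5.1",
+        #     prompt_cache_retention: :"24h"
+        #   )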
+ sig do + returns( + T.nilable( + OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::OrSymbol + ) + ) + end + attr_accessor :prompt_cache_retention + # **gpt-5 and o-series models only** # # Configuration options for @@ -279,7 +292,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, OpenAI::Responses::ToolChoiceMcp, - OpenAI::Responses::ToolChoiceCustom + OpenAI::Responses::ToolChoiceCustom, + OpenAI::Responses::ToolChoiceApplyPatch, + OpenAI::Responses::ToolChoiceShell ) ) ) @@ -295,7 +310,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, OpenAI::Responses::ToolChoiceMcp::OrHash, - OpenAI::Responses::ToolChoiceCustom::OrHash + OpenAI::Responses::ToolChoiceCustom::OrHash, + OpenAI::Responses::ToolChoiceApplyPatch::OrHash, + OpenAI::Responses::ToolChoiceShell::OrHash ) ).void end @@ -332,7 +349,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::FunctionShellTool, OpenAI::Responses::CustomTool, + OpenAI::Responses::ApplyPatchTool, OpenAI::Responses::WebSearchTool, OpenAI::Responses::WebSearchPreviewTool ) @@ -354,7 +373,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -432,6 +453,10 @@ module OpenAI previous_response_id: T.nilable(String), prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash), prompt_cache_key: String, + prompt_cache_retention: + T.nilable( + OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::OrSymbol + ), reasoning: T.nilable(OpenAI::Reasoning::OrHash), safety_identifier: String, service_tier: @@ -452,7 +477,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, OpenAI::Responses::ToolChoiceMcp::OrHash, - OpenAI::Responses::ToolChoiceCustom::OrHash + OpenAI::Responses::ToolChoiceCustom::OrHash, + OpenAI::Responses::ToolChoiceApplyPatch::OrHash, + OpenAI::Responses::ToolChoiceShell::OrHash ), tools: T::Array[ @@ -464,7 +491,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -559,6 +588,11 @@ module OpenAI # hit rates. Replaces the `user` field. # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). prompt_cache_key: nil, + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). 
+ prompt_cache_retention: nil, # **gpt-5 and o-series models only** # # Configuration options for @@ -680,6 +714,10 @@ module OpenAI previous_response_id: T.nilable(String), prompt: T.nilable(OpenAI::Responses::ResponsePrompt), prompt_cache_key: String, + prompt_cache_retention: + T.nilable( + OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::OrSymbol + ), reasoning: T.nilable(OpenAI::Reasoning), safety_identifier: String, service_tier: @@ -700,7 +738,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, OpenAI::Responses::ToolChoiceMcp, - OpenAI::Responses::ToolChoiceCustom + OpenAI::Responses::ToolChoiceCustom, + OpenAI::Responses::ToolChoiceApplyPatch, + OpenAI::Responses::ToolChoiceShell ), tools: T::Array[ @@ -712,7 +752,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::FunctionShellTool, OpenAI::Responses::CustomTool, + OpenAI::Responses::ApplyPatchTool, OpenAI::Responses::WebSearchTool, OpenAI::Responses::WebSearchPreviewTool ) @@ -783,6 +825,44 @@ module OpenAI end end + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + module PromptCacheRetention + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseCreateParams::PromptCacheRetention + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_MEMORY = + T.let( + :"in-memory", + OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::TaggedSymbol + ) + PROMPT_CACHE_RETENTION_24H = + T.let( + :"24h", + OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::TaggedSymbol + ] + ) + end + def self.values + end + end + # Specifies the processing type used for serving the request. # # - If set to 'auto', then the request will be processed with the service tier @@ -903,7 +983,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, OpenAI::Responses::ToolChoiceMcp, - OpenAI::Responses::ToolChoiceCustom + OpenAI::Responses::ToolChoiceCustom, + OpenAI::Responses::ToolChoiceApplyPatch, + OpenAI::Responses::ToolChoiceShell ) end diff --git a/rbi/openai/models/responses/response_function_shell_call_output_content.rbi b/rbi/openai/models/responses/response_function_shell_call_output_content.rbi new file mode 100644 index 00000000..de170744 --- /dev/null +++ b/rbi/openai/models/responses/response_function_shell_call_output_content.rbi @@ -0,0 +1,157 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseFunctionShellCallOutputContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellCallOutputContent, + OpenAI::Internal::AnyHash + ) + end + + # The exit or timeout outcome associated with this chunk. + sig do + returns( + T.any( + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout, + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit + ) + ) + end + attr_accessor :outcome + + # Captured stderr output for this chunk of the shell call. 
+ sig { returns(String) } + attr_accessor :stderr + + # Captured stdout output for this chunk of the shell call. + sig { returns(String) } + attr_accessor :stdout + + # Captured stdout and stderr for a portion of a function shell tool call output. + sig do + params( + outcome: + T.any( + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout::OrHash, + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit::OrHash + ), + stderr: String, + stdout: String + ).returns(T.attached_class) + end + def self.new( + # The exit or timeout outcome associated with this chunk. + outcome:, + # Captured stderr output for this chunk of the shell call. + stderr:, + # Captured stdout output for this chunk of the shell call. + stdout: + ) + end + + sig do + override.returns( + { + outcome: + T.any( + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout, + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit + ), + stderr: String, + stdout: String + } + ) + end + def to_hash + end + + # The exit or timeout outcome associated with this chunk. + module Outcome + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout, + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit + ) + end + + class Timeout < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout, + OpenAI::Internal::AnyHash + ) + end + + # The outcome type. Always `timeout`. + sig { returns(Symbol) } + attr_accessor :type + + # Indicates that the function shell call exceeded its configured time limit. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The outcome type. Always `timeout`. + type: :timeout + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + class Exit < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit, + OpenAI::Internal::AnyHash + ) + end + + # The exit code returned by the shell process. + sig { returns(Integer) } + attr_accessor :exit_code + + # The outcome type. Always `exit`. + sig { returns(Symbol) } + attr_accessor :type + + # Indicates that the shell commands finished and returned an exit code. + sig do + params(exit_code: Integer, type: Symbol).returns(T.attached_class) + end + def self.new( + # The exit code returned by the shell process. + exit_code:, + # The outcome type. Always `exit`. 
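+            # (A sketch: a successful chunk would construct as
+            # Outcome::Exit.new(exit_code: 0), leaving `type` at its default.)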
+ type: :exit + ) + end + + sig { override.returns({ exit_code: Integer, type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_function_shell_tool_call.rbi b/rbi/openai/models/responses/response_function_shell_tool_call.rbi new file mode 100644 index 00000000..7b4ed98c --- /dev/null +++ b/rbi/openai/models/responses/response_function_shell_tool_call.rbi @@ -0,0 +1,198 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseFunctionShellToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the function shell tool call. Populated when this item is + # returned via API. + sig { returns(String) } + attr_accessor :id + + # The shell commands and limits that describe how to run the tool call. + sig do + returns(OpenAI::Responses::ResponseFunctionShellToolCall::Action) + end + attr_reader :action + + sig do + params( + action: + OpenAI::Responses::ResponseFunctionShellToolCall::Action::OrHash + ).void + end + attr_writer :action + + # The unique ID of the function shell tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The status of the shell call. One of `in_progress`, `completed`, or + # `incomplete`. + sig do + returns( + OpenAI::Responses::ResponseFunctionShellToolCall::Status::TaggedSymbol + ) + end + attr_accessor :status + + # The type of the item. Always `shell_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The ID of the entity that created this tool call. + sig { returns(T.nilable(String)) } + attr_reader :created_by + + sig { params(created_by: String).void } + attr_writer :created_by + + # A tool call that executes one or more shell commands in a managed environment. + sig do + params( + id: String, + action: + OpenAI::Responses::ResponseFunctionShellToolCall::Action::OrHash, + call_id: String, + status: + OpenAI::Responses::ResponseFunctionShellToolCall::Status::OrSymbol, + created_by: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the function shell tool call. Populated when this item is + # returned via API. + id:, + # The shell commands and limits that describe how to run the tool call. + action:, + # The unique ID of the function shell tool call generated by the model. + call_id:, + # The status of the shell call. One of `in_progress`, `completed`, or + # `incomplete`. + status:, + # The ID of the entity that created this tool call. + created_by: nil, + # The type of the item. Always `shell_call`. + type: :shell_call + ) + end + + sig do + override.returns( + { + id: String, + action: OpenAI::Responses::ResponseFunctionShellToolCall::Action, + call_id: String, + status: + OpenAI::Responses::ResponseFunctionShellToolCall::Status::TaggedSymbol, + type: Symbol, + created_by: String + } + ) + end + def to_hash + end + + class Action < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellToolCall::Action, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T::Array[String]) } + attr_accessor :commands + + # Optional maximum number of characters to return from each command. 
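+          # (Illustrative values only: an action such as
+          #   {commands: ["bundle exec rake test"], max_output_length: 4_096, timeout_ms: 30_000}
+          # bounds both the captured output and the run time.)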
+ sig { returns(T.nilable(Integer)) } + attr_accessor :max_output_length + + # Optional timeout in milliseconds for the commands. + sig { returns(T.nilable(Integer)) } + attr_accessor :timeout_ms + + # The shell commands and limits that describe how to run the tool call. + sig do + params( + commands: T::Array[String], + max_output_length: T.nilable(Integer), + timeout_ms: T.nilable(Integer) + ).returns(T.attached_class) + end + def self.new( + commands:, + # Optional maximum number of characters to return from each command. + max_output_length:, + # Optional timeout in milliseconds for the commands. + timeout_ms: + ) + end + + sig do + override.returns( + { + commands: T::Array[String], + max_output_length: T.nilable(Integer), + timeout_ms: T.nilable(Integer) + } + ) + end + def to_hash + end + end + + # The status of the shell call. One of `in_progress`, `completed`, or + # `incomplete`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseFunctionShellToolCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseFunctionShellToolCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseFunctionShellToolCall::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseFunctionShellToolCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseFunctionShellToolCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi b/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi new file mode 100644 index 00000000..6371906d --- /dev/null +++ b/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi @@ -0,0 +1,254 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseFunctionShellToolCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellToolCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the shell call output. Populated when this item is returned via + # API. + sig { returns(String) } + attr_accessor :id + + # The unique ID of the shell tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The maximum length of the shell command output. This is generated by the model + # and should be passed back with the raw output. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_output_length + + # An array of shell call output contents + sig do + returns( + T::Array[ + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output + ] + ) + end + attr_accessor :output + + # The type of the shell call output. Always `shell_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + sig { returns(T.nilable(String)) } + attr_reader :created_by + + sig { params(created_by: String).void } + attr_writer :created_by + + # The output of a shell tool call. + sig do + params( + id: String, + call_id: String, + max_output_length: T.nilable(Integer), + output: + T::Array[ + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::OrHash + ], + created_by: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the shell call output. 
Populated when this item is returned via + # API. + id:, + # The unique ID of the shell tool call generated by the model. + call_id:, + # The maximum length of the shell command output. This is generated by the model + # and should be passed back with the raw output. + max_output_length:, + # An array of shell call output contents + output:, + created_by: nil, + # The type of the shell call output. Always `shell_call_output`. + type: :shell_call_output + ) + end + + sig do + override.returns( + { + id: String, + call_id: String, + max_output_length: T.nilable(Integer), + output: + T::Array[ + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output + ], + type: Symbol, + created_by: String + } + ) + end + def to_hash + end + + class Output < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output, + OpenAI::Internal::AnyHash + ) + end + + # Represents either an exit outcome (with an exit code) or a timeout outcome for a + # shell call output chunk. + sig do + returns( + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Variants + ) + end + attr_accessor :outcome + + sig { returns(String) } + attr_accessor :stderr + + sig { returns(String) } + attr_accessor :stdout + + sig { returns(T.nilable(String)) } + attr_reader :created_by + + sig { params(created_by: String).void } + attr_writer :created_by + + # The content of a shell call output. + sig do + params( + outcome: + T.any( + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Timeout::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Exit::OrHash + ), + stderr: String, + stdout: String, + created_by: String + ).returns(T.attached_class) + end + def self.new( + # Represents either an exit outcome (with an exit code) or a timeout outcome for a + # shell call output chunk. + outcome:, + stderr:, + stdout:, + created_by: nil + ) + end + + sig do + override.returns( + { + outcome: + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Variants, + stderr: String, + stdout: String, + created_by: String + } + ) + end + def to_hash + end + + # Represents either an exit outcome (with an exit code) or a timeout outcome for a + # shell call output chunk. + module Outcome + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Timeout, + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Exit + ) + end + + class Timeout < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Timeout, + OpenAI::Internal::AnyHash + ) + end + + # The outcome type. Always `timeout`. + sig { returns(Symbol) } + attr_accessor :type + + # Indicates that the function shell call exceeded its configured time limit. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The outcome type. Always `timeout`. + type: :timeout + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + class Exit < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Exit, + OpenAI::Internal::AnyHash + ) + end + + # Exit code from the shell process. + sig { returns(Integer) } + attr_accessor :exit_code + + # The outcome type. Always `exit`. 
+ sig { returns(Symbol) } + attr_accessor :type + + # Indicates that the shell commands finished and returned an exit code. + sig do + params(exit_code: Integer, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # Exit code from the shell process. + exit_code:, + # The outcome type. Always `exit`. + type: :exit + ) + end + + sig { override.returns({ exit_code: Integer, type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_input_item.rbi b/rbi/openai/models/responses/response_input_item.rbi index a8650b01..aa4ce248 100644 --- a/rbi/openai/models/responses/response_input_item.rbi +++ b/rbi/openai/models/responses/response_input_item.rbi @@ -28,6 +28,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall, OpenAI::Responses::ResponseInputItem::LocalShellCall, OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Responses::ResponseInputItem::ShellCall, + OpenAI::Responses::ResponseInputItem::ShellCallOutput, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Responses::ResponseInputItem::McpListTools, OpenAI::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Responses::ResponseInputItem::McpApprovalResponse, @@ -1075,6 +1079,677 @@ module OpenAI end end + class ShellCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ShellCall, + OpenAI::Internal::AnyHash + ) + end + + # The shell commands and limits that describe how to run the tool call. + sig do + returns(OpenAI::Responses::ResponseInputItem::ShellCall::Action) + end + attr_reader :action + + sig do + params( + action: + OpenAI::Responses::ResponseInputItem::ShellCall::Action::OrHash + ).void + end + attr_writer :action + + # The unique ID of the function shell tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The type of the item. Always `function_shell_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the function shell tool call. Populated when this item is + # returned via API. + sig { returns(T.nilable(String)) } + attr_accessor :id + + # The status of the shell call. One of `in_progress`, `completed`, or + # `incomplete`. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseInputItem::ShellCall::Status::OrSymbol + ) + ) + end + attr_accessor :status + + # A tool representing a request to execute one or more shell commands. + sig do + params( + action: + OpenAI::Responses::ResponseInputItem::ShellCall::Action::OrHash, + call_id: String, + id: T.nilable(String), + status: + T.nilable( + OpenAI::Responses::ResponseInputItem::ShellCall::Status::OrSymbol + ), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The shell commands and limits that describe how to run the tool call. + action:, + # The unique ID of the function shell tool call generated by the model. + call_id:, + # The unique ID of the function shell tool call. Populated when this item is + # returned via API. + id: nil, + # The status of the shell call. One of `in_progress`, `completed`, or + # `incomplete`. + status: nil, + # The type of the item. Always `function_shell_call`. 
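+          # A construction sketch (hypothetical ID and commands; the action
+          # hash mirrors the Action class below):
+          #
+          #   OpenAI::Responses::ResponseInputItem::ShellCall.new(
+          #     call_id: "call_123",
+          #     action: {commands: ["ls -la"], timeout_ms: 10_000}
+          #   )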
+ type: :shell_call + ) + end + + sig do + override.returns( + { + action: OpenAI::Responses::ResponseInputItem::ShellCall::Action, + call_id: String, + type: Symbol, + id: T.nilable(String), + status: + T.nilable( + OpenAI::Responses::ResponseInputItem::ShellCall::Status::OrSymbol + ) + } + ) + end + def to_hash + end + + class Action < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ShellCall::Action, + OpenAI::Internal::AnyHash + ) + end + + # Ordered shell commands for the execution environment to run. + sig { returns(T::Array[String]) } + attr_accessor :commands + + # Maximum number of UTF-8 characters to capture from combined stdout and stderr + # output. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_output_length + + # Maximum wall-clock time in milliseconds to allow the shell commands to run. + sig { returns(T.nilable(Integer)) } + attr_accessor :timeout_ms + + # The shell commands and limits that describe how to run the tool call. + sig do + params( + commands: T::Array[String], + max_output_length: T.nilable(Integer), + timeout_ms: T.nilable(Integer) + ).returns(T.attached_class) + end + def self.new( + # Ordered shell commands for the execution environment to run. + commands:, + # Maximum number of UTF-8 characters to capture from combined stdout and stderr + # output. + max_output_length: nil, + # Maximum wall-clock time in milliseconds to allow the shell commands to run. + timeout_ms: nil + ) + end + + sig do + override.returns( + { + commands: T::Array[String], + max_output_length: T.nilable(Integer), + timeout_ms: T.nilable(Integer) + } + ) + end + def to_hash + end + end + + # The status of the shell call. One of `in_progress`, `completed`, or + # `incomplete`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::ShellCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseInputItem::ShellCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseInputItem::ShellCall::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseInputItem::ShellCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::ShellCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class ShellCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ShellCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the function shell tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # Captured chunks of stdout and stderr output, along with their associated + # outcomes. + sig do + returns( + T::Array[ + OpenAI::Responses::ResponseFunctionShellCallOutputContent + ] + ) + end + attr_accessor :output + + # The type of the item. Always `function_shell_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the function shell tool call output. Populated when this item + # is returned via API. + sig { returns(T.nilable(String)) } + attr_accessor :id + + # The maximum number of UTF-8 characters captured for this shell call's combined + # output. 
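+          # As a round-trip sketch (hypothetical ID and output), the item sent
+          # back to the model might be:
+          #
+          #   {
+          #     type: :shell_call_output,
+          #     call_id: "call_123",
+          #     output: [{stdout: "ok\n", stderr: "", outcome: {type: :exit, exit_code: 0}}]
+          #   }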
+ sig { returns(T.nilable(Integer)) } + attr_accessor :max_output_length + + # The streamed output items emitted by a function shell tool call. + sig do + params( + call_id: String, + output: + T::Array[ + OpenAI::Responses::ResponseFunctionShellCallOutputContent::OrHash + ], + id: T.nilable(String), + max_output_length: T.nilable(Integer), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the function shell tool call generated by the model. + call_id:, + # Captured chunks of stdout and stderr output, along with their associated + # outcomes. + output:, + # The unique ID of the function shell tool call output. Populated when this item + # is returned via API. + id: nil, + # The maximum number of UTF-8 characters captured for this shell call's combined + # output. + max_output_length: nil, + # The type of the item. Always `function_shell_call_output`. + type: :shell_call_output + ) + end + + sig do + override.returns( + { + call_id: String, + output: + T::Array[ + OpenAI::Responses::ResponseFunctionShellCallOutputContent + ], + type: Symbol, + id: T.nilable(String), + max_output_length: T.nilable(Integer) + } + ) + end + def to_hash + end + end + + class ApplyPatchCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ApplyPatchCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the apply patch tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The specific create, delete, or update instruction for the apply_patch tool + # call. + sig do + returns( + T.any( + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::CreateFile, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::DeleteFile, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::UpdateFile + ) + ) + end + attr_accessor :operation + + # The status of the apply patch tool call. One of `in_progress` or `completed`. + sig do + returns( + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Status::OrSymbol + ) + end + attr_accessor :status + + # The type of the item. Always `apply_patch_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the apply patch tool call. Populated when this item is returned + # via API. + sig { returns(T.nilable(String)) } + attr_accessor :id + + # A tool call representing a request to create, delete, or update files using diff + # patches. + sig do + params( + call_id: String, + operation: + T.any( + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::CreateFile::OrHash, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::DeleteFile::OrHash, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::UpdateFile::OrHash + ), + status: + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Status::OrSymbol, + id: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the apply patch tool call generated by the model. + call_id:, + # The specific create, delete, or update instruction for the apply_patch tool + # call. + operation:, + # The status of the apply patch tool call. One of `in_progress` or `completed`. + status:, + # The unique ID of the apply patch tool call. Populated when this item is returned + # via API. + id: nil, + # The type of the item. Always `apply_patch_call`. 
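+          # A construction sketch (hypothetical values; the operation hash
+          # mirrors the Operation variants below):
+          #
+          #   OpenAI::Responses::ResponseInputItem::ApplyPatchCall.new(
+          #     call_id: "call_456",
+          #     status: :completed,
+          #     operation: {type: :update_file, path: "lib/example.rb", diff: "@@ -1 +1 @@\n-old\n+new"}
+          #   )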
+ type: :apply_patch_call + ) + end + + sig do + override.returns( + { + call_id: String, + operation: + T.any( + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::CreateFile, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::DeleteFile, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::UpdateFile + ), + status: + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Status::OrSymbol, + type: Symbol, + id: T.nilable(String) + } + ) + end + def to_hash + end + + # The specific create, delete, or update instruction for the apply_patch tool + # call. + module Operation + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::CreateFile, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::DeleteFile, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::UpdateFile + ) + end + + class CreateFile < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::CreateFile, + OpenAI::Internal::AnyHash + ) + end + + # Unified diff content to apply when creating the file. + sig { returns(String) } + attr_accessor :diff + + # Path of the file to create relative to the workspace root. + sig { returns(String) } + attr_accessor :path + + # The operation type. Always `create_file`. + sig { returns(Symbol) } + attr_accessor :type + + # Instruction for creating a new file via the apply_patch tool. + sig do + params(diff: String, path: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # Unified diff content to apply when creating the file. + diff:, + # Path of the file to create relative to the workspace root. + path:, + # The operation type. Always `create_file`. + type: :create_file + ) + end + + sig do + override.returns({ diff: String, path: String, type: Symbol }) + end + def to_hash + end + end + + class DeleteFile < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::DeleteFile, + OpenAI::Internal::AnyHash + ) + end + + # Path of the file to delete relative to the workspace root. + sig { returns(String) } + attr_accessor :path + + # The operation type. Always `delete_file`. + sig { returns(Symbol) } + attr_accessor :type + + # Instruction for deleting an existing file via the apply_patch tool. + sig do + params(path: String, type: Symbol).returns(T.attached_class) + end + def self.new( + # Path of the file to delete relative to the workspace root. + path:, + # The operation type. Always `delete_file`. + type: :delete_file + ) + end + + sig { override.returns({ path: String, type: Symbol }) } + def to_hash + end + end + + class UpdateFile < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::UpdateFile, + OpenAI::Internal::AnyHash + ) + end + + # Unified diff content to apply to the existing file. + sig { returns(String) } + attr_accessor :diff + + # Path of the file to update relative to the workspace root. + sig { returns(String) } + attr_accessor :path + + # The operation type. Always `update_file`. + sig { returns(Symbol) } + attr_accessor :type + + # Instruction for updating an existing file via the apply_patch tool. 
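+            # (The `diff` field carries standard unified-diff hunks; e.g.
+            #   "@@ -1,2 +1,2 @@\n-old_line\n+new_line", with hypothetical contents.)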
+ sig do + params(diff: String, path: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # Unified diff content to apply to the existing file. + diff:, + # Path of the file to update relative to the workspace root. + path:, + # The operation type. Always `update_file`. + type: :update_file + ) + end + + sig do + override.returns({ diff: String, path: String, type: Symbol }) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::Variants + ] + ) + end + def self.variants + end + end + + # The status of the apply patch tool call. One of `in_progress` or `completed`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class ApplyPatchCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the apply patch tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The status of the apply patch tool call output. One of `completed` or `failed`. + sig do + returns( + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::Status::OrSymbol + ) + end + attr_accessor :status + + # The type of the item. Always `apply_patch_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the apply patch tool call output. Populated when this item is + # returned via API. + sig { returns(T.nilable(String)) } + attr_accessor :id + + # Optional human-readable log text from the apply patch tool (e.g., patch results + # or errors). + sig { returns(T.nilable(String)) } + attr_reader :output + + sig { params(output: String).void } + attr_writer :output + + # The streamed output emitted by an apply patch tool call. + sig do + params( + call_id: String, + status: + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::Status::OrSymbol, + id: T.nilable(String), + output: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the apply patch tool call generated by the model. + call_id:, + # The status of the apply patch tool call output. One of `completed` or `failed`. + status:, + # The unique ID of the apply patch tool call output. Populated when this item is + # returned via API. + id: nil, + # Optional human-readable log text from the apply patch tool (e.g., patch results + # or errors). + output: nil, + # The type of the item. Always `apply_patch_call_output`. + type: :apply_patch_call_output + ) + end + + sig do + override.returns( + { + call_id: String, + status: + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::Status::OrSymbol, + type: Symbol, + id: T.nilable(String), + output: String + } + ) + end + def to_hash + end + + # The status of the apply patch tool call output. One of `completed` or `failed`. 
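+        # As a sketch (hypothetical values), a successful patch is reported
+        # back as:
+        #
+        #   {type: :apply_patch_call_output, call_id: "call_456", status: :completed,
+        #    output: "Updated lib/example.rb"}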
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + class McpListTools < OpenAI::Internal::Type::BaseModel OrHash = T.type_alias do diff --git a/rbi/openai/models/responses/response_item.rbi b/rbi/openai/models/responses/response_item.rbi index 1bf6eb1a..cd97f04f 100644 --- a/rbi/openai/models/responses/response_item.rbi +++ b/rbi/openai/models/responses/response_item.rbi @@ -22,6 +22,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall, OpenAI::Responses::ResponseItem::LocalShellCall, OpenAI::Responses::ResponseItem::LocalShellCallOutput, + OpenAI::Responses::ResponseFunctionShellToolCall, + OpenAI::Responses::ResponseFunctionShellToolCallOutput, + OpenAI::Responses::ResponseApplyPatchToolCall, + OpenAI::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Responses::ResponseItem::McpListTools, OpenAI::Responses::ResponseItem::McpApprovalRequest, OpenAI::Responses::ResponseItem::McpApprovalResponse, diff --git a/rbi/openai/models/responses/response_item_list.rbi b/rbi/openai/models/responses/response_item_list.rbi index 4e645d9f..38d1a8aa 100644 --- a/rbi/openai/models/responses/response_item_list.rbi +++ b/rbi/openai/models/responses/response_item_list.rbi @@ -52,6 +52,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, OpenAI::Responses::ResponseItem::LocalShellCall::OrHash, OpenAI::Responses::ResponseItem::LocalShellCallOutput::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCall::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCallOutput::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCall::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCallOutput::OrHash, OpenAI::Responses::ResponseItem::McpListTools::OrHash, OpenAI::Responses::ResponseItem::McpApprovalRequest::OrHash, OpenAI::Responses::ResponseItem::McpApprovalResponse::OrHash, diff --git a/rbi/openai/models/responses/response_output_item.rbi b/rbi/openai/models/responses/response_output_item.rbi index 8f527828..83620f7e 100644 --- a/rbi/openai/models/responses/response_output_item.rbi +++ b/rbi/openai/models/responses/response_output_item.rbi @@ -19,6 +19,10 @@ module OpenAI OpenAI::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Responses::ResponseCodeInterpreterToolCall, OpenAI::Responses::ResponseOutputItem::LocalShellCall, + OpenAI::Responses::ResponseFunctionShellToolCall, + OpenAI::Responses::ResponseFunctionShellToolCallOutput, + OpenAI::Responses::ResponseApplyPatchToolCall, + OpenAI::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Responses::ResponseOutputItem::McpCall, OpenAI::Responses::ResponseOutputItem::McpListTools, OpenAI::Responses::ResponseOutputItem::McpApprovalRequest, diff --git a/rbi/openai/models/responses/response_output_item_added_event.rbi b/rbi/openai/models/responses/response_output_item_added_event.rbi index 6ac51356..a5201d6f 100644 --- a/rbi/openai/models/responses/response_output_item_added_event.rbi +++ 
b/rbi/openai/models/responses/response_output_item_added_event.rbi @@ -42,6 +42,10 @@ module OpenAI OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::OrHash, OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCall::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCallOutput::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCall::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCallOutput::OrHash, OpenAI::Responses::ResponseOutputItem::McpCall::OrHash, OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash, OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash, diff --git a/rbi/openai/models/responses/response_output_item_done_event.rbi b/rbi/openai/models/responses/response_output_item_done_event.rbi index ba0ecc6d..2acace84 100644 --- a/rbi/openai/models/responses/response_output_item_done_event.rbi +++ b/rbi/openai/models/responses/response_output_item_done_event.rbi @@ -42,6 +42,10 @@ module OpenAI OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::OrHash, OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCall::OrHash, + OpenAI::Responses::ResponseFunctionShellToolCallOutput::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCall::OrHash, + OpenAI::Responses::ResponseApplyPatchToolCallOutput::OrHash, OpenAI::Responses::ResponseOutputItem::McpCall::OrHash, OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash, OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash, diff --git a/rbi/openai/models/responses/tool.rbi b/rbi/openai/models/responses/tool.rbi index 2de15c79..2c3f8589 100644 --- a/rbi/openai/models/responses/tool.rbi +++ b/rbi/openai/models/responses/tool.rbi @@ -17,7 +17,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::FunctionShellTool, OpenAI::Responses::CustomTool, + OpenAI::Responses::ApplyPatchTool, OpenAI::Responses::WebSearchTool, OpenAI::Responses::WebSearchPreviewTool ) diff --git a/rbi/openai/models/responses/tool_choice_apply_patch.rbi b/rbi/openai/models/responses/tool_choice_apply_patch.rbi new file mode 100644 index 00000000..e12a53c2 --- /dev/null +++ b/rbi/openai/models/responses/tool_choice_apply_patch.rbi @@ -0,0 +1,33 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ToolChoiceApplyPatch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ToolChoiceApplyPatch, + OpenAI::Internal::AnyHash + ) + end + + # The tool to call. Always `apply_patch`. + sig { returns(Symbol) } + attr_accessor :type + + # Forces the model to call the apply_patch tool when executing a tool call. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The tool to call. Always `apply_patch`. 
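+        # For example (a sketch; assumes the apply_patch tool is also enabled
+        # in `tools` for the request):
+        #
+        #   tool_choice: OpenAI::Responses::ToolChoiceApplyPatch.new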
+ type: :apply_patch + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/tool_choice_shell.rbi b/rbi/openai/models/responses/tool_choice_shell.rbi new file mode 100644 index 00000000..e6c43d48 --- /dev/null +++ b/rbi/openai/models/responses/tool_choice_shell.rbi @@ -0,0 +1,30 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ToolChoiceShell < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::ToolChoiceShell, OpenAI::Internal::AnyHash) + end + + # The tool to call. Always `shell`. + sig { returns(Symbol) } + attr_accessor :type + + # Forces the model to call the function shell tool when a tool call is required. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The tool to call. Always `shell`. + type: :shell + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/resources/batches.rbi b/rbi/openai/resources/batches.rbi index 26543c2d..1f1cc217 100644 --- a/rbi/openai/resources/batches.rbi +++ b/rbi/openai/resources/batches.rbi @@ -21,9 +21,10 @@ module OpenAI # is supported. completion_window:, # The endpoint to be used for all requests in the batch. Currently - # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - # are supported. Note that `/v1/embeddings` batches are also restricted to a - # maximum of 50,000 embedding inputs across all requests in the batch. + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, + # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also + # restricted to a maximum of 50,000 embedding inputs across all requests in the + # batch. endpoint:, # The ID of an uploaded file that contains requests for the new batch. # diff --git a/rbi/openai/resources/beta/assistants.rbi b/rbi/openai/resources/beta/assistants.rbi index fb286a85..6c515268 100644 --- a/rbi/openai/resources/beta/assistants.rbi +++ b/rbi/openai/resources/beta/assistants.rbi @@ -62,12 +62,16 @@ module OpenAI name: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -192,12 +196,16 @@ module OpenAI name: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/rbi/openai/resources/beta/threads/runs.rbi b/rbi/openai/resources/beta/threads/runs.rbi index 06008148..5756641a 100644 --- a/rbi/openai/resources/beta/threads/runs.rbi +++ b/rbi/openai/resources/beta/threads/runs.rbi @@ -125,12 +125,16 @@ module OpenAI parallel_tool_calls: nil, # Body param: Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Body param: Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -307,12 +311,16 @@ module OpenAI parallel_tool_calls: nil, # Body param: Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. 
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # Body param: Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/rbi/openai/resources/chat/completions.rbi b/rbi/openai/resources/chat/completions.rbi index 54da2ae3..3362d30f 100644 --- a/rbi/openai/resources/chat/completions.rbi +++ b/rbi/openai/resources/chat/completions.rbi @@ -66,6 +66,10 @@ module OpenAI T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash), presence_penalty: T.nilable(Float), prompt_cache_key: String, + prompt_cache_retention: + T.nilable( + OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::OrSymbol + ), reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), response_format: T.any( @@ -217,14 +221,23 @@ module OpenAI # hit rates. Replaces the `user` field. # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). prompt_cache_key: nil, + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + prompt_cache_retention: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. - # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. + # + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # An object specifying the format that the model must output. # @@ -388,6 +401,10 @@ module OpenAI T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash), presence_penalty: T.nilable(Float), prompt_cache_key: String, + prompt_cache_retention: + T.nilable( + OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::OrSymbol + ), reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), response_format: T.any( @@ -537,14 +554,23 @@ module OpenAI # hit rates. Replaces the `user` field. # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). prompt_cache_key: nil, + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + prompt_cache_retention: nil, # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - # effort can result in faster responses and fewer tokens used on reasoning in a - # response. 
- # - # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - # effort. + # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing + # reasoning effort can result in faster responses and fewer tokens used on + # reasoning in a response. + # + # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + # calls are supported for all reasoning values in gpt-5.1. + # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + # support `none`. + # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. reasoning_effort: nil, # An object specifying the format that the model must output. # diff --git a/rbi/openai/resources/conversations.rbi b/rbi/openai/resources/conversations.rbi index 86ab393c..9cca87a4 100644 --- a/rbi/openai/resources/conversations.rbi +++ b/rbi/openai/resources/conversations.rbi @@ -27,6 +27,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash, OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::ShellCall::OrHash, + OpenAI::Responses::ResponseInputItem::ShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::OrHash, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::OrHash, OpenAI::Responses::ResponseInputItem::McpListTools::OrHash, OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash, OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash, diff --git a/rbi/openai/resources/conversations/items.rbi b/rbi/openai/resources/conversations/items.rbi index d01e15a9..0c570493 100644 --- a/rbi/openai/resources/conversations/items.rbi +++ b/rbi/openai/resources/conversations/items.rbi @@ -25,6 +25,10 @@ module OpenAI OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash, OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::ShellCall::OrHash, + OpenAI::Responses::ResponseInputItem::ShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::OrHash, + OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::OrHash, OpenAI::Responses::ResponseInputItem::McpListTools::OrHash, OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash, OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash, diff --git a/rbi/openai/resources/responses.rbi b/rbi/openai/resources/responses.rbi index 17d1a95e..1bd7eeeb 100644 --- a/rbi/openai/resources/responses.rbi +++ b/rbi/openai/resources/responses.rbi @@ -51,6 +51,10 @@ module OpenAI previous_response_id: T.nilable(String), prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash), prompt_cache_key: String, + prompt_cache_retention: + T.nilable( + OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::OrSymbol + ), reasoning: T.nilable(OpenAI::Reasoning::OrHash), safety_identifier: String, service_tier: @@ -75,7 +79,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, OpenAI::Responses::ToolChoiceMcp::OrHash, - OpenAI::Responses::ToolChoiceCustom::OrHash + OpenAI::Responses::ToolChoiceCustom::OrHash, + OpenAI::Responses::ToolChoiceApplyPatch::OrHash, + OpenAI::Responses::ToolChoiceShell::OrHash ), tools: T::Array[ @@ -87,7 +93,9 @@ 
module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -183,6 +191,11 @@ module OpenAI # hit rates. Replaces the `user` field. # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). prompt_cache_key: nil, + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + prompt_cache_retention: nil, # **gpt-5 and o-series models only** # # Configuration options for @@ -322,6 +335,10 @@ module OpenAI previous_response_id: T.nilable(String), prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash), prompt_cache_key: String, + prompt_cache_retention: + T.nilable( + OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::OrSymbol + ), reasoning: T.nilable(OpenAI::Reasoning::OrHash), safety_identifier: String, service_tier: @@ -348,7 +365,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, OpenAI::Responses::ToolChoiceMcp::OrHash, - OpenAI::Responses::ToolChoiceCustom::OrHash + OpenAI::Responses::ToolChoiceCustom::OrHash, + OpenAI::Responses::ToolChoiceApplyPatch::OrHash, + OpenAI::Responses::ToolChoiceShell::OrHash ), tools: T::Array[ @@ -360,7 +379,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) @@ -460,6 +481,11 @@ module OpenAI # hit rates. Replaces the `user` field. # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). prompt_cache_key: nil, + # The retention policy for the prompt cache. Set to `24h` to enable extended + # prompt caching, which keeps cached prefixes active for longer, up to a maximum + # of 24 hours. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). 
+ prompt_cache_retention: nil, # **gpt-5 and o-series models only** # # Configuration options for diff --git a/rbi/openai/resources/responses/input_tokens.rbi b/rbi/openai/resources/responses/input_tokens.rbi index badc9068..49e1fafa 100644 --- a/rbi/openai/resources/responses/input_tokens.rbi +++ b/rbi/openai/resources/responses/input_tokens.rbi @@ -33,7 +33,9 @@ module OpenAI OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, OpenAI::Responses::ToolChoiceMcp::OrHash, - OpenAI::Responses::ToolChoiceCustom::OrHash + OpenAI::Responses::ToolChoiceCustom::OrHash, + OpenAI::Responses::ToolChoiceApplyPatch::OrHash, + OpenAI::Responses::ToolChoiceShell::OrHash ) ), tools: @@ -47,7 +49,9 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::FunctionShellTool::OrHash, OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::ApplyPatchTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash, OpenAI::Responses::WebSearchPreviewTool::OrHash ) diff --git a/sig/openai/models/batch_create_params.rbs b/sig/openai/models/batch_create_params.rbs index 3b5ff7a8..b04f8dae 100644 --- a/sig/openai/models/batch_create_params.rbs +++ b/sig/openai/models/batch_create_params.rbs @@ -61,6 +61,7 @@ module OpenAI | :"/v1/chat/completions" | :"/v1/embeddings" | :"/v1/completions" + | :"/v1/moderations" module Endpoint extend OpenAI::Internal::Type::Enum @@ -69,6 +70,7 @@ module OpenAI V1_CHAT_COMPLETIONS: :"/v1/chat/completions" V1_EMBEDDINGS: :"/v1/embeddings" V1_COMPLETIONS: :"/v1/completions" + V1_MODERATIONS: :"/v1/moderations" def self?.values: -> ::Array[OpenAI::Models::BatchCreateParams::endpoint] end diff --git a/sig/openai/models/chat/completion_create_params.rbs b/sig/openai/models/chat/completion_create_params.rbs index e02095c4..a4a10068 100644 --- a/sig/openai/models/chat/completion_create_params.rbs +++ b/sig/openai/models/chat/completion_create_params.rbs @@ -20,6 +20,7 @@ module OpenAI prediction: OpenAI::Chat::ChatCompletionPredictionContent?, presence_penalty: Float?, prompt_cache_key: String, + prompt_cache_retention: OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention?, reasoning_effort: OpenAI::Models::reasoning_effort?, response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format, safety_identifier: String, @@ -89,6 +90,8 @@ module OpenAI def prompt_cache_key=: (String) -> String + attr_accessor prompt_cache_retention: OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention? + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? attr_reader response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format? 
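
For orientation, the `prompt_cache_retention` signatures above translate into a call like the following. This is a minimal sketch, not part of the diff: the client setup mirrors the README, the `prompt_cache_key` value is a made-up example, and `:"24h"` is the extended-retention enum value described in the doc comments.

```ruby
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Opt this request's cached prefix into extended prompt caching
# (up to 24 hours); see the doc comment in the generated signatures.
chat_completion = openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
  model: :"gpt-5.1",
  prompt_cache_key: "my-app-v1",  # illustrative key; stable keys improve hit rates
  prompt_cache_retention: :"24h"  # enum: :"in-memory" | :"24h"
)
puts(chat_completion)
```
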
@@ -159,6 +162,7 @@ module OpenAI ?prediction: OpenAI::Chat::ChatCompletionPredictionContent?, ?presence_penalty: Float?, ?prompt_cache_key: String, + ?prompt_cache_retention: OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention?, ?reasoning_effort: OpenAI::Models::reasoning_effort?, ?response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format, ?safety_identifier: String, @@ -196,6 +200,7 @@ module OpenAI prediction: OpenAI::Chat::ChatCompletionPredictionContent?, presence_penalty: Float?, prompt_cache_key: String, + prompt_cache_retention: OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention?, reasoning_effort: OpenAI::Models::reasoning_effort?, response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format, safety_identifier: String, @@ -288,6 +293,17 @@ module OpenAI def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality] end + type prompt_cache_retention = :"in-memory" | :"24h" + + module PromptCacheRetention + extend OpenAI::Internal::Type::Enum + + IN_MEMORY: :"in-memory" + PROMPT_CACHE_RETENTION_24H: :"24h" + + def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention] + end + type response_format = OpenAI::ResponseFormatText | OpenAI::ResponseFormatJSONSchema diff --git a/sig/openai/models/chat_model.rbs b/sig/openai/models/chat_model.rbs index afba9c16..4fd9ddab 100644 --- a/sig/openai/models/chat_model.rbs +++ b/sig/openai/models/chat_model.rbs @@ -1,7 +1,12 @@ module OpenAI module Models type chat_model = - :"gpt-5" + :"gpt-5.1" + | :"gpt-5.1-2025-11-13" + | :"gpt-5.1-codex" + | :"gpt-5.1-mini" + | :"gpt-5.1-chat-latest" + | :"gpt-5" | :"gpt-5-mini" | :"gpt-5-nano" | :"gpt-5-2025-08-07" @@ -67,6 +72,11 @@ module OpenAI module ChatModel extend OpenAI::Internal::Type::Enum + GPT_5_1: :"gpt-5.1" + GPT_5_1_2025_11_13: :"gpt-5.1-2025-11-13" + GPT_5_1_CODEX: :"gpt-5.1-codex" + GPT_5_1_MINI: :"gpt-5.1-mini" + GPT_5_1_CHAT_LATEST: :"gpt-5.1-chat-latest" GPT_5: :"gpt-5" GPT_5_MINI: :"gpt-5-mini" GPT_5_NANO: :"gpt-5-nano" diff --git a/sig/openai/models/conversations/conversation_item.rbs b/sig/openai/models/conversations/conversation_item.rbs index fbdce4fd..0f3eae46 100644 --- a/sig/openai/models/conversations/conversation_item.rbs +++ b/sig/openai/models/conversations/conversation_item.rbs @@ -16,6 +16,10 @@ module OpenAI | OpenAI::Responses::ResponseCodeInterpreterToolCall | OpenAI::Conversations::ConversationItem::LocalShellCall | OpenAI::Conversations::ConversationItem::LocalShellCallOutput + | OpenAI::Responses::ResponseFunctionShellToolCall + | OpenAI::Responses::ResponseFunctionShellToolCallOutput + | OpenAI::Responses::ResponseApplyPatchToolCall + | OpenAI::Responses::ResponseApplyPatchToolCallOutput | OpenAI::Conversations::ConversationItem::McpListTools | OpenAI::Conversations::ConversationItem::McpApprovalRequest | OpenAI::Conversations::ConversationItem::McpApprovalResponse diff --git a/sig/openai/models/reasoning_effort.rbs b/sig/openai/models/reasoning_effort.rbs index 2245e639..dd355847 100644 --- a/sig/openai/models/reasoning_effort.rbs +++ b/sig/openai/models/reasoning_effort.rbs @@ -1,10 +1,11 @@ module OpenAI module Models - type reasoning_effort = :minimal | :low | :medium | :high + type reasoning_effort = :none | :minimal | :low | :medium | :high module ReasoningEffort extend OpenAI::Internal::Type::Enum + NONE: :none MINIMAL: :minimal LOW: :low MEDIUM: :medium diff --git a/sig/openai/models/responses/apply_patch_tool.rbs 
b/sig/openai/models/responses/apply_patch_tool.rbs new file mode 100644 index 00000000..9cf81da3 --- /dev/null +++ b/sig/openai/models/responses/apply_patch_tool.rbs @@ -0,0 +1,15 @@ +module OpenAI + module Models + module Responses + type apply_patch_tool = { type: :apply_patch } + + class ApplyPatchTool < OpenAI::Internal::Type::BaseModel + attr_accessor type: :apply_patch + + def initialize: (?type: :apply_patch) -> void + + def to_hash: -> { type: :apply_patch } + end + end + end +end diff --git a/sig/openai/models/responses/function_shell_tool.rbs b/sig/openai/models/responses/function_shell_tool.rbs new file mode 100644 index 00000000..a4b258a4 --- /dev/null +++ b/sig/openai/models/responses/function_shell_tool.rbs @@ -0,0 +1,15 @@ +module OpenAI + module Models + module Responses + type function_shell_tool = { type: :shell } + + class FunctionShellTool < OpenAI::Internal::Type::BaseModel + attr_accessor type: :shell + + def initialize: (?type: :shell) -> void + + def to_hash: -> { type: :shell } + end + end + end +end diff --git a/sig/openai/models/responses/input_token_count_params.rbs b/sig/openai/models/responses/input_token_count_params.rbs index 88157c03..c5c5ea7d 100644 --- a/sig/openai/models/responses/input_token_count_params.rbs +++ b/sig/openai/models/responses/input_token_count_params.rbs @@ -142,6 +142,8 @@ module OpenAI | OpenAI::Responses::ToolChoiceFunction | OpenAI::Responses::ToolChoiceMcp | OpenAI::Responses::ToolChoiceCustom + | OpenAI::Responses::ToolChoiceApplyPatch + | OpenAI::Responses::ToolChoiceShell module ToolChoice extend OpenAI::Internal::Type::Union diff --git a/sig/openai/models/responses/response.rbs b/sig/openai/models/responses/response.rbs index 41dbcf8d..80480900 100644 --- a/sig/openai/models/responses/response.rbs +++ b/sig/openai/models/responses/response.rbs @@ -24,6 +24,7 @@ module OpenAI previous_response_id: String?, prompt: OpenAI::Responses::ResponsePrompt?, prompt_cache_key: String, + prompt_cache_retention: OpenAI::Models::Responses::Response::prompt_cache_retention?, reasoning: OpenAI::Reasoning?, safety_identifier: String, service_tier: OpenAI::Models::Responses::Response::service_tier?, @@ -80,6 +81,8 @@ module OpenAI def prompt_cache_key=: (String) -> String + attr_accessor prompt_cache_retention: OpenAI::Models::Responses::Response::prompt_cache_retention? + attr_accessor reasoning: OpenAI::Reasoning? attr_reader safety_identifier: String? 
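
The new `FunctionShellTool`, `ApplyPatchTool`, and matching `ToolChoice*` classes compose with `responses.create` in the usual way. A hedged sketch — the prompt is illustrative, and the class names are taken directly from the signatures in this diff:

```ruby
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Expose both new tools and force the shell tool for this turn.
response = openai.responses.create(
  model: :"gpt-5.1",
  input: "List the files in the repository root.",
  tools: [
    OpenAI::Responses::FunctionShellTool.new,
    OpenAI::Responses::ApplyPatchTool.new
  ],
  tool_choice: OpenAI::Responses::ToolChoiceShell.new
)
puts(response.output)
```
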
@@ -135,6 +138,7 @@ module OpenAI ?previous_response_id: String?, ?prompt: OpenAI::Responses::ResponsePrompt?, ?prompt_cache_key: String, + ?prompt_cache_retention: OpenAI::Models::Responses::Response::prompt_cache_retention?, ?reasoning: OpenAI::Reasoning?, ?safety_identifier: String, ?service_tier: OpenAI::Models::Responses::Response::service_tier?, @@ -169,6 +173,7 @@ module OpenAI previous_response_id: String?, prompt: OpenAI::Responses::ResponsePrompt?, prompt_cache_key: String, + prompt_cache_retention: OpenAI::Models::Responses::Response::prompt_cache_retention?, reasoning: OpenAI::Reasoning?, safety_identifier: String, service_tier: OpenAI::Models::Responses::Response::service_tier?, @@ -230,6 +235,8 @@ module OpenAI | OpenAI::Responses::ToolChoiceFunction | OpenAI::Responses::ToolChoiceMcp | OpenAI::Responses::ToolChoiceCustom + | OpenAI::Responses::ToolChoiceApplyPatch + | OpenAI::Responses::ToolChoiceShell module ToolChoice extend OpenAI::Internal::Type::Union @@ -247,6 +254,17 @@ module OpenAI def to_hash: -> { id: String } end + type prompt_cache_retention = :"in-memory" | :"24h" + + module PromptCacheRetention + extend OpenAI::Internal::Type::Enum + + IN_MEMORY: :"in-memory" + PROMPT_CACHE_RETENTION_24H: :"24h" + + def self?.values: -> ::Array[OpenAI::Models::Responses::Response::prompt_cache_retention] + end + type service_tier = :auto | :default | :flex | :scale | :priority module ServiceTier diff --git a/sig/openai/models/responses/response_apply_patch_tool_call.rbs b/sig/openai/models/responses/response_apply_patch_tool_call.rbs new file mode 100644 index 00000000..37238f0c --- /dev/null +++ b/sig/openai/models/responses/response_apply_patch_tool_call.rbs @@ -0,0 +1,123 @@ +module OpenAI + module Models + module Responses + type response_apply_patch_tool_call = + { + id: String, + call_id: String, + status: OpenAI::Models::Responses::ResponseApplyPatchToolCall::status, + type: :apply_patch_call, + created_by: String, + operation: OpenAI::Models::Responses::ResponseApplyPatchToolCall::operation + } + + class ResponseApplyPatchToolCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor call_id: String + + attr_accessor status: OpenAI::Models::Responses::ResponseApplyPatchToolCall::status + + attr_accessor type: :apply_patch_call + + attr_reader created_by: String? + + def created_by=: (String) -> String + + attr_reader operation: OpenAI::Models::Responses::ResponseApplyPatchToolCall::operation? 
+ + def operation=: ( + OpenAI::Models::Responses::ResponseApplyPatchToolCall::operation + ) -> OpenAI::Models::Responses::ResponseApplyPatchToolCall::operation + + def initialize: ( + id: String, + call_id: String, + status: OpenAI::Models::Responses::ResponseApplyPatchToolCall::status, + ?created_by: String, + ?operation: OpenAI::Models::Responses::ResponseApplyPatchToolCall::operation, + ?type: :apply_patch_call + ) -> void + + def to_hash: -> { + id: String, + call_id: String, + status: OpenAI::Models::Responses::ResponseApplyPatchToolCall::status, + type: :apply_patch_call, + created_by: String, + operation: OpenAI::Models::Responses::ResponseApplyPatchToolCall::operation + } + + type status = :in_progress | :completed + + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS: :in_progress + COMPLETED: :completed + + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseApplyPatchToolCall::status] + end + + type operation = + OpenAI::Responses::ResponseApplyPatchToolCall::Operation::CreateFile + | OpenAI::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile + | OpenAI::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile + + module Operation + extend OpenAI::Internal::Type::Union + + type create_file = { diff: String, path: String, type: :create_file } + + class CreateFile < OpenAI::Internal::Type::BaseModel + attr_accessor diff: String + + attr_accessor path: String + + attr_accessor type: :create_file + + def initialize: ( + diff: String, + path: String, + ?type: :create_file + ) -> void + + def to_hash: -> { diff: String, path: String, type: :create_file } + end + + type delete_file = { path: String, type: :delete_file } + + class DeleteFile < OpenAI::Internal::Type::BaseModel + attr_accessor path: String + + attr_accessor type: :delete_file + + def initialize: (path: String, ?type: :delete_file) -> void + + def to_hash: -> { path: String, type: :delete_file } + end + + type update_file = { diff: String, path: String, type: :update_file } + + class UpdateFile < OpenAI::Internal::Type::BaseModel + attr_accessor diff: String + + attr_accessor path: String + + attr_accessor type: :update_file + + def initialize: ( + diff: String, + path: String, + ?type: :update_file + ) -> void + + def to_hash: -> { diff: String, path: String, type: :update_file } + end + + def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseApplyPatchToolCall::operation] + end + end + end + end +end diff --git a/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs b/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs new file mode 100644 index 00000000..45908b33 --- /dev/null +++ b/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs @@ -0,0 +1,60 @@ +module OpenAI + module Models + module Responses + type response_apply_patch_tool_call_output = + { + id: String, + call_id: String, + output: String?, + status: OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput::status, + type: :apply_patch_call_output, + created_by: String + } + + class ResponseApplyPatchToolCallOutput < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor call_id: String + + attr_accessor output: String? + + attr_accessor status: OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput::status + + attr_accessor type: :apply_patch_call_output + + attr_reader created_by: String? 
+ + def created_by=: (String) -> String + + def initialize: ( + id: String, + call_id: String, + output: String?, + status: OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput::status, + ?created_by: String, + ?type: :apply_patch_call_output + ) -> void + + def to_hash: -> { + id: String, + call_id: String, + output: String?, + status: OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput::status, + type: :apply_patch_call_output, + created_by: String + } + + type status = :completed | :failed + + module Status + extend OpenAI::Internal::Type::Enum + + COMPLETED: :completed + FAILED: :failed + + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput::status] + end + end + end + end +end diff --git a/sig/openai/models/responses/response_create_params.rbs b/sig/openai/models/responses/response_create_params.rbs index 4a636dd1..127ed52a 100644 --- a/sig/openai/models/responses/response_create_params.rbs +++ b/sig/openai/models/responses/response_create_params.rbs @@ -16,6 +16,7 @@ module OpenAI previous_response_id: String?, prompt: OpenAI::Responses::ResponsePrompt?, prompt_cache_key: String, + prompt_cache_retention: OpenAI::Models::Responses::ResponseCreateParams::prompt_cache_retention?, reasoning: OpenAI::Reasoning?, safety_identifier: String, service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?, @@ -72,6 +73,8 @@ module OpenAI def prompt_cache_key=: (String) -> String + attr_accessor prompt_cache_retention: OpenAI::Models::Responses::ResponseCreateParams::prompt_cache_retention? + attr_accessor reasoning: OpenAI::Reasoning? attr_reader safety_identifier: String? @@ -128,6 +131,7 @@ module OpenAI ?previous_response_id: String?, ?prompt: OpenAI::Responses::ResponsePrompt?, ?prompt_cache_key: String, + ?prompt_cache_retention: OpenAI::Models::Responses::ResponseCreateParams::prompt_cache_retention?, ?reasoning: OpenAI::Reasoning?, ?safety_identifier: String, ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?, @@ -158,6 +162,7 @@ module OpenAI previous_response_id: String?, prompt: OpenAI::Responses::ResponsePrompt?, prompt_cache_key: String, + prompt_cache_retention: OpenAI::Models::Responses::ResponseCreateParams::prompt_cache_retention?, reasoning: OpenAI::Reasoning?, safety_identifier: String, service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?, @@ -191,6 +196,17 @@ module OpenAI def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::input] end + type prompt_cache_retention = :"in-memory" | :"24h" + + module PromptCacheRetention + extend OpenAI::Internal::Type::Enum + + IN_MEMORY: :"in-memory" + PROMPT_CACHE_RETENTION_24H: :"24h" + + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::prompt_cache_retention] + end + type service_tier = :auto | :default | :flex | :scale | :priority module ServiceTier @@ -224,6 +240,8 @@ module OpenAI | OpenAI::Responses::ToolChoiceFunction | OpenAI::Responses::ToolChoiceMcp | OpenAI::Responses::ToolChoiceCustom + | OpenAI::Responses::ToolChoiceApplyPatch + | OpenAI::Responses::ToolChoiceShell module ToolChoice extend OpenAI::Internal::Type::Union diff --git a/sig/openai/models/responses/response_function_shell_call_output_content.rbs b/sig/openai/models/responses/response_function_shell_call_output_content.rbs new file mode 100644 index 00000000..5339869a --- /dev/null +++ b/sig/openai/models/responses/response_function_shell_call_output_content.rbs @@ -0,0 +1,64 @@ +module OpenAI + 
module Models + module Responses + type response_function_shell_call_output_content = + { + outcome: OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::outcome, + stderr: String, + stdout: String + } + + class ResponseFunctionShellCallOutputContent < OpenAI::Internal::Type::BaseModel + attr_accessor outcome: OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::outcome + + attr_accessor stderr: String + + attr_accessor stdout: String + + def initialize: ( + outcome: OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::outcome, + stderr: String, + stdout: String + ) -> void + + def to_hash: -> { + outcome: OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::outcome, + stderr: String, + stdout: String + } + + type outcome = + OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout + | OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit + + module Outcome + extend OpenAI::Internal::Type::Union + + type timeout = { type: :timeout } + + class Timeout < OpenAI::Internal::Type::BaseModel + attr_accessor type: :timeout + + def initialize: (?type: :timeout) -> void + + def to_hash: -> { type: :timeout } + end + + type exit_ = { exit_code: Integer, type: :exit } + + class Exit < OpenAI::Internal::Type::BaseModel + attr_accessor exit_code: Integer + + attr_accessor type: :exit + + def initialize: (exit_code: Integer, ?type: :exit) -> void + + def to_hash: -> { exit_code: Integer, type: :exit } + end + + def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::outcome] + end + end + end + end +end diff --git a/sig/openai/models/responses/response_function_shell_tool_call.rbs b/sig/openai/models/responses/response_function_shell_tool_call.rbs new file mode 100644 index 00000000..b2cc6496 --- /dev/null +++ b/sig/openai/models/responses/response_function_shell_tool_call.rbs @@ -0,0 +1,88 @@ +module OpenAI + module Models + module Responses + type response_function_shell_tool_call = + { + id: String, + action: OpenAI::Responses::ResponseFunctionShellToolCall::Action, + call_id: String, + status: OpenAI::Models::Responses::ResponseFunctionShellToolCall::status, + type: :shell_call, + created_by: String + } + + class ResponseFunctionShellToolCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor action: OpenAI::Responses::ResponseFunctionShellToolCall::Action + + attr_accessor call_id: String + + attr_accessor status: OpenAI::Models::Responses::ResponseFunctionShellToolCall::status + + attr_accessor type: :shell_call + + attr_reader created_by: String? + + def created_by=: (String) -> String + + def initialize: ( + id: String, + action: OpenAI::Responses::ResponseFunctionShellToolCall::Action, + call_id: String, + status: OpenAI::Models::Responses::ResponseFunctionShellToolCall::status, + ?created_by: String, + ?type: :shell_call + ) -> void + + def to_hash: -> { + id: String, + action: OpenAI::Responses::ResponseFunctionShellToolCall::Action, + call_id: String, + status: OpenAI::Models::Responses::ResponseFunctionShellToolCall::status, + type: :shell_call, + created_by: String + } + + type action = + { + commands: ::Array[String], + max_output_length: Integer?, + timeout_ms: Integer? + } + + class Action < OpenAI::Internal::Type::BaseModel + attr_accessor commands: ::Array[String] + + attr_accessor max_output_length: Integer? + + attr_accessor timeout_ms: Integer? 
+ + def initialize: ( + commands: ::Array[String], + max_output_length: Integer?, + timeout_ms: Integer? + ) -> void + + def to_hash: -> { + commands: ::Array[String], + max_output_length: Integer?, + timeout_ms: Integer? + } + end + + type status = :in_progress | :completed | :incomplete + + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS: :in_progress + COMPLETED: :completed + INCOMPLETE: :incomplete + + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseFunctionShellToolCall::status] + end + end + end + end +end diff --git a/sig/openai/models/responses/response_function_shell_tool_call_output.rbs b/sig/openai/models/responses/response_function_shell_tool_call_output.rbs new file mode 100644 index 00000000..b63759ae --- /dev/null +++ b/sig/openai/models/responses/response_function_shell_tool_call_output.rbs @@ -0,0 +1,115 @@ +module OpenAI + module Models + module Responses + type response_function_shell_tool_call_output = + { + id: String, + call_id: String, + max_output_length: Integer?, + output: ::Array[OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output], + type: :shell_call_output, + created_by: String + } + + class ResponseFunctionShellToolCallOutput < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor call_id: String + + attr_accessor max_output_length: Integer? + + attr_accessor output: ::Array[OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output] + + attr_accessor type: :shell_call_output + + attr_reader created_by: String? + + def created_by=: (String) -> String + + def initialize: ( + id: String, + call_id: String, + max_output_length: Integer?, + output: ::Array[OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output], + ?created_by: String, + ?type: :shell_call_output + ) -> void + + def to_hash: -> { + id: String, + call_id: String, + max_output_length: Integer?, + output: ::Array[OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output], + type: :shell_call_output, + created_by: String + } + + type output = + { + outcome: OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::outcome, + stderr: String, + stdout: String, + created_by: String + } + + class Output < OpenAI::Internal::Type::BaseModel + attr_accessor outcome: OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::outcome + + attr_accessor stderr: String + + attr_accessor stdout: String + + attr_reader created_by: String? 
+ + def created_by=: (String) -> String + + def initialize: ( + outcome: OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::outcome, + stderr: String, + stdout: String, + ?created_by: String + ) -> void + + def to_hash: -> { + outcome: OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::outcome, + stderr: String, + stdout: String, + created_by: String + } + + type outcome = + OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Timeout + | OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output::Outcome::Exit + + module Outcome + extend OpenAI::Internal::Type::Union + + type timeout = { type: :timeout } + + class Timeout < OpenAI::Internal::Type::BaseModel + attr_accessor type: :timeout + + def initialize: (?type: :timeout) -> void + + def to_hash: -> { type: :timeout } + end + + type exit_ = { exit_code: Integer, type: :exit } + + class Exit < OpenAI::Internal::Type::BaseModel + attr_accessor exit_code: Integer + + attr_accessor type: :exit + + def initialize: (exit_code: Integer, ?type: :exit) -> void + + def to_hash: -> { exit_code: Integer, type: :exit } + end + + def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput::Output::outcome] + end + end + end + end + end +end diff --git a/sig/openai/models/responses/response_input_item.rbs b/sig/openai/models/responses/response_input_item.rbs index d99b7359..f7d227ab 100644 --- a/sig/openai/models/responses/response_input_item.rbs +++ b/sig/openai/models/responses/response_input_item.rbs @@ -16,6 +16,10 @@ module OpenAI | OpenAI::Responses::ResponseCodeInterpreterToolCall | OpenAI::Responses::ResponseInputItem::LocalShellCall | OpenAI::Responses::ResponseInputItem::LocalShellCallOutput + | OpenAI::Responses::ResponseInputItem::ShellCall + | OpenAI::Responses::ResponseInputItem::ShellCallOutput + | OpenAI::Responses::ResponseInputItem::ApplyPatchCall + | OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput | OpenAI::Responses::ResponseInputItem::McpListTools | OpenAI::Responses::ResponseInputItem::McpApprovalRequest | OpenAI::Responses::ResponseInputItem::McpApprovalResponse @@ -413,6 +417,278 @@ module OpenAI end end + type shell_call = + { + action: OpenAI::Responses::ResponseInputItem::ShellCall::Action, + call_id: String, + type: :shell_call, + id: String?, + status: OpenAI::Models::Responses::ResponseInputItem::ShellCall::status? + } + + class ShellCall < OpenAI::Internal::Type::BaseModel + attr_accessor action: OpenAI::Responses::ResponseInputItem::ShellCall::Action + + attr_accessor call_id: String + + attr_accessor type: :shell_call + + attr_accessor id: String? + + attr_accessor status: OpenAI::Models::Responses::ResponseInputItem::ShellCall::status? + + def initialize: ( + action: OpenAI::Responses::ResponseInputItem::ShellCall::Action, + call_id: String, + ?id: String?, + ?status: OpenAI::Models::Responses::ResponseInputItem::ShellCall::status?, + ?type: :shell_call + ) -> void + + def to_hash: -> { + action: OpenAI::Responses::ResponseInputItem::ShellCall::Action, + call_id: String, + type: :shell_call, + id: String?, + status: OpenAI::Models::Responses::ResponseInputItem::ShellCall::status? + } + + type action = + { + commands: ::Array[String], + max_output_length: Integer?, + timeout_ms: Integer? + } + + class Action < OpenAI::Internal::Type::BaseModel + attr_accessor commands: ::Array[String] + + attr_accessor max_output_length: Integer? + + attr_accessor timeout_ms: Integer? 
+ + def initialize: ( + commands: ::Array[String], + ?max_output_length: Integer?, + ?timeout_ms: Integer? + ) -> void + + def to_hash: -> { + commands: ::Array[String], + max_output_length: Integer?, + timeout_ms: Integer? + } + end + + type status = :in_progress | :completed | :incomplete + + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS: :in_progress + COMPLETED: :completed + INCOMPLETE: :incomplete + + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::ShellCall::status] + end + end + + type shell_call_output = + { + call_id: String, + output: ::Array[OpenAI::Responses::ResponseFunctionShellCallOutputContent], + type: :shell_call_output, + id: String?, + max_output_length: Integer? + } + + class ShellCallOutput < OpenAI::Internal::Type::BaseModel + attr_accessor call_id: String + + attr_accessor output: ::Array[OpenAI::Responses::ResponseFunctionShellCallOutputContent] + + attr_accessor type: :shell_call_output + + attr_accessor id: String? + + attr_accessor max_output_length: Integer? + + def initialize: ( + call_id: String, + output: ::Array[OpenAI::Responses::ResponseFunctionShellCallOutputContent], + ?id: String?, + ?max_output_length: Integer?, + ?type: :shell_call_output + ) -> void + + def to_hash: -> { + call_id: String, + output: ::Array[OpenAI::Responses::ResponseFunctionShellCallOutputContent], + type: :shell_call_output, + id: String?, + max_output_length: Integer? + } + end + + type apply_patch_call = + { + call_id: String, + operation: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::operation, + status: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::status, + type: :apply_patch_call, + id: String? + } + + class ApplyPatchCall < OpenAI::Internal::Type::BaseModel + attr_accessor call_id: String + + attr_accessor operation: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::operation + + attr_accessor status: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::status + + attr_accessor type: :apply_patch_call + + attr_accessor id: String? + + def initialize: ( + call_id: String, + operation: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::operation, + status: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::status, + ?id: String?, + ?type: :apply_patch_call + ) -> void + + def to_hash: -> { + call_id: String, + operation: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::operation, + status: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::status, + type: :apply_patch_call, + id: String? 
+ } + + type operation = + OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::CreateFile + | OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::DeleteFile + | OpenAI::Responses::ResponseInputItem::ApplyPatchCall::Operation::UpdateFile + + module Operation + extend OpenAI::Internal::Type::Union + + type create_file = + { diff: String, path: String, type: :create_file } + + class CreateFile < OpenAI::Internal::Type::BaseModel + attr_accessor diff: String + + attr_accessor path: String + + attr_accessor type: :create_file + + def initialize: ( + diff: String, + path: String, + ?type: :create_file + ) -> void + + def to_hash: -> { diff: String, path: String, type: :create_file } + end + + type delete_file = { path: String, type: :delete_file } + + class DeleteFile < OpenAI::Internal::Type::BaseModel + attr_accessor path: String + + attr_accessor type: :delete_file + + def initialize: (path: String, ?type: :delete_file) -> void + + def to_hash: -> { path: String, type: :delete_file } + end + + type update_file = + { diff: String, path: String, type: :update_file } + + class UpdateFile < OpenAI::Internal::Type::BaseModel + attr_accessor diff: String + + attr_accessor path: String + + attr_accessor type: :update_file + + def initialize: ( + diff: String, + path: String, + ?type: :update_file + ) -> void + + def to_hash: -> { diff: String, path: String, type: :update_file } + end + + def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::operation] + end + + type status = :in_progress | :completed + + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS: :in_progress + COMPLETED: :completed + + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall::status] + end + end + + type apply_patch_call_output = + { + call_id: String, + status: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput::status, + type: :apply_patch_call_output, + id: String?, + output: String + } + + class ApplyPatchCallOutput < OpenAI::Internal::Type::BaseModel + attr_accessor call_id: String + + attr_accessor status: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput::status + + attr_accessor type: :apply_patch_call_output + + attr_accessor id: String? + + attr_reader output: String? 
+ + def output=: (String) -> String + + def initialize: ( + call_id: String, + status: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput::status, + ?id: String?, + ?output: String, + ?type: :apply_patch_call_output + ) -> void + + def to_hash: -> { + call_id: String, + status: OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput::status, + type: :apply_patch_call_output, + id: String?, + output: String + } + + type status = :completed | :failed + + module Status + extend OpenAI::Internal::Type::Enum + + COMPLETED: :completed + FAILED: :failed + + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput::status] + end + end + type mcp_list_tools = { id: String, diff --git a/sig/openai/models/responses/response_item.rbs b/sig/openai/models/responses/response_item.rbs index 9a154b36..89e9fc51 100644 --- a/sig/openai/models/responses/response_item.rbs +++ b/sig/openai/models/responses/response_item.rbs @@ -14,6 +14,10 @@ module OpenAI | OpenAI::Responses::ResponseCodeInterpreterToolCall | OpenAI::Responses::ResponseItem::LocalShellCall | OpenAI::Responses::ResponseItem::LocalShellCallOutput + | OpenAI::Responses::ResponseFunctionShellToolCall + | OpenAI::Responses::ResponseFunctionShellToolCallOutput + | OpenAI::Responses::ResponseApplyPatchToolCall + | OpenAI::Responses::ResponseApplyPatchToolCallOutput | OpenAI::Responses::ResponseItem::McpListTools | OpenAI::Responses::ResponseItem::McpApprovalRequest | OpenAI::Responses::ResponseItem::McpApprovalResponse diff --git a/sig/openai/models/responses/response_output_item.rbs b/sig/openai/models/responses/response_output_item.rbs index 3c3a2838..70b70c48 100644 --- a/sig/openai/models/responses/response_output_item.rbs +++ b/sig/openai/models/responses/response_output_item.rbs @@ -11,6 +11,10 @@ module OpenAI | OpenAI::Responses::ResponseOutputItem::ImageGenerationCall | OpenAI::Responses::ResponseCodeInterpreterToolCall | OpenAI::Responses::ResponseOutputItem::LocalShellCall + | OpenAI::Responses::ResponseFunctionShellToolCall + | OpenAI::Responses::ResponseFunctionShellToolCallOutput + | OpenAI::Responses::ResponseApplyPatchToolCall + | OpenAI::Responses::ResponseApplyPatchToolCallOutput | OpenAI::Responses::ResponseOutputItem::McpCall | OpenAI::Responses::ResponseOutputItem::McpListTools | OpenAI::Responses::ResponseOutputItem::McpApprovalRequest diff --git a/sig/openai/models/responses/tool.rbs b/sig/openai/models/responses/tool.rbs index 90caad98..e4ddf9e6 100644 --- a/sig/openai/models/responses/tool.rbs +++ b/sig/openai/models/responses/tool.rbs @@ -9,7 +9,9 @@ module OpenAI | OpenAI::Responses::Tool::CodeInterpreter | OpenAI::Responses::Tool::ImageGeneration | OpenAI::Responses::Tool::LocalShell + | OpenAI::Responses::FunctionShellTool | OpenAI::Responses::CustomTool + | OpenAI::Responses::ApplyPatchTool | OpenAI::Responses::WebSearchTool | OpenAI::Responses::WebSearchPreviewTool diff --git a/sig/openai/models/responses/tool_choice_apply_patch.rbs b/sig/openai/models/responses/tool_choice_apply_patch.rbs new file mode 100644 index 00000000..dc6ff182 --- /dev/null +++ b/sig/openai/models/responses/tool_choice_apply_patch.rbs @@ -0,0 +1,15 @@ +module OpenAI + module Models + module Responses + type tool_choice_apply_patch = { type: :apply_patch } + + class ToolChoiceApplyPatch < OpenAI::Internal::Type::BaseModel + attr_accessor type: :apply_patch + + def initialize: (?type: :apply_patch) -> void + + def to_hash: -> { type: :apply_patch } + end + end + end +end diff --git 
a/sig/openai/models/responses/tool_choice_shell.rbs b/sig/openai/models/responses/tool_choice_shell.rbs new file mode 100644 index 00000000..48368cdb --- /dev/null +++ b/sig/openai/models/responses/tool_choice_shell.rbs @@ -0,0 +1,15 @@ +module OpenAI + module Models + module Responses + type tool_choice_shell = { type: :shell } + + class ToolChoiceShell < OpenAI::Internal::Type::BaseModel + attr_accessor type: :shell + + def initialize: (?type: :shell) -> void + + def to_hash: -> { type: :shell } + end + end + end +end diff --git a/sig/openai/resources/chat/completions.rbs b/sig/openai/resources/chat/completions.rbs index a4237ff1..90b333e7 100644 --- a/sig/openai/resources/chat/completions.rbs +++ b/sig/openai/resources/chat/completions.rbs @@ -22,6 +22,7 @@ module OpenAI ?prediction: OpenAI::Chat::ChatCompletionPredictionContent?, ?presence_penalty: Float?, ?prompt_cache_key: String, + ?prompt_cache_retention: OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention?, ?reasoning_effort: OpenAI::Models::reasoning_effort?, ?response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format, ?safety_identifier: String, @@ -59,6 +60,7 @@ module OpenAI ?prediction: OpenAI::Chat::ChatCompletionPredictionContent?, ?presence_penalty: Float?, ?prompt_cache_key: String, + ?prompt_cache_retention: OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention?, ?reasoning_effort: OpenAI::Models::reasoning_effort?, ?response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format, ?safety_identifier: String, diff --git a/sig/openai/resources/responses.rbs b/sig/openai/resources/responses.rbs index d2191e81..1e0e29a2 100644 --- a/sig/openai/resources/responses.rbs +++ b/sig/openai/resources/responses.rbs @@ -19,6 +19,7 @@ module OpenAI ?previous_response_id: String?, ?prompt: OpenAI::Responses::ResponsePrompt?, ?prompt_cache_key: String, + ?prompt_cache_retention: OpenAI::Models::Responses::ResponseCreateParams::prompt_cache_retention?, ?reasoning: OpenAI::Reasoning?, ?safety_identifier: String, ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?, @@ -49,6 +50,7 @@ module OpenAI ?previous_response_id: String?, ?prompt: OpenAI::Responses::ResponsePrompt?, ?prompt_cache_key: String, + ?prompt_cache_retention: OpenAI::Models::Responses::ResponseCreateParams::prompt_cache_retention?, ?reasoning: OpenAI::Reasoning?, ?safety_identifier: String, ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?, diff --git a/test/openai/client_test.rb b/test/openai/client_test.rb index 8ba8bd2d..39883ee6 100644 --- a/test/openai/client_test.rb +++ b/test/openai/client_test.rb @@ -40,7 +40,7 @@ def test_client_default_request_default_retry_attempts openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") assert_raises(OpenAI::Errors::InternalServerError) do - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5.1") end assert_requested(:any, /./, times: 3) @@ -52,7 +52,7 @@ def test_client_given_request_default_retry_attempts openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 3) assert_raises(OpenAI::Errors::InternalServerError) do - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: 
:"gpt-5.1") end assert_requested(:any, /./, times: 4) @@ -66,7 +66,7 @@ def test_client_default_request_given_retry_attempts assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: {max_retries: 3} ) end @@ -82,7 +82,7 @@ def test_client_given_request_given_retry_attempts assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: {max_retries: 4} ) end @@ -100,7 +100,7 @@ def test_client_retry_after_seconds openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 1) assert_raises(OpenAI::Errors::InternalServerError) do - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5.1") end assert_requested(:any, /./, times: 2) @@ -118,7 +118,7 @@ def test_client_retry_after_date assert_raises(OpenAI::Errors::InternalServerError) do Thread.current.thread_variable_set(:time_now, Time.now) - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5.1") Thread.current.thread_variable_set(:time_now, nil) end @@ -136,7 +136,7 @@ def test_client_retry_after_ms openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 1) assert_raises(OpenAI::Errors::InternalServerError) do - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5.1") end assert_requested(:any, /./, times: 2) @@ -149,7 +149,7 @@ def test_retry_count_header openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") assert_raises(OpenAI::Errors::InternalServerError) do - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5.1") end 3.times do @@ -165,7 +165,7 @@ def test_omit_retry_count_header assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: {extra_headers: {"x-stainless-retry-count" => nil}} ) end @@ -183,7 +183,7 @@ def test_overwrite_retry_count_header assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: {extra_headers: {"x-stainless-retry-count" => "42"}} ) end @@ -207,7 +207,7 @@ def test_client_redirect_307 assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: {extra_headers: {}} ) end @@ -240,7 +240,7 @@ def test_client_redirect_303 assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: {extra_headers: {}} ) end @@ -268,7 +268,7 @@ def 
test_client_redirect_auth_keep_same_origin assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: {extra_headers: {"authorization" => "Bearer xyz"}} ) end @@ -299,7 +299,7 @@ def test_client_redirect_auth_strip_cross_origin assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-5", + model: :"gpt-5.1", request_options: {extra_headers: {"authorization" => "Bearer xyz"}} ) end @@ -315,7 +315,7 @@ def test_default_headers openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5.1") assert_requested(:any, /./) do |req| headers = req.headers.transform_keys(&:downcase).fetch_values("accept", "content-type") diff --git a/test/openai/resources/beta/assistants_test.rb b/test/openai/resources/beta/assistants_test.rb index d10b07fd..e6436a04 100644 --- a/test/openai/resources/beta/assistants_test.rb +++ b/test/openai/resources/beta/assistants_test.rb @@ -4,7 +4,7 @@ class OpenAI::Test::Resources::Beta::AssistantsTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.beta.assistants.create(model: :"gpt-5") + response = @openai.beta.assistants.create(model: :"gpt-5.1") assert_pattern do response => OpenAI::Beta::Assistant diff --git a/test/openai/resources/chat/completions_test.rb b/test/openai/resources/chat/completions_test.rb index e4e742e8..15c84ca1 100644 --- a/test/openai/resources/chat/completions_test.rb +++ b/test/openai/resources/chat/completions_test.rb @@ -5,7 +5,7 @@ class OpenAI::Test::Resources::Chat::CompletionsTest < OpenAI::Test::ResourceTest def test_create_required_params response = - @openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") + @openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5.1") assert_pattern do response => OpenAI::Chat::ChatCompletion diff --git a/test/openai/resources/conversations/items_test.rb b/test/openai/resources/conversations/items_test.rb index 35270acb..f405a668 100644 --- a/test/openai/resources/conversations/items_test.rb +++ b/test/openai/resources/conversations/items_test.rb @@ -46,6 +46,10 @@ def test_retrieve_required_params in OpenAI::Responses::ResponseCodeInterpreterToolCall in OpenAI::Conversations::ConversationItem::LocalShellCall in OpenAI::Conversations::ConversationItem::LocalShellCallOutput + in OpenAI::Responses::ResponseFunctionShellToolCall + in OpenAI::Responses::ResponseFunctionShellToolCallOutput + in OpenAI::Responses::ResponseApplyPatchToolCall + in OpenAI::Responses::ResponseApplyPatchToolCallOutput in OpenAI::Conversations::ConversationItem::McpListTools in OpenAI::Conversations::ConversationItem::McpApprovalRequest in OpenAI::Conversations::ConversationItem::McpApprovalResponse @@ -135,6 +139,38 @@ def test_retrieve_required_params output: String, status: OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status | nil } + in { + type: :shell_call, + id: String, + action: OpenAI::Responses::ResponseFunctionShellToolCall::Action, + call_id: String, + status: OpenAI::Responses::ResponseFunctionShellToolCall::Status, + created_by: String | nil + } + 
in { + type: :shell_call_output, + id: String, + call_id: String, + max_output_length: Integer | nil, + output: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output]), + created_by: String | nil + } + in { + type: :apply_patch_call, + id: String, + call_id: String, + status: OpenAI::Responses::ResponseApplyPatchToolCall::Status, + created_by: String | nil, + operation: OpenAI::Responses::ResponseApplyPatchToolCall::Operation | nil + } + in { + type: :apply_patch_call_output, + id: String, + call_id: String, + output: String | nil, + status: OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status, + created_by: String | nil + } in { type: :mcp_list_tools, id: String, @@ -200,6 +236,10 @@ def test_list in OpenAI::Responses::ResponseCodeInterpreterToolCall in OpenAI::Conversations::ConversationItem::LocalShellCall in OpenAI::Conversations::ConversationItem::LocalShellCallOutput + in OpenAI::Responses::ResponseFunctionShellToolCall + in OpenAI::Responses::ResponseFunctionShellToolCallOutput + in OpenAI::Responses::ResponseApplyPatchToolCall + in OpenAI::Responses::ResponseApplyPatchToolCallOutput in OpenAI::Conversations::ConversationItem::McpListTools in OpenAI::Conversations::ConversationItem::McpApprovalRequest in OpenAI::Conversations::ConversationItem::McpApprovalResponse @@ -289,6 +329,38 @@ def test_list output: String, status: OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status | nil } + in { + type: :shell_call, + id: String, + action: OpenAI::Responses::ResponseFunctionShellToolCall::Action, + call_id: String, + status: OpenAI::Responses::ResponseFunctionShellToolCall::Status, + created_by: String | nil + } + in { + type: :shell_call_output, + id: String, + call_id: String, + max_output_length: Integer | nil, + output: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output]), + created_by: String | nil + } + in { + type: :apply_patch_call, + id: String, + call_id: String, + status: OpenAI::Responses::ResponseApplyPatchToolCall::Status, + created_by: String | nil, + operation: OpenAI::Responses::ResponseApplyPatchToolCall::Operation | nil + } + in { + type: :apply_patch_call_output, + id: String, + call_id: String, + output: String | nil, + status: OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status, + created_by: String | nil + } in { type: :mcp_list_tools, id: String, diff --git a/test/openai/resources/responses/input_items_test.rb b/test/openai/resources/responses/input_items_test.rb index de0e1377..6b171b88 100644 --- a/test/openai/resources/responses/input_items_test.rb +++ b/test/openai/resources/responses/input_items_test.rb @@ -31,6 +31,10 @@ def test_list in OpenAI::Responses::ResponseCodeInterpreterToolCall in OpenAI::Responses::ResponseItem::LocalShellCall in OpenAI::Responses::ResponseItem::LocalShellCallOutput + in OpenAI::Responses::ResponseFunctionShellToolCall + in OpenAI::Responses::ResponseFunctionShellToolCallOutput + in OpenAI::Responses::ResponseApplyPatchToolCall + in OpenAI::Responses::ResponseApplyPatchToolCallOutput in OpenAI::Responses::ResponseItem::McpListTools in OpenAI::Responses::ResponseItem::McpApprovalRequest in OpenAI::Responses::ResponseItem::McpApprovalResponse @@ -117,6 +121,38 @@ def test_list output: String, status: OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status | nil } + in { + type: :shell_call, + id: String, + action: OpenAI::Responses::ResponseFunctionShellToolCall::Action, + call_id: String, + status: 
OpenAI::Responses::ResponseFunctionShellToolCall::Status, + created_by: String | nil + } + in { + type: :shell_call_output, + id: String, + call_id: String, + max_output_length: Integer | nil, + output: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFunctionShellToolCallOutput::Output]), + created_by: String | nil + } + in { + type: :apply_patch_call, + id: String, + call_id: String, + status: OpenAI::Responses::ResponseApplyPatchToolCall::Status, + created_by: String | nil, + operation: OpenAI::Responses::ResponseApplyPatchToolCall::Operation | nil + } + in { + type: :apply_patch_call_output, + id: String, + call_id: String, + output: String | nil, + status: OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status, + created_by: String | nil + } in { type: :mcp_list_tools, id: String, diff --git a/test/openai/resources/responses_test.rb b/test/openai/resources/responses_test.rb index d8e57585..49512874 100644 --- a/test/openai/resources/responses_test.rb +++ b/test/openai/resources/responses_test.rb @@ -33,6 +33,7 @@ def test_create previous_response_id: String | nil, prompt: OpenAI::Responses::ResponsePrompt | nil, prompt_cache_key: String | nil, + prompt_cache_retention: OpenAI::Responses::Response::PromptCacheRetention | nil, reasoning: OpenAI::Reasoning | nil, safety_identifier: String | nil, service_tier: OpenAI::Responses::Response::ServiceTier | nil, @@ -76,6 +77,7 @@ def test_retrieve previous_response_id: String | nil, prompt: OpenAI::Responses::ResponsePrompt | nil, prompt_cache_key: String | nil, + prompt_cache_retention: OpenAI::Responses::Response::PromptCacheRetention | nil, reasoning: OpenAI::Reasoning | nil, safety_identifier: String | nil, service_tier: OpenAI::Responses::Response::ServiceTier | nil, @@ -127,6 +129,7 @@ def test_cancel previous_response_id: String | nil, prompt: OpenAI::Responses::ResponsePrompt | nil, prompt_cache_key: String | nil, + prompt_cache_retention: OpenAI::Responses::Response::PromptCacheRetention | nil, reasoning: OpenAI::Reasoning | nil, safety_identifier: String | nil, service_tier: OpenAI::Responses::Response::ServiceTier | nil,
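
Taken together, the `shell_call` / `shell_call_output` input items defined above imply a round trip: the model emits a `shell_call`, the integrator executes the commands, and the results are returned as a `shell_call_output`. A speculative sketch under those assumptions — the class shapes come from the signatures in this diff, the prompt is illustrative, and running model-proposed commands verbatim is unsafe outside a sandbox:

```ruby
require "open3"
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = openai.responses.create(
  model: :"gpt-5.1",
  input: "Run `ls` and summarize what you see.",
  tools: [OpenAI::Responses::FunctionShellTool.new]
)

# Execute each proposed command (sandbox this in real code!) and echo the
# results back in the shape the new input-item signatures describe.
outputs = response.output.select { |item| item.type == :shell_call }.map do |call|
  contents = call.action.commands.map do |command|
    stdout, stderr, status = Open3.capture3(command)
    OpenAI::Responses::ResponseFunctionShellCallOutputContent.new(
      outcome: OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit.new(
        exit_code: status.exitstatus
      ),
      stdout: stdout,
      stderr: stderr
    )
  end
  OpenAI::Responses::ResponseInputItem::ShellCallOutput.new(call_id: call.call_id, output: contents)
end

final = openai.responses.create(
  model: :"gpt-5.1",
  previous_response_id: response.id,
  input: outputs,
  tools: [OpenAI::Responses::FunctionShellTool.new]
)
puts(final)
```
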