diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 4ad3fef3..d661066e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.18.0" + ".": "0.18.1" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index ce30bcee..71c2d79d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 109 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-24be531010b354303d741fc9247c1f84f75978f9f7de68aca92cb4f240a04722.yml -openapi_spec_hash: 3e46f439f6a863beadc71577eb4efa15 -config_hash: ed87b9139ac595a04a2162d754df2fed +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-7ef7a457c3bf05364e66e48c9ca34f31bfef1f6c9b7c15b1812346105e0abb16.yml +openapi_spec_hash: a2b1f5d8fbb62175c93b0ebea9f10063 +config_hash: 76afa3236f36854a8705f1281b1990b8 diff --git a/CHANGELOG.md b/CHANGELOG.md index f32d4899..a2887548 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 0.18.1 (2025-08-19) + +Full Changelog: [v0.18.0...v0.18.1](https://github.com/openai/openai-ruby/compare/v0.18.0...v0.18.1) + +### Chores + +* **api:** accurately represent shape for verbosity on Chat Completions ([a19cd00](https://github.com/openai/openai-ruby/commit/a19cd00e6df3cc3f47239a25fe15f33c2cb77962)) + ## 0.18.0 (2025-08-15) Full Changelog: [v0.17.1...v0.18.0](https://github.com/openai/openai-ruby/compare/v0.17.1...v0.18.0) diff --git a/Gemfile.lock b/Gemfile.lock index 2a86b134..fa140816 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -11,7 +11,7 @@ GIT PATH remote: . specs: - openai (0.18.0) + openai (0.18.1) connection_pool GEM diff --git a/README.md b/README.md index 631e0c9c..e7226e51 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application ```ruby -gem "openai", "~> 0.18.0" +gem "openai", "~> 0.18.1" ``` diff --git a/lib/openai/models/chat/completion_create_params.rb b/lib/openai/models/chat/completion_create_params.rb index 9f15cf26..b9e16488 100644 --- a/lib/openai/models/chat/completion_create_params.rb +++ b/lib/openai/models/chat/completion_create_params.rb @@ -272,7 +272,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # our [model distillation](https://platform.openai.com/docs/guides/distillation) # or [evals](https://platform.openai.com/docs/guides/evals) products. # - # Supports text and image inputs. Note: image inputs over 10MB will be dropped. + # Supports text and image inputs. Note: image inputs over 8MB will be dropped. # # @return [Boolean, nil] optional :store, OpenAI::Internal::Type::Boolean, nil?: true @@ -292,11 +292,6 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @return [Float, nil] optional :temperature, Float, nil?: true - # @!attribute text - # - # @return [OpenAI::Models::Chat::CompletionCreateParams::Text, nil] - optional :text, -> { OpenAI::Chat::CompletionCreateParams::Text } - # @!attribute tool_choice # Controls which (if any) tool is called by the model. `none` means the model will # not call any tool and instead generates a message. 
`auto` means the model can @@ -370,7 +365,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil] optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions } - # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) + # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) # Some parameter documentations has been truncated, see # {OpenAI::Models::Chat::CompletionCreateParams} for more details. # @@ -426,8 +421,6 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @param text [OpenAI::Models::Chat::CompletionCreateParams::Text] - # # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model. # # @param tools [Array] A list of tools the model may call. You can provide either @@ -638,38 +631,6 @@ module Stop StringArray = OpenAI::Internal::Type::ArrayOf[String] end - class Text < OpenAI::Internal::Type::BaseModel - # @!attribute verbosity - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. - # - # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Text::Verbosity, nil] - optional :verbosity, enum: -> { OpenAI::Chat::CompletionCreateParams::Text::Verbosity }, nil?: true - - # @!method initialize(verbosity: nil) - # Some parameter documentations has been truncated, see - # {OpenAI::Models::Chat::CompletionCreateParams::Text} for more details. - # - # @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Text::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in - - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. 
- # - # @see OpenAI::Models::Chat::CompletionCreateParams::Text#verbosity - module Verbosity - extend OpenAI::Internal::Type::Enum - - LOW = :low - MEDIUM = :medium - HIGH = :high - - # @!method self.values - # @return [Array] - end - end - # Constrains the verbosity of the model's response. Lower values will result in # more concise responses, while higher values will result in more verbose # responses. Currently supported values are `low`, `medium`, and `high`. diff --git a/lib/openai/models/graders/text_similarity_grader.rb b/lib/openai/models/graders/text_similarity_grader.rb index a64e3314..8cc51499 100644 --- a/lib/openai/models/graders/text_similarity_grader.rb +++ b/lib/openai/models/graders/text_similarity_grader.rb @@ -5,8 +5,8 @@ module Models module Graders class TextSimilarityGrader < OpenAI::Internal::Type::BaseModel # @!attribute evaluation_metric - # The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, - # `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. # # @return [Symbol, OpenAI::Models::Graders::TextSimilarityGrader::EvaluationMetric] required :evaluation_metric, enum: -> { OpenAI::Graders::TextSimilarityGrader::EvaluationMetric } @@ -41,7 +41,7 @@ class TextSimilarityGrader < OpenAI::Internal::Type::BaseModel # # A TextSimilarityGrader object which grades text based on similarity metrics. # - # @param evaluation_metric [Symbol, OpenAI::Models::Graders::TextSimilarityGrader::EvaluationMetric] The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `r + # @param evaluation_metric [Symbol, OpenAI::Models::Graders::TextSimilarityGrader::EvaluationMetric] The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, # # @param input [String] The text being graded. # @@ -51,13 +51,14 @@ class TextSimilarityGrader < OpenAI::Internal::Type::BaseModel # # @param type [Symbol, :text_similarity] The type of grader. - # The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, - # `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. # # @see OpenAI::Models::Graders::TextSimilarityGrader#evaluation_metric module EvaluationMetric extend OpenAI::Internal::Type::Enum + COSINE = :cosine FUZZY_MATCH = :fuzzy_match BLEU = :bleu GLEU = :gleu diff --git a/lib/openai/models/responses/response.rb b/lib/openai/models/responses/response.rb index 76002c86..aaf64807 100644 --- a/lib/openai/models/responses/response.rb +++ b/lib/openai/models/responses/response.rb @@ -229,9 +229,14 @@ class Response < OpenAI::Internal::Type::BaseModel optional :status, enum: -> { OpenAI::Responses::ResponseStatus } # @!attribute text + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. 
Learn more: # - # @return [OpenAI::Models::Responses::Response::Text, nil] - optional :text, -> { OpenAI::Responses::Response::Text } + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # + # @return [OpenAI::Models::Responses::ResponseTextConfig, nil] + optional :text, -> { OpenAI::Responses::ResponseTextConfig } # @!attribute top_logprobs # An integer between 0 and 20 specifying the number of most likely tokens to @@ -341,7 +346,7 @@ def output_text # # @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`, # - # @param text [OpenAI::Models::Responses::Response::Text] + # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain # # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # @@ -475,59 +480,6 @@ module ServiceTier # @return [Array] end - # @see OpenAI::Models::Responses::Response#text - class Text < OpenAI::Internal::Type::BaseModel - # @!attribute format_ - # An object specifying the format that the model must output. - # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - # - # The default format is `{ "type": "text" }` with no additional options. - # - # **Not recommended for gpt-4o and newer models:** - # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. - # - # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil] - optional :format_, union: -> { OpenAI::Responses::ResponseFormatTextConfig }, api_name: :format - - # @!attribute verbosity - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. - # - # @return [Symbol, OpenAI::Models::Responses::Response::Text::Verbosity, nil] - optional :verbosity, enum: -> { OpenAI::Responses::Response::Text::Verbosity }, nil?: true - - # @!method initialize(format_: nil, verbosity: nil) - # Some parameter documentations has been truncated, see - # {OpenAI::Models::Responses::Response::Text} for more details. - # - # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. - # - # @param verbosity [Symbol, OpenAI::Models::Responses::Response::Text::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in - - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. 
- # - # @see OpenAI::Models::Responses::Response::Text#verbosity - module Verbosity - extend OpenAI::Internal::Type::Enum - - LOW = :low - MEDIUM = :medium - HIGH = :high - - # @!method self.values - # @return [Array] - end - end - # The truncation strategy to use for the model response. # # - `auto`: If the context of this response and previous ones exceeds the model's diff --git a/lib/openai/models/responses/response_create_params.rb b/lib/openai/models/responses/response_create_params.rb index d7786b18..a2a8432a 100644 --- a/lib/openai/models/responses/response_create_params.rb +++ b/lib/openai/models/responses/response_create_params.rb @@ -193,6 +193,8 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel optional :temperature, Float, nil?: true # @!attribute text + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: # # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) @@ -316,7 +318,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @param text [OpenAI::Models::Responses::ResponseCreateParams::Text] + # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain # # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating # @@ -407,62 +409,6 @@ class StreamOptions < OpenAI::Internal::Type::BaseModel # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds end - class Text < OpenAI::Internal::Type::BaseModel - # @!attribute format_ - # An object specifying the format that the model must output. - # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - # - # The default format is `{ "type": "text" }` with no additional options. - # - # **Not recommended for gpt-4o and newer models:** - # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. - # - # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil] - optional :format_, union: -> { OpenAI::Responses::ResponseFormatTextConfig }, api_name: :format - - # @!attribute verbosity - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. 
- # - # @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Text::Verbosity, nil] - optional :verbosity, - enum: -> { - OpenAI::Responses::ResponseCreateParams::Text::Verbosity - }, - nil?: true - - # @!method initialize(format_: nil, verbosity: nil) - # Some parameter documentations has been truncated, see - # {OpenAI::Models::Responses::ResponseCreateParams::Text} for more details. - # - # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. - # - # @param verbosity [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Text::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in - - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. - # - # @see OpenAI::Models::Responses::ResponseCreateParams::Text#verbosity - module Verbosity - extend OpenAI::Internal::Type::Enum - - LOW = :low - MEDIUM = :medium - HIGH = :high - - # @!method self.values - # @return [Array] - end - end - # How the model should select which tool (or tools) to use when generating a # response. See the `tools` parameter to see how to specify which tools the model # can call. diff --git a/lib/openai/resources/chat/completions.rb b/lib/openai/resources/chat/completions.rb index 6b08d9c4..a3518b48 100644 --- a/lib/openai/resources/chat/completions.rb +++ b/lib/openai/resources/chat/completions.rb @@ -30,7 +30,7 @@ class Completions # unsupported parameters in reasoning models, # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). # - # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) + # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) # # @param messages [Array] A list of messages comprising the conversation so far. Depending on the # @@ -84,8 +84,6 @@ class Completions # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will m # - # @param text [OpenAI::Models::Chat::CompletionCreateParams::Text] - # # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model. # # @param tools [Array] A list of tools the model may call. You can provide either @@ -236,7 +234,7 @@ def stream # unsupported parameters in reasoning models, # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). # - # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) + # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) # # @param messages [Array] A list of messages comprising the conversation so far. Depending on the # @@ -290,8 +288,6 @@ def stream # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @param text [OpenAI::Models::Chat::CompletionCreateParams::Text] - # # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model. # # @param tools [Array] A list of tools the model may call. You can provide either diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb index e6f7e4ef..fd382880 100644 --- a/lib/openai/resources/responses.rb +++ b/lib/openai/resources/responses.rb @@ -61,7 +61,7 @@ class Responses # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @param text [OpenAI::Models::Responses::ResponseCreateParams::Text] + # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. 
Can be plain # # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating # @@ -276,7 +276,7 @@ def stream(params) # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @param text [OpenAI::Models::Responses::ResponseCreateParams::Text] + # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain # # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating # diff --git a/lib/openai/version.rb b/lib/openai/version.rb index a326627c..8309d38b 100644 --- a/lib/openai/version.rb +++ b/lib/openai/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module OpenAI - VERSION = "0.18.0" + VERSION = "0.18.1" end diff --git a/rbi/openai/models/chat/completion_create_params.rbi b/rbi/openai/models/chat/completion_create_params.rbi index b8f9aef5..14fa8ae1 100644 --- a/rbi/openai/models/chat/completion_create_params.rbi +++ b/rbi/openai/models/chat/completion_create_params.rbi @@ -329,7 +329,7 @@ module OpenAI # our [model distillation](https://platform.openai.com/docs/guides/distillation) # or [evals](https://platform.openai.com/docs/guides/evals) products. # - # Supports text and image inputs. Note: image inputs over 10MB will be dropped. + # Supports text and image inputs. Note: image inputs over 8MB will be dropped. sig { returns(T.nilable(T::Boolean)) } attr_accessor :store @@ -352,14 +352,6 @@ module OpenAI sig { returns(T.nilable(Float)) } attr_accessor :temperature - sig { returns(T.nilable(OpenAI::Chat::CompletionCreateParams::Text)) } - attr_reader :text - - sig do - params(text: OpenAI::Chat::CompletionCreateParams::Text::OrHash).void - end - attr_writer :text - # Controls which (if any) tool is called by the model. `none` means the model will # not call any tool and instead generates a message. `auto` means the model can # pick between generating a message or calling one or more tools. `required` means @@ -540,7 +532,6 @@ module OpenAI stream_options: T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash), temperature: T.nilable(Float), - text: OpenAI::Chat::CompletionCreateParams::Text::OrHash, tool_choice: T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, @@ -726,7 +717,7 @@ module OpenAI # our [model distillation](https://platform.openai.com/docs/guides/distillation) # or [evals](https://platform.openai.com/docs/guides/evals) products. # - # Supports text and image inputs. Note: image inputs over 10MB will be dropped. + # Supports text and image inputs. Note: image inputs over 8MB will be dropped. store: nil, # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, @@ -735,7 +726,6 @@ module OpenAI # focused and deterministic. We generally recommend altering this or `top_p` but # not both. 
temperature: nil, - text: nil, # Controls which (if any) tool is called by the model. `none` means the model will # not call any tool and instead generates a message. `auto` means the model can # pick between generating a message or calling one or more tools. `required` means @@ -838,7 +828,6 @@ module OpenAI stream_options: T.nilable(OpenAI::Chat::ChatCompletionStreamOptions), temperature: T.nilable(Float), - text: OpenAI::Chat::CompletionCreateParams::Text, tool_choice: T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, @@ -1187,99 +1176,6 @@ module OpenAI ) end - class Text < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Chat::CompletionCreateParams::Text, - OpenAI::Internal::AnyHash - ) - end - - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. - sig do - returns( - T.nilable( - OpenAI::Chat::CompletionCreateParams::Text::Verbosity::OrSymbol - ) - ) - end - attr_accessor :verbosity - - sig do - params( - verbosity: - T.nilable( - OpenAI::Chat::CompletionCreateParams::Text::Verbosity::OrSymbol - ) - ).returns(T.attached_class) - end - def self.new( - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. - verbosity: nil - ) - end - - sig do - override.returns( - { - verbosity: - T.nilable( - OpenAI::Chat::CompletionCreateParams::Text::Verbosity::OrSymbol - ) - } - ) - end - def to_hash - end - - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. - module Verbosity - extend OpenAI::Internal::Type::Enum - - TaggedSymbol = - T.type_alias do - T.all( - Symbol, - OpenAI::Chat::CompletionCreateParams::Text::Verbosity - ) - end - OrSymbol = T.type_alias { T.any(Symbol, String) } - - LOW = - T.let( - :low, - OpenAI::Chat::CompletionCreateParams::Text::Verbosity::TaggedSymbol - ) - MEDIUM = - T.let( - :medium, - OpenAI::Chat::CompletionCreateParams::Text::Verbosity::TaggedSymbol - ) - HIGH = - T.let( - :high, - OpenAI::Chat::CompletionCreateParams::Text::Verbosity::TaggedSymbol - ) - - sig do - override.returns( - T::Array[ - OpenAI::Chat::CompletionCreateParams::Text::Verbosity::TaggedSymbol - ] - ) - end - def self.values - end - end - end - # Constrains the verbosity of the model's response. Lower values will result in # more concise responses, while higher values will result in more verbose # responses. Currently supported values are `low`, `medium`, and `high`. diff --git a/rbi/openai/models/graders/text_similarity_grader.rbi b/rbi/openai/models/graders/text_similarity_grader.rbi index 687e2f93..389c8b3b 100644 --- a/rbi/openai/models/graders/text_similarity_grader.rbi +++ b/rbi/openai/models/graders/text_similarity_grader.rbi @@ -14,8 +14,8 @@ module OpenAI ) end - # The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, - # `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. 
sig do returns( OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::OrSymbol @@ -51,8 +51,8 @@ module OpenAI ).returns(T.attached_class) end def self.new( - # The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, - # `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. evaluation_metric:, # The text being graded. input:, @@ -80,8 +80,8 @@ module OpenAI def to_hash end - # The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, - # `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. module EvaluationMetric extend OpenAI::Internal::Type::Enum @@ -94,6 +94,11 @@ module OpenAI end OrSymbol = T.type_alias { T.any(Symbol, String) } + COSINE = + T.let( + :cosine, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) FUZZY_MATCH = T.let( :fuzzy_match, diff --git a/rbi/openai/models/responses/response.rbi b/rbi/openai/models/responses/response.rbi index 8a36dab9..94087328 100644 --- a/rbi/openai/models/responses/response.rbi +++ b/rbi/openai/models/responses/response.rbi @@ -230,10 +230,15 @@ module OpenAI sig { params(status: OpenAI::Responses::ResponseStatus::OrSymbol).void } attr_writer :status - sig { returns(T.nilable(OpenAI::Responses::Response::Text)) } + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig { returns(T.nilable(OpenAI::Responses::ResponseTextConfig)) } attr_reader :text - sig { params(text: OpenAI::Responses::Response::Text::OrHash).void } + sig { params(text: OpenAI::Responses::ResponseTextConfig::OrHash).void } attr_writer :text # An integer between 0 and 20 specifying the number of most likely tokens to @@ -345,7 +350,7 @@ module OpenAI service_tier: T.nilable(OpenAI::Responses::Response::ServiceTier::OrSymbol), status: OpenAI::Responses::ResponseStatus::OrSymbol, - text: OpenAI::Responses::Response::Text::OrHash, + text: OpenAI::Responses::ResponseTextConfig::OrHash, top_logprobs: T.nilable(Integer), truncation: T.nilable(OpenAI::Responses::Response::Truncation::OrSymbol), @@ -478,6 +483,11 @@ module OpenAI # The status of the response generation. One of `completed`, `failed`, # `in_progress`, `cancelled`, `queued`, or `incomplete`. status: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) text: nil, # An integer between 0 and 20 specifying the number of most likely tokens to # return at each token position, each with an associated log probability. 
@@ -536,7 +546,7 @@ module OpenAI OpenAI::Responses::Response::ServiceTier::TaggedSymbol ), status: OpenAI::Responses::ResponseStatus::TaggedSymbol, - text: OpenAI::Responses::Response::Text, + text: OpenAI::Responses::ResponseTextConfig, top_logprobs: T.nilable(Integer), truncation: T.nilable( @@ -751,149 +761,6 @@ module OpenAI end end - class Text < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Responses::Response::Text, - OpenAI::Internal::AnyHash - ) - end - - # An object specifying the format that the model must output. - # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - # - # The default format is `{ "type": "text" }` with no additional options. - # - # **Not recommended for gpt-4o and newer models:** - # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. - sig do - returns( - T.nilable(OpenAI::Responses::ResponseFormatTextConfig::Variants) - ) - end - attr_reader :format_ - - sig do - params( - format_: - T.any( - OpenAI::ResponseFormatText::OrHash, - OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, - OpenAI::ResponseFormatJSONObject::OrHash - ) - ).void - end - attr_writer :format_ - - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. - sig do - returns( - T.nilable( - OpenAI::Responses::Response::Text::Verbosity::TaggedSymbol - ) - ) - end - attr_accessor :verbosity - - sig do - params( - format_: - T.any( - OpenAI::ResponseFormatText::OrHash, - OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, - OpenAI::ResponseFormatJSONObject::OrHash - ), - verbosity: - T.nilable( - OpenAI::Responses::Response::Text::Verbosity::OrSymbol - ) - ).returns(T.attached_class) - end - def self.new( - # An object specifying the format that the model must output. - # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - # - # The default format is `{ "type": "text" }` with no additional options. - # - # **Not recommended for gpt-4o and newer models:** - # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. - format_: nil, - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. - verbosity: nil - ) - end - - sig do - override.returns( - { - format_: OpenAI::Responses::ResponseFormatTextConfig::Variants, - verbosity: - T.nilable( - OpenAI::Responses::Response::Text::Verbosity::TaggedSymbol - ) - } - ) - end - def to_hash - end - - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. 
Currently supported values are `low`, `medium`, and `high`. - module Verbosity - extend OpenAI::Internal::Type::Enum - - TaggedSymbol = - T.type_alias do - T.all(Symbol, OpenAI::Responses::Response::Text::Verbosity) - end - OrSymbol = T.type_alias { T.any(Symbol, String) } - - LOW = - T.let( - :low, - OpenAI::Responses::Response::Text::Verbosity::TaggedSymbol - ) - MEDIUM = - T.let( - :medium, - OpenAI::Responses::Response::Text::Verbosity::TaggedSymbol - ) - HIGH = - T.let( - :high, - OpenAI::Responses::Response::Text::Verbosity::TaggedSymbol - ) - - sig do - override.returns( - T::Array[ - OpenAI::Responses::Response::Text::Verbosity::TaggedSymbol - ] - ) - end - def self.values - end - end - end - # The truncation strategy to use for the model response. # # - `auto`: If the context of this response and previous ones exceeds the model's diff --git a/rbi/openai/models/responses/response_create_params.rbi b/rbi/openai/models/responses/response_create_params.rbi index 9ff76fa1..c0e490be 100644 --- a/rbi/openai/models/responses/response_create_params.rbi +++ b/rbi/openai/models/responses/response_create_params.rbi @@ -232,9 +232,12 @@ module OpenAI sig { returns(T.nilable(Float)) } attr_accessor :temperature - sig do - returns(T.nilable(OpenAI::Responses::ResponseCreateParams::Text)) - end + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig { returns(T.nilable(OpenAI::Responses::ResponseTextConfig)) } attr_reader :text sig do @@ -413,7 +416,7 @@ module OpenAI OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash ), temperature: T.nilable(Float), - text: OpenAI::Responses::ResponseCreateParams::Text::OrHash, + text: OpenAI::Responses::ResponseTextConfig::OrHash, tool_choice: T.any( OpenAI::Responses::ToolChoiceOptions::OrSymbol, @@ -556,6 +559,11 @@ module OpenAI # focused and deterministic. We generally recommend altering this or `top_p` but # not both. temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) text: nil, # How the model should select which tool (or tools) to use when generating a # response. See the `tools` parameter to see how to specify which tools the model @@ -640,7 +648,7 @@ module OpenAI OpenAI::Responses::ResponseCreateParams::StreamOptions ), temperature: T.nilable(Float), - text: OpenAI::Responses::ResponseCreateParams::Text, + text: OpenAI::Responses::ResponseTextConfig, tool_choice: T.any( OpenAI::Responses::ToolChoiceOptions::OrSymbol, @@ -813,163 +821,6 @@ module OpenAI end end - class Text < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Responses::ResponseCreateParams::Text, - OpenAI::Internal::AnyHash - ) - end - - # An object specifying the format that the model must output. - # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - # - # The default format is `{ "type": "text" }` with no additional options. 
- # - # **Not recommended for gpt-4o and newer models:** - # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. - sig do - returns( - T.nilable( - T.any( - OpenAI::ResponseFormatText, - OpenAI::Responses::ResponseFormatTextJSONSchemaConfig, - OpenAI::ResponseFormatJSONObject - ) - ) - ) - end - attr_reader :format_ - - sig do - params( - format_: - T.any( - OpenAI::ResponseFormatText::OrHash, - OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, - OpenAI::ResponseFormatJSONObject::OrHash - ) - ).void - end - attr_writer :format_ - - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. - sig do - returns( - T.nilable( - OpenAI::Responses::ResponseCreateParams::Text::Verbosity::OrSymbol - ) - ) - end - attr_accessor :verbosity - - sig do - params( - format_: - T.any( - OpenAI::ResponseFormatText::OrHash, - OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, - OpenAI::ResponseFormatJSONObject::OrHash - ), - verbosity: - T.nilable( - OpenAI::Responses::ResponseCreateParams::Text::Verbosity::OrSymbol - ) - ).returns(T.attached_class) - end - def self.new( - # An object specifying the format that the model must output. - # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - # - # The default format is `{ "type": "text" }` with no additional options. - # - # **Not recommended for gpt-4o and newer models:** - # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. - format_: nil, - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. - verbosity: nil - ) - end - - sig do - override.returns( - { - format_: - T.any( - OpenAI::ResponseFormatText, - OpenAI::Responses::ResponseFormatTextJSONSchemaConfig, - OpenAI::ResponseFormatJSONObject - ), - verbosity: - T.nilable( - OpenAI::Responses::ResponseCreateParams::Text::Verbosity::OrSymbol - ) - } - ) - end - def to_hash - end - - # Constrains the verbosity of the model's response. Lower values will result in - # more concise responses, while higher values will result in more verbose - # responses. Currently supported values are `low`, `medium`, and `high`. 
- module Verbosity - extend OpenAI::Internal::Type::Enum - - TaggedSymbol = - T.type_alias do - T.all( - Symbol, - OpenAI::Responses::ResponseCreateParams::Text::Verbosity - ) - end - OrSymbol = T.type_alias { T.any(Symbol, String) } - - LOW = - T.let( - :low, - OpenAI::Responses::ResponseCreateParams::Text::Verbosity::TaggedSymbol - ) - MEDIUM = - T.let( - :medium, - OpenAI::Responses::ResponseCreateParams::Text::Verbosity::TaggedSymbol - ) - HIGH = - T.let( - :high, - OpenAI::Responses::ResponseCreateParams::Text::Verbosity::TaggedSymbol - ) - - sig do - override.returns( - T::Array[ - OpenAI::Responses::ResponseCreateParams::Text::Verbosity::TaggedSymbol - ] - ) - end - def self.values - end - end - end - # How the model should select which tool (or tools) to use when generating a # response. See the `tools` parameter to see how to specify which tools the model # can call. diff --git a/rbi/openai/resources/chat/completions.rbi b/rbi/openai/resources/chat/completions.rbi index bd3b6845..f65a22d6 100644 --- a/rbi/openai/resources/chat/completions.rbi +++ b/rbi/openai/resources/chat/completions.rbi @@ -86,7 +86,6 @@ module OpenAI stream_options: T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash), temperature: T.nilable(Float), - text: OpenAI::Chat::CompletionCreateParams::Text::OrHash, tool_choice: T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, @@ -273,7 +272,7 @@ module OpenAI # our [model distillation](https://platform.openai.com/docs/guides/distillation) # or [evals](https://platform.openai.com/docs/guides/evals) products. # - # Supports text and image inputs. Note: image inputs over 10MB will be dropped. + # Supports text and image inputs. Note: image inputs over 8MB will be dropped. store: nil, # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, @@ -282,7 +281,6 @@ module OpenAI # focused and deterministic. We generally recommend altering this or `top_p` but # not both. temperature: nil, - text: nil, # Controls which (if any) tool is called by the model. `none` means the model will # not call any tool and instead generates a message. `auto` means the model can # pick between generating a message or calling one or more tools. `required` means @@ -406,7 +404,6 @@ module OpenAI stream_options: T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash), temperature: T.nilable(Float), - text: OpenAI::Chat::CompletionCreateParams::Text::OrHash, tool_choice: T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, @@ -592,7 +589,7 @@ module OpenAI # our [model distillation](https://platform.openai.com/docs/guides/distillation) # or [evals](https://platform.openai.com/docs/guides/evals) products. # - # Supports text and image inputs. Note: image inputs over 10MB will be dropped. + # Supports text and image inputs. Note: image inputs over 8MB will be dropped. store: nil, # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, @@ -601,7 +598,6 @@ module OpenAI # focused and deterministic. We generally recommend altering this or `top_p` but # not both. temperature: nil, - text: nil, # Controls which (if any) tool is called by the model. `none` means the model will # not call any tool and instead generates a message. `auto` means the model can # pick between generating a message or calling one or more tools. 
`required` means diff --git a/rbi/openai/resources/responses.rbi b/rbi/openai/resources/responses.rbi index de0f3a49..0bab0865 100644 --- a/rbi/openai/resources/responses.rbi +++ b/rbi/openai/resources/responses.rbi @@ -201,6 +201,11 @@ module OpenAI # focused and deterministic. We generally recommend altering this or `top_p` but # not both. temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) text: nil, # How the model should select which tool (or tools) to use when generating a # response. See the `tools` parameter to see how to specify which tools the model @@ -454,6 +459,11 @@ module OpenAI # focused and deterministic. We generally recommend altering this or `top_p` but # not both. temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) text: nil, # How the model should select which tool (or tools) to use when generating a # response. See the `tools` parameter to see how to specify which tools the model diff --git a/sig/openai/models/chat/completion_create_params.rbs b/sig/openai/models/chat/completion_create_params.rbs index d6328830..e02095c4 100644 --- a/sig/openai/models/chat/completion_create_params.rbs +++ b/sig/openai/models/chat/completion_create_params.rbs @@ -29,7 +29,6 @@ module OpenAI store: bool?, stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, temperature: Float?, - text: OpenAI::Chat::CompletionCreateParams::Text, tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], top_logprobs: Integer?, @@ -114,12 +113,6 @@ module OpenAI attr_accessor temperature: Float? - attr_reader text: OpenAI::Chat::CompletionCreateParams::Text? - - def text=: ( - OpenAI::Chat::CompletionCreateParams::Text - ) -> OpenAI::Chat::CompletionCreateParams::Text - attr_reader tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option? def tool_choice=: ( @@ -175,7 +168,6 @@ module OpenAI ?store: bool?, ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, ?temperature: Float?, - ?text: OpenAI::Chat::CompletionCreateParams::Text, ?tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, ?tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], ?top_logprobs: Integer?, @@ -213,7 +205,6 @@ module OpenAI store: bool?, stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, temperature: Float?, - text: OpenAI::Chat::CompletionCreateParams::Text, tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], top_logprobs: Integer?, @@ -332,35 +323,6 @@ module OpenAI StringArray: OpenAI::Internal::Type::Converter end - type text = - { - verbosity: OpenAI::Models::Chat::CompletionCreateParams::Text::verbosity? - } - - class Text < OpenAI::Internal::Type::BaseModel - attr_accessor verbosity: OpenAI::Models::Chat::CompletionCreateParams::Text::verbosity? - - def initialize: ( - ?verbosity: OpenAI::Models::Chat::CompletionCreateParams::Text::verbosity? 
- ) -> void - - def to_hash: -> { - verbosity: OpenAI::Models::Chat::CompletionCreateParams::Text::verbosity? - } - - type verbosity = :low | :medium | :high - - module Verbosity - extend OpenAI::Internal::Type::Enum - - LOW: :low - MEDIUM: :medium - HIGH: :high - - def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::Text::verbosity] - end - end - type verbosity = :low | :medium | :high module Verbosity diff --git a/sig/openai/models/graders/text_similarity_grader.rbs b/sig/openai/models/graders/text_similarity_grader.rbs index 9002b540..e1da4b35 100644 --- a/sig/openai/models/graders/text_similarity_grader.rbs +++ b/sig/openai/models/graders/text_similarity_grader.rbs @@ -40,7 +40,8 @@ module OpenAI } type evaluation_metric = - :fuzzy_match + :cosine + | :fuzzy_match | :bleu | :gleu | :meteor @@ -54,6 +55,7 @@ module OpenAI module EvaluationMetric extend OpenAI::Internal::Type::Enum + COSINE: :cosine FUZZY_MATCH: :fuzzy_match BLEU: :bleu GLEU: :gleu diff --git a/sig/openai/models/responses/response.rbs b/sig/openai/models/responses/response.rbs index 34a35148..6bf78a3c 100644 --- a/sig/openai/models/responses/response.rbs +++ b/sig/openai/models/responses/response.rbs @@ -27,7 +27,7 @@ module OpenAI safety_identifier: String, service_tier: OpenAI::Models::Responses::Response::service_tier?, status: OpenAI::Models::Responses::response_status, - text: OpenAI::Responses::Response::Text, + text: OpenAI::Responses::ResponseTextConfig, top_logprobs: Integer?, truncation: OpenAI::Models::Responses::Response::truncation?, usage: OpenAI::Responses::ResponseUsage, @@ -91,11 +91,11 @@ module OpenAI OpenAI::Models::Responses::response_status ) -> OpenAI::Models::Responses::response_status - attr_reader text: OpenAI::Responses::Response::Text? + attr_reader text: OpenAI::Responses::ResponseTextConfig? def text=: ( - OpenAI::Responses::Response::Text - ) -> OpenAI::Responses::Response::Text + OpenAI::Responses::ResponseTextConfig + ) -> OpenAI::Responses::ResponseTextConfig attr_accessor top_logprobs: Integer? @@ -135,7 +135,7 @@ module OpenAI ?safety_identifier: String, ?service_tier: OpenAI::Models::Responses::Response::service_tier?, ?status: OpenAI::Models::Responses::response_status, - ?text: OpenAI::Responses::Response::Text, + ?text: OpenAI::Responses::ResponseTextConfig, ?top_logprobs: Integer?, ?truncation: OpenAI::Models::Responses::Response::truncation?, ?usage: OpenAI::Responses::ResponseUsage, @@ -168,7 +168,7 @@ module OpenAI safety_identifier: String, service_tier: OpenAI::Models::Responses::Response::service_tier?, status: OpenAI::Models::Responses::response_status, - text: OpenAI::Responses::Response::Text, + text: OpenAI::Responses::ResponseTextConfig, top_logprobs: Integer?, truncation: OpenAI::Models::Responses::Response::truncation?, usage: OpenAI::Responses::ResponseUsage, @@ -246,44 +246,6 @@ module OpenAI def self?.values: -> ::Array[OpenAI::Models::Responses::Response::service_tier] end - type text = - { - format_: OpenAI::Models::Responses::response_format_text_config, - verbosity: OpenAI::Models::Responses::Response::Text::verbosity? - } - - class Text < OpenAI::Internal::Type::BaseModel - attr_reader format_: OpenAI::Models::Responses::response_format_text_config? - - def format_=: ( - OpenAI::Models::Responses::response_format_text_config - ) -> OpenAI::Models::Responses::response_format_text_config - - attr_accessor verbosity: OpenAI::Models::Responses::Response::Text::verbosity? 
- - def initialize: ( - ?format_: OpenAI::Models::Responses::response_format_text_config, - ?verbosity: OpenAI::Models::Responses::Response::Text::verbosity? - ) -> void - - def to_hash: -> { - format_: OpenAI::Models::Responses::response_format_text_config, - verbosity: OpenAI::Models::Responses::Response::Text::verbosity? - } - - type verbosity = :low | :medium | :high - - module Verbosity - extend OpenAI::Internal::Type::Enum - - LOW: :low - MEDIUM: :medium - HIGH: :high - - def self?.values: -> ::Array[OpenAI::Models::Responses::Response::Text::verbosity] - end - end - type truncation = :auto | :disabled module Truncation diff --git a/sig/openai/models/responses/response_create_params.rbs b/sig/openai/models/responses/response_create_params.rbs index 9abee74d..83f641b4 100644 --- a/sig/openai/models/responses/response_create_params.rbs +++ b/sig/openai/models/responses/response_create_params.rbs @@ -21,7 +21,7 @@ module OpenAI store: bool?, stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?, temperature: Float?, - text: OpenAI::Responses::ResponseCreateParams::Text, + text: OpenAI::Responses::ResponseTextConfig, tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice, tools: ::Array[OpenAI::Models::Responses::tool], top_logprobs: Integer?, @@ -83,11 +83,11 @@ module OpenAI attr_accessor temperature: Float? - attr_reader text: OpenAI::Responses::ResponseCreateParams::Text? + attr_reader text: OpenAI::Responses::ResponseTextConfig? def text=: ( - OpenAI::Responses::ResponseCreateParams::Text - ) -> OpenAI::Responses::ResponseCreateParams::Text + OpenAI::Responses::ResponseTextConfig + ) -> OpenAI::Responses::ResponseTextConfig attr_reader tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice? @@ -130,7 +130,7 @@ module OpenAI ?store: bool?, ?stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?, ?temperature: Float?, - ?text: OpenAI::Responses::ResponseCreateParams::Text, + ?text: OpenAI::Responses::ResponseTextConfig, ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice, ?tools: ::Array[OpenAI::Models::Responses::tool], ?top_logprobs: Integer?, @@ -159,7 +159,7 @@ module OpenAI store: bool?, stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?, temperature: Float?, - text: OpenAI::Responses::ResponseCreateParams::Text, + text: OpenAI::Responses::ResponseTextConfig, tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice, tools: ::Array[OpenAI::Models::Responses::tool], top_logprobs: Integer?, @@ -203,44 +203,6 @@ module OpenAI def to_hash: -> { include_obfuscation: bool } end - type text = - { - format_: OpenAI::Models::Responses::response_format_text_config, - verbosity: OpenAI::Models::Responses::ResponseCreateParams::Text::verbosity? - } - - class Text < OpenAI::Internal::Type::BaseModel - attr_reader format_: OpenAI::Models::Responses::response_format_text_config? - - def format_=: ( - OpenAI::Models::Responses::response_format_text_config - ) -> OpenAI::Models::Responses::response_format_text_config - - attr_accessor verbosity: OpenAI::Models::Responses::ResponseCreateParams::Text::verbosity? - - def initialize: ( - ?format_: OpenAI::Models::Responses::response_format_text_config, - ?verbosity: OpenAI::Models::Responses::ResponseCreateParams::Text::verbosity? - ) -> void - - def to_hash: -> { - format_: OpenAI::Models::Responses::response_format_text_config, - verbosity: OpenAI::Models::Responses::ResponseCreateParams::Text::verbosity? 
- } - - type verbosity = :low | :medium | :high - - module Verbosity - extend OpenAI::Internal::Type::Enum - - LOW: :low - MEDIUM: :medium - HIGH: :high - - def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::Text::verbosity] - end - end - type tool_choice = OpenAI::Models::Responses::tool_choice_options | OpenAI::Responses::ToolChoiceAllowed diff --git a/sig/openai/resources/chat/completions.rbs b/sig/openai/resources/chat/completions.rbs index 0634d0eb..a4237ff1 100644 --- a/sig/openai/resources/chat/completions.rbs +++ b/sig/openai/resources/chat/completions.rbs @@ -31,7 +31,6 @@ module OpenAI ?store: bool?, ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, ?temperature: Float?, - ?text: OpenAI::Chat::CompletionCreateParams::Text, ?tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, ?tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], ?top_logprobs: Integer?, @@ -69,7 +68,6 @@ module OpenAI ?store: bool?, ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, ?temperature: Float?, - ?text: OpenAI::Chat::CompletionCreateParams::Text, ?tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, ?tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], ?top_logprobs: Integer?, diff --git a/sig/openai/resources/responses.rbs b/sig/openai/resources/responses.rbs index 235cf7b8..502b7174 100644 --- a/sig/openai/resources/responses.rbs +++ b/sig/openai/resources/responses.rbs @@ -22,7 +22,7 @@ module OpenAI ?store: bool?, ?stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?, ?temperature: Float?, - ?text: OpenAI::Responses::ResponseCreateParams::Text, + ?text: OpenAI::Responses::ResponseTextConfig, ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice, ?tools: ::Array[OpenAI::Models::Responses::tool], ?top_logprobs: Integer?, @@ -51,7 +51,7 @@ module OpenAI ?store: bool?, ?stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?, ?temperature: Float?, - ?text: OpenAI::Responses::ResponseCreateParams::Text, + ?text: OpenAI::Responses::ResponseTextConfig, ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice, ?tools: ::Array[OpenAI::Models::Responses::tool], ?top_logprobs: Integer?, diff --git a/test/openai/resources/responses_test.rb b/test/openai/resources/responses_test.rb index cf4ce20f..28d1d7e4 100644 --- a/test/openai/resources/responses_test.rb +++ b/test/openai/resources/responses_test.rb @@ -36,7 +36,7 @@ def test_create safety_identifier: String | nil, service_tier: OpenAI::Responses::Response::ServiceTier | nil, status: OpenAI::Responses::ResponseStatus | nil, - text: OpenAI::Responses::Response::Text | nil, + text: OpenAI::Responses::ResponseTextConfig | nil, top_logprobs: Integer | nil, truncation: OpenAI::Responses::Response::Truncation | nil, usage: OpenAI::Responses::ResponseUsage | nil, @@ -78,7 +78,7 @@ def test_retrieve safety_identifier: String | nil, service_tier: OpenAI::Responses::Response::ServiceTier | nil, status: OpenAI::Responses::ResponseStatus | nil, - text: OpenAI::Responses::Response::Text | nil, + text: OpenAI::Responses::ResponseTextConfig | nil, top_logprobs: Integer | nil, truncation: OpenAI::Responses::Response::Truncation | nil, usage: OpenAI::Responses::ResponseUsage | nil, @@ -128,7 +128,7 @@ def test_cancel safety_identifier: String | nil, service_tier: OpenAI::Responses::Response::ServiceTier | nil, status: OpenAI::Responses::ResponseStatus | nil, - text: OpenAI::Responses::Response::Text | nil, 
+ text: OpenAI::Responses::ResponseTextConfig | nil, top_logprobs: Integer | nil, truncation: OpenAI::Responses::Response::Truncation | nil, usage: OpenAI::Responses::ResponseUsage | nil,