diff --git a/.solargraph.yml b/.solargraph.yml
index 4f571833..18a89fcb 100644
--- a/.solargraph.yml
+++ b/.solargraph.yml
@@ -5,6 +5,7 @@ include:
   - 'Rakefile'
   - 'examples/**/*.rb'
   - 'lib/**/*.rb'
+  - 'test/openai/resource_namespaces.rb'
   - 'test/openai/test_helper.rb'
 exclude:
   - 'rbi/**/*'
diff --git a/lib/openai/internal/transport/base_client.rb b/lib/openai/internal/transport/base_client.rb
index a46be452..b2e526d5 100644
--- a/lib/openai/internal/transport/base_client.rb
+++ b/lib/openai/internal/transport/base_client.rb
@@ -393,7 +393,7 @@ def initialize(
       end

       # Execute the request specified by `req`. This is the method that all resource
-      #   methods call into.
+      # methods call into.
       #
       # @overload request(method, path, query: {}, headers: {}, body: nil, unwrap: nil, page: nil, stream: nil, model: OpenAI::Internal::Type::Unknown, options: {})
       #
diff --git a/lib/openai/internal/transport/pooled_net_requester.rb b/lib/openai/internal/transport/pooled_net_requester.rb
index 74f76024..3ef69e08 100644
--- a/lib/openai/internal/transport/pooled_net_requester.rb
+++ b/lib/openai/internal/transport/pooled_net_requester.rb
@@ -6,7 +6,7 @@ module Transport
     # @api private
     class PooledNetRequester
       # from the golang stdlib
-      #   https://github.com/golang/go/blob/c8eced8580028328fde7c03cbfcb720ce15b2358/src/net/http/transport.go#L49
+      # https://github.com/golang/go/blob/c8eced8580028328fde7c03cbfcb720ce15b2358/src/net/http/transport.go#L49
       KEEP_ALIVE_TIMEOUT = 30

       class << self
diff --git a/lib/openai/internal/type/base_model.rb b/lib/openai/internal/type/base_model.rb
index 1f4f669f..026af072 100644
--- a/lib/openai/internal/type/base_model.rb
+++ b/lib/openai/internal/type/base_model.rb
@@ -19,7 +19,7 @@ class << self
         # @api private
         #
         # Assumes superclass fields are totally defined before fields are accessed /
-        #   defined on subclasses.
+        # defined on subclasses.
         #
         # @return [Hash{Symbol=>Hash{Symbol=>Object}}]
         def known_fields
@@ -143,7 +143,7 @@ def optional(name_sym, type_info, spec = {})
         # @api private
         #
         # `request_only` attributes not excluded from `.#coerce` when receiving responses
-        #   even if well behaved servers should not send them
+        # even though well-behaved servers should not send them
         #
         # @param blk [Proc]
         private def request_only(&blk)
@@ -291,11 +291,11 @@ def dump(value)
       end

       # Returns the raw value associated with the given key, if found. Otherwise, nil is
-      #   returned.
+      # returned.
       #
-      #   It is valid to lookup keys that are not in the API spec, for example to access
-      #   undocumented features. This method does not parse response data into
-      #   higher-level types. Lookup by anything other than a Symbol is an ArgumentError.
+      # It is valid to lookup keys that are not in the API spec, for example to access
+      # undocumented features. This method does not parse response data into
+      # higher-level types. Lookup by anything other than a Symbol is an ArgumentError.
       #
       # @param key [Symbol]
       #
@@ -310,12 +310,12 @@ def [](key)

       # Returns a Hash of the data underlying this object. O(1)
       #
-      #   Keys are Symbols and values are the raw values from the response. The return
-      #   value indicates which values were ever set on the object. i.e. there will be a
-      #   key in this hash if they ever were, even if the set value was nil.
+      # Keys are Symbols and values are the raw values from the response. The return
+      # value indicates which values were ever set on the object, i.e. a key will be
+      # present in this hash if it was ever set, even if the stored value was nil.
       #
-      #   This method is not recursive. The returned value is shared by the object, so it
-      #   should not be mutated.
+      # This method is not recursive. The returned value is shared by the object, so it
+      # should not be mutated.
       #
       # @return [Hash{Symbol=>Object}]
       def to_h = @data
diff --git a/lib/openai/internal/type/converter.rb b/lib/openai/internal/type/converter.rb
index 1fbf548f..ef77d9eb 100644
--- a/lib/openai/internal/type/converter.rb
+++ b/lib/openai/internal/type/converter.rb
@@ -75,37 +75,37 @@ def type_info(spec)
        #
        # Based on `target`, transform `value` into `target`, to the extent possible:
        #
-        #   1. if the given `value` conforms to `target` already, return the given `value`
-        #   2. if it's possible and safe to convert the given `value` to `target`, then the
-        #      converted value
-        #   3. otherwise, the given `value` unaltered
+        # 1. if the given `value` conforms to `target` already, return the given `value`
+        # 2. if it's possible and safe to convert the given `value` to `target`, then the
+        #    converted value
+        # 3. otherwise, the given `value` unaltered
        #
-        #   The coercion process is subject to improvement between minor release versions.
-        #   See https://docs.pydantic.dev/latest/concepts/unions/#smart-mode
+        # The coercion process is subject to improvement between minor release versions.
+        # See https://docs.pydantic.dev/latest/concepts/unions/#smart-mode
        #
        # @param target [OpenAI::Internal::Type::Converter, Class]
        #
        # @param value [Object]
        #
        # @param state [Hash{Symbol=>Object}] The `strictness` is one of `true`, `false`, or `:strong`. This informs the
-        #   coercion strategy when we have to decide between multiple possible conversion
-        #   targets:
+        # coercion strategy when we have to decide between multiple possible conversion
+        # targets:
        #
-        #   - `true`: the conversion must be exact, with minimum coercion.
-        #   - `false`: the conversion can be approximate, with some coercion.
-        #   - `:strong`: the conversion must be exact, with no coercion, and raise an error
-        #     if not possible.
+        # - `true`: the conversion must be exact, with minimum coercion.
+        # - `false`: the conversion can be approximate, with some coercion.
+        # - `:strong`: the conversion must be exact, with no coercion, and raise an error
+        #   if not possible.
        #
-        #   The `exactness` is `Hash` with keys being one of `yes`, `no`, or `maybe`. For
-        #   any given conversion attempt, the exactness will be updated based on how closely
-        #   the value recursively matches the target type:
+        # The `exactness` is a `Hash` with keys being one of `yes`, `no`, or `maybe`. For
+        # any given conversion attempt, the exactness will be updated based on how closely
+        # the value recursively matches the target type:
        #
-        #   - `yes`: the value can be converted to the target type with minimum coercion.
-        #   - `maybe`: the value can be converted to the target type with some reasonable
-        #     coercion.
-        #   - `no`: the value cannot be converted to the target type.
+        # - `yes`: the value can be converted to the target type with minimum coercion.
+        # - `maybe`: the value can be converted to the target type with some reasonable
+        #   coercion.
+        # - `no`: the value cannot be converted to the target type.
        #
-        #   See implementation below for more details.
+        # See implementation below for more details.
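+        #
+        # @example
+        #   # Illustrative sketch, not from the upstream docs: it assumes the internal
+        #   # `coerce` entry point and the state hash described above.
+        #   state = {strictness: true, exactness: {yes: 0, no: 0, maybe: 0}, branched: 0}
+        #   OpenAI::Internal::Type::Converter.coerce(Float, 1, state: state)
+        #   # => 1.0; `state[:exactness]` is updated to record how close the match was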
        #
        # @option state [Boolean, :strong] :strictness
        #
diff --git a/lib/openai/internal/type/enum.rb b/lib/openai/internal/type/enum.rb
index 5ba8860c..2a005d61 100644
--- a/lib/openai/internal/type/enum.rb
+++ b/lib/openai/internal/type/enum.rb
@@ -6,15 +6,15 @@ module Type
      # @api private
      #
      # A value from among a specified list of options. OpenAPI enum values map to Ruby
-      #   values in the SDK as follows:
+      # values in the SDK as follows:
      #
-      #   1. boolean => true | false
-      #   2. integer => Integer
-      #   3. float => Float
-      #   4. string => Symbol
+      # 1. boolean => true | false
+      # 2. integer => Integer
+      # 3. float => Float
+      # 4. string => Symbol
      #
-      #   We can therefore convert string values to Symbols, but can't convert other
-      #   values safely.
+      # We can therefore convert string values to Symbols, but can't convert other
+      # values safely.
      #
      # @example
      #   # `chat_model` is a `OpenAI::Models::ChatModel`
@@ -70,7 +70,7 @@ def ==(other)
      # @api private
      #
      # Unlike with primitives, `Enum` additionally validates that the value is a member
-      #   of the enum.
+      # of the enum.
      #
      # @param value [String, Symbol, Object]
      #
diff --git a/lib/openai/internal/util.rb b/lib/openai/internal/util.rb
index 7fa9d32d..fd5e4b81 100644
--- a/lib/openai/internal/util.rb
+++ b/lib/openai/internal/util.rb
@@ -152,7 +152,7 @@ class << self
       # @api private
       #
       # Recursively merge one hash with another. If the values at a given key are not
-      #   both hashes, just take the new value.
+      # both hashes, just take the new value.
       #
       # @param values [Array]
       #
diff --git a/lib/openai/models/audio/speech_create_params.rb b/lib/openai/models/audio/speech_create_params.rb
index 94b31a52..ba364d38 100644
--- a/lib/openai/models/audio/speech_create_params.rb
+++ b/lib/openai/models/audio/speech_create_params.rb
@@ -17,23 +17,23 @@ class SpeechCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute model
        #   One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-        #     `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+        #   `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
        #
        #   @return [String, Symbol, OpenAI::Models::Audio::SpeechModel]
        required :model, union: -> { OpenAI::Models::Audio::SpeechCreateParams::Model }

        # @!attribute voice
        #   The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
-        #     `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
-        #     `verse`. Previews of the voices are available in the
-        #     [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+        #   `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
+        #   `verse`. Previews of the voices are available in the
+        #   [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
        #
        #   @return [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice]
        required :voice, union: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice }

        # @!attribute [r] instructions
        #   Control the voice of your generated audio with additional instructions. Does not
-        #     work with `tts-1` or `tts-1-hd`.
+        #   work with `tts-1` or `tts-1-hd`.
        #
        #   @return [String, nil]
        optional :instructions, String
@@ -44,7 +44,7 @@ class SpeechCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] response_format
        #   The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
-        #     `wav`, and `pcm`.
+        #   `wav`, and `pcm`.
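+        #
+        #   @example
+        #     # Hedged usage sketch (not part of the generated docs); the parameter
+        #     # values here are illustrative only.
+        #     client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
+        #     client.audio.speech.create(
+        #       model: "gpt-4o-mini-tts",
+        #       voice: :alloy,
+        #       input: "Hello!",
+        #       response_format: :wav
+        #     )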
        #
        #   @return [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat, nil]
        optional :response_format, enum: -> { OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat }
@@ -55,7 +55,7 @@ class SpeechCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] speed
        #   The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
-        #     the default.
+        #   the default.
        #
        #   @return [Float, nil]
        optional :speed, Float
@@ -78,7 +78,7 @@ class SpeechCreateParams < OpenAI::Internal::Type::BaseModel
        #   # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void

        # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-        #   `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+        # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
        module Model
          extend OpenAI::Internal::Type::Union
@@ -93,9 +93,9 @@ module Model
        end

        # The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
-        #   `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
-        #   `verse`. Previews of the voices are available in the
-        #   [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+        # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
+        # `verse`. Previews of the voices are available in the
+        # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
        module Voice
          extend OpenAI::Internal::Type::Union
@@ -145,7 +145,7 @@ module Voice
        end

        # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
-        #   `wav`, and `pcm`.
+        # `wav`, and `pcm`.
        module ResponseFormat
          extend OpenAI::Internal::Type::Enum
diff --git a/lib/openai/models/audio/transcription.rb b/lib/openai/models/audio/transcription.rb
index abe567ce..ead34e3d 100644
--- a/lib/openai/models/audio/transcription.rb
+++ b/lib/openai/models/audio/transcription.rb
@@ -12,8 +12,8 @@ class Transcription < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] logprobs
        #   The log probabilities of the tokens in the transcription. Only returned with the
-        #     models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added
-        #     to the `include` array.
+        #   models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added
+        #   to the `include` array.
        #
        #   @return [Array<OpenAI::Models::Audio::Transcription::Logprob>, nil]
        optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Audio::Transcription::Logprob] }
@@ -24,7 +24,7 @@ class Transcription < OpenAI::Internal::Type::BaseModel

        # @!parse
        #   # Represents a transcription response returned by model, based on the provided
-        #   #   input.
+        #   # input.
        #   #
        #   # @param text [String]
        #   # @param logprobs [Array]
diff --git a/lib/openai/models/audio/transcription_create_params.rb b/lib/openai/models/audio/transcription_create_params.rb
index 0cd4367b..2dc55d74 100644
--- a/lib/openai/models/audio/transcription_create_params.rb
+++ b/lib/openai/models/audio/transcription_create_params.rb
@@ -13,25 +13,25 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute file
        #   The audio file object (not file name) to transcribe, in one of these formats:
-        #     flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+        #   flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
        #
        #   @return [IO, StringIO]
        required :file, IO

        # @!attribute model
        #   ID of the model to use. The options are `gpt-4o-transcribe`,
-        #     `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
-        #     Whisper V2 model).
+        #   `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
+        #   Whisper V2 model).
        #
        #   @return [String, Symbol, OpenAI::Models::AudioModel]
        required :model, union: -> { OpenAI::Models::Audio::TranscriptionCreateParams::Model }

        # @!attribute [r] include
        #   Additional information to include in the transcription response. `logprobs` will
-        #     return the log probabilities of the tokens in the response to understand the
-        #     model's confidence in the transcription. `logprobs` only works with
-        #     response_format set to `json` and only with the models `gpt-4o-transcribe` and
-        #     `gpt-4o-mini-transcribe`.
+        #   return the log probabilities of the tokens in the response to understand the
+        #   model's confidence in the transcription. `logprobs` only works with
+        #   response_format set to `json` and only with the models `gpt-4o-transcribe` and
+        #   `gpt-4o-mini-transcribe`.
        #
        #   @return [Array, nil]
        optional :include,
@@ -43,8 +43,8 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] language
        #   The language of the input audio. Supplying the input language in
-        #     [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
-        #     format will improve accuracy and latency.
+        #   [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+        #   format will improve accuracy and latency.
        #
        #   @return [String, nil]
        optional :language, String
@@ -55,9 +55,9 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] prompt
        #   An optional text to guide the model's style or continue a previous audio
-        #     segment. The
-        #     [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
-        #     should match the audio language.
+        #   segment. The
+        #   [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+        #   should match the audio language.
        #
        #   @return [String, nil]
        optional :prompt, String
@@ -68,8 +68,8 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] response_format
        #   The format of the output, in one of these options: `json`, `text`, `srt`,
-        #     `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
-        #     the only supported format is `json`.
+        #   `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+        #   the only supported format is `json`.
        #
        #   @return [Symbol, OpenAI::Models::AudioResponseFormat, nil]
        optional :response_format, enum: -> { OpenAI::Models::AudioResponseFormat }
@@ -80,10 +80,10 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] temperature
        #   The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
-        #     output more random, while lower values like 0.2 will make it more focused and
-        #     deterministic. If set to 0, the model will use
-        #     [log probability](https://en.wikipedia.org/wiki/Log_probability) to
-        #     automatically increase the temperature until certain thresholds are hit.
+        #   output more random, while lower values like 0.2 will make it more focused and
+        #   deterministic. If set to 0, the model will use
+        #   [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+        #   automatically increase the temperature until certain thresholds are hit.
        #
        #   @return [Float, nil]
        optional :temperature, Float
@@ -94,10 +94,10 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] timestamp_granularities
        #   The timestamp granularities to populate for this transcription.
-        #     `response_format` must be set `verbose_json` to use timestamp granularities.
-        #     Either or both of these options are supported: `word`, or `segment`. Note: There
-        #     is no additional latency for segment timestamps, but generating word timestamps
-        #     incurs additional latency.
+        #   `response_format` must be set to `verbose_json` to use timestamp granularities.
+        #   Either or both of these options are supported: `word` or `segment`. Note: There
+        #   is no additional latency for segment timestamps, but generating word timestamps
+        #   incurs additional latency.
        #
        #   @return [Array, nil]
        optional :timestamp_granularities,
@@ -136,8 +136,8 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
        #   # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void

        # ID of the model to use. The options are `gpt-4o-transcribe`,
-        #   `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
-        #   Whisper V2 model).
+        # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
+        # Whisper V2 model).
        module Model
          extend OpenAI::Internal::Type::Union
diff --git a/lib/openai/models/audio/transcription_create_response.rb b/lib/openai/models/audio/transcription_create_response.rb
index 75cfc88b..69d68b0b 100644
--- a/lib/openai/models/audio/transcription_create_response.rb
+++ b/lib/openai/models/audio/transcription_create_response.rb
@@ -4,7 +4,7 @@ module OpenAI
    module Models
      module Audio
        # Represents a transcription response returned by model, based on the provided
-        #   input.
+        # input.
        #
        # @see OpenAI::Resources::Audio::Transcriptions#create
        #
diff --git a/lib/openai/models/audio/transcription_segment.rb b/lib/openai/models/audio/transcription_segment.rb
index 02990b46..852a77d1 100644
--- a/lib/openai/models/audio/transcription_segment.rb
+++ b/lib/openai/models/audio/transcription_segment.rb
@@ -12,14 +12,14 @@ class TranscriptionSegment < OpenAI::Internal::Type::BaseModel

        # @!attribute avg_logprob
        #   Average logprob of the segment. If the value is lower than -1, consider the
-        #     logprobs failed.
+        #   logprobs failed.
        #
        #   @return [Float]
        required :avg_logprob, Float

        # @!attribute compression_ratio
        #   Compression ratio of the segment. If the value is greater than 2.4, consider the
-        #     compression failed.
+        #   compression failed.
        #
        #   @return [Float]
        required :compression_ratio, Float
@@ -32,7 +32,7 @@ class TranscriptionSegment < OpenAI::Internal::Type::BaseModel

        # @!attribute no_speech_prob
        #   Probability of no speech in the segment. If the value is higher than 1.0 and the
-        #     `avg_logprob` is below -1, consider this segment silent.
+        #   `avg_logprob` is below -1, consider this segment silent.
        #
        #   @return [Float]
        required :no_speech_prob, Float
diff --git a/lib/openai/models/audio/transcription_stream_event.rb b/lib/openai/models/audio/transcription_stream_event.rb
index 171cfd23..de3b63d5 100644
--- a/lib/openai/models/audio/transcription_stream_event.rb
+++ b/lib/openai/models/audio/transcription_stream_event.rb
@@ -4,9 +4,9 @@ module OpenAI
    module Models
      module Audio
        # Emitted when there is an additional text delta. This is also the first event
-        #   emitted when the transcription starts. Only emitted when you
-        #   [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
-        #   with the `Stream` parameter set to `true`.
+        # emitted when the transcription starts. Only emitted when you
+        # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+        # with the `Stream` parameter set to `true`.
        module TranscriptionStreamEvent
          extend OpenAI::Internal::Type::Union
diff --git a/lib/openai/models/audio/transcription_text_delta_event.rb b/lib/openai/models/audio/transcription_text_delta_event.rb
index 4a858738..731bf107 100644
--- a/lib/openai/models/audio/transcription_text_delta_event.rb
+++ b/lib/openai/models/audio/transcription_text_delta_event.rb
@@ -18,8 +18,8 @@ class TranscriptionTextDeltaEvent < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] logprobs
        #   The log probabilities of the delta. Only included if you
-        #     [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
-        #     with the `include[]` parameter set to `logprobs`.
+        #   [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+        #   with the `include[]` parameter set to `logprobs`.
        #
        #   @return [Array, nil]
        optional :logprobs,
@@ -31,9 +31,9 @@ class TranscriptionTextDeltaEvent < OpenAI::Internal::Type::BaseModel

        # @!parse
        #   # Emitted when there is an additional text delta. This is also the first event
-        #   #   emitted when the transcription starts. Only emitted when you
-        #   #   [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
-        #   #   with the `Stream` parameter set to `true`.
+        #   # emitted when the transcription starts. Only emitted when you
+        #   # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+        #   # with the `Stream` parameter set to `true`.
        #   #
        #   # @param delta [String]
        #   # @param logprobs [Array]
diff --git a/lib/openai/models/audio/transcription_text_done_event.rb b/lib/openai/models/audio/transcription_text_done_event.rb
index 9795995a..be1ee0fe 100644
--- a/lib/openai/models/audio/transcription_text_done_event.rb
+++ b/lib/openai/models/audio/transcription_text_done_event.rb
@@ -18,9 +18,9 @@ class TranscriptionTextDoneEvent < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] logprobs
        #   The log probabilities of the individual tokens in the transcription. Only
-        #     included if you
-        #     [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
-        #     with the `include[]` parameter set to `logprobs`.
+        #   included if you
+        #   [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+        #   with the `include[]` parameter set to `logprobs`.
        #
        #   @return [Array, nil]
        optional :logprobs,
@@ -32,9 +32,9 @@ class TranscriptionTextDoneEvent < OpenAI::Internal::Type::BaseModel

        # @!parse
        #   # Emitted when the transcription is complete. Contains the complete transcription
-        #   #   text. Only emitted when you
-        #   #   [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
-        #   #   with the `Stream` parameter set to `true`.
+        #   # text. Only emitted when you
+        #   # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+        #   # with the `Stream` parameter set to `true`.
        #   #
        #   # @param text [String]
        #   # @param logprobs [Array]
diff --git a/lib/openai/models/audio/transcription_verbose.rb b/lib/openai/models/audio/transcription_verbose.rb
index 1dfb5931..80068f85 100644
--- a/lib/openai/models/audio/transcription_verbose.rb
+++ b/lib/openai/models/audio/transcription_verbose.rb
@@ -44,7 +44,7 @@ class TranscriptionVerbose < OpenAI::Internal::Type::BaseModel

        # @!parse
        #   # Represents a verbose json transcription response returned by model, based on the
-        #   #   provided input.
+        #   # provided input.
        #   #
        #   # @param duration [Float]
        #   # @param language [String]
diff --git a/lib/openai/models/audio/translation_create_params.rb b/lib/openai/models/audio/translation_create_params.rb
index 93833eb2..82b0af2f 100644
--- a/lib/openai/models/audio/translation_create_params.rb
+++ b/lib/openai/models/audio/translation_create_params.rb
@@ -11,23 +11,23 @@ class TranslationCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute file
        #   The audio file object (not file name) translate, in one of these formats: flac,
-        #     mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+        #   mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
        #
        #   @return [IO, StringIO]
        required :file, IO

        # @!attribute model
        #   ID of the model to use. Only `whisper-1` (which is powered by our open source
-        #     Whisper V2 model) is currently available.
+        #   Whisper V2 model) is currently available.
        #
        #   @return [String, Symbol, OpenAI::Models::AudioModel]
        required :model, union: -> { OpenAI::Models::Audio::TranslationCreateParams::Model }

        # @!attribute [r] prompt
        #   An optional text to guide the model's style or continue a previous audio
-        #     segment. The
-        #     [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
-        #     should be in English.
+        #   segment. The
+        #   [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+        #   should be in English.
        #
        #   @return [String, nil]
        optional :prompt, String
@@ -38,7 +38,7 @@ class TranslationCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] response_format
        #   The format of the output, in one of these options: `json`, `text`, `srt`,
-        #     `verbose_json`, or `vtt`.
+        #   `verbose_json`, or `vtt`.
        #
        #   @return [Symbol, OpenAI::Models::Audio::TranslationCreateParams::ResponseFormat, nil]
        optional :response_format, enum: -> { OpenAI::Models::Audio::TranslationCreateParams::ResponseFormat }
@@ -49,10 +49,10 @@ class TranslationCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] temperature
        #   The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
-        #     output more random, while lower values like 0.2 will make it more focused and
-        #     deterministic. If set to 0, the model will use
-        #     [log probability](https://en.wikipedia.org/wiki/Log_probability) to
-        #     automatically increase the temperature until certain thresholds are hit.
+        #   output more random, while lower values like 0.2 will make it more focused and
+        #   deterministic. If set to 0, the model will use
+        #   [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+        #   automatically increase the temperature until certain thresholds are hit.
        #
        #   @return [Float, nil]
        optional :temperature, Float
@@ -74,7 +74,7 @@ class TranslationCreateParams < OpenAI::Internal::Type::BaseModel
        #   # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void

        # ID of the model to use. Only `whisper-1` (which is powered by our open source
-        #   Whisper V2 model) is currently available.
+        # Whisper V2 model) is currently available.
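+        #
+        # @example
+        #   # Hedged sketch, not from the upstream docs; the file name is illustrative.
+        #   client.audio.translations.create(
+        #     file: File.open("speech.m4a", "rb"),
+        #     model: "whisper-1"
+        #   )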
        module Model
          extend OpenAI::Internal::Type::Union
@@ -89,7 +89,7 @@ module Model
        end

        # The format of the output, in one of these options: `json`, `text`, `srt`,
-        #   `verbose_json`, or `vtt`.
+        # `verbose_json`, or `vtt`.
        module ResponseFormat
          extend OpenAI::Internal::Type::Enum
diff --git a/lib/openai/models/audio_response_format.rb b/lib/openai/models/audio_response_format.rb
index 0904c657..2babfc83 100644
--- a/lib/openai/models/audio_response_format.rb
+++ b/lib/openai/models/audio_response_format.rb
@@ -3,8 +3,8 @@ module OpenAI
  module Models
    # The format of the output, in one of these options: `json`, `text`, `srt`,
-    #   `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
-    #   the only supported format is `json`.
+    # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+    # the only supported format is `json`.
    module AudioResponseFormat
      extend OpenAI::Internal::Type::Enum
diff --git a/lib/openai/models/auto_file_chunking_strategy_param.rb b/lib/openai/models/auto_file_chunking_strategy_param.rb
index 15d03f11..a33c9dcc 100644
--- a/lib/openai/models/auto_file_chunking_strategy_param.rb
+++ b/lib/openai/models/auto_file_chunking_strategy_param.rb
@@ -11,7 +11,7 @@ class AutoFileChunkingStrategyParam < OpenAI::Internal::Type::BaseModel

      # @!parse
      #   # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
-      #   #   `800` and `chunk_overlap_tokens` of `400`.
+      #   # `800` and `chunk_overlap_tokens` of `400`.
      #   #
      #   # @param type [Symbol, :auto]
      #   #
diff --git a/lib/openai/models/batch.rb b/lib/openai/models/batch.rb
index be9ede39..a490b124 100644
--- a/lib/openai/models/batch.rb
+++ b/lib/openai/models/batch.rb
@@ -146,11 +146,11 @@ class Batch < OpenAI::Internal::Type::BaseModel

      # @!attribute metadata
      #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-      #     for storing additional information about the object in a structured format, and
-      #     querying for objects via API or the dashboard.
+      #   for storing additional information about the object in a structured format, and
+      #   querying for objects via API or the dashboard.
      #
-      #     Keys are strings with a maximum length of 64 characters. Values are strings with
-      #     a maximum length of 512 characters.
+      #   Keys are strings with a maximum length of 64 characters. Values are strings with
+      #   a maximum length of 512 characters.
      #
      #   @return [Hash{Symbol=>String}, nil]
      optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
diff --git a/lib/openai/models/batch_create_params.rb b/lib/openai/models/batch_create_params.rb
index 1549f66e..95548d81 100644
--- a/lib/openai/models/batch_create_params.rb
+++ b/lib/openai/models/batch_create_params.rb
@@ -10,16 +10,16 @@ class BatchCreateParams < OpenAI::Internal::Type::BaseModel

      # @!attribute completion_window
      #   The time frame within which the batch should be processed. Currently only `24h`
-      #     is supported.
+      #   is supported.
      #
      #   @return [Symbol, OpenAI::Models::BatchCreateParams::CompletionWindow]
      required :completion_window, enum: -> { OpenAI::Models::BatchCreateParams::CompletionWindow }

      # @!attribute endpoint
      #   The endpoint to be used for all requests in the batch. Currently
-      #     `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
-      #     are supported. Note that `/v1/embeddings` batches are also restricted to a
-      #     maximum of 50,000 embedding inputs across all requests in the batch.
+      #   `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+      #   are supported. Note that `/v1/embeddings` batches are also restricted to a
+      #   maximum of 50,000 embedding inputs across all requests in the batch.
      #
      #   @return [Symbol, OpenAI::Models::BatchCreateParams::Endpoint]
      required :endpoint, enum: -> { OpenAI::Models::BatchCreateParams::Endpoint }
@@ -27,24 +27,24 @@ class BatchCreateParams < OpenAI::Internal::Type::BaseModel

      # @!attribute input_file_id
      #   The ID of an uploaded file that contains requests for the new batch.
      #
-      #     See [upload file](https://platform.openai.com/docs/api-reference/files/create)
-      #     for how to upload a file.
-      #
-      #     Your input file must be formatted as a
-      #     [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
-      #     and must be uploaded with the purpose `batch`. The file can contain up to 50,000
-      #     requests, and can be up to 200 MB in size.
+      #   See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+      #   for how to upload a file.
+      #
+      #   Your input file must be formatted as a
+      #   [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
+      #   and must be uploaded with the purpose `batch`. The file can contain up to 50,000
+      #   requests, and can be up to 200 MB in size.
      #
      #   @return [String]
      required :input_file_id, String

      # @!attribute metadata
      #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-      #     for storing additional information about the object in a structured format, and
-      #     querying for objects via API or the dashboard.
+      #   for storing additional information about the object in a structured format, and
+      #   querying for objects via API or the dashboard.
      #
-      #     Keys are strings with a maximum length of 64 characters. Values are strings with
-      #     a maximum length of 512 characters.
+      #   Keys are strings with a maximum length of 64 characters. Values are strings with
+      #   a maximum length of 512 characters.
      #
      #   @return [Hash{Symbol=>String}, nil]
      optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
@@ -61,7 +61,7 @@ class BatchCreateParams < OpenAI::Internal::Type::BaseModel
      #   # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void

      # The time frame within which the batch should be processed. Currently only `24h`
-      #   is supported.
+      # is supported.
      module CompletionWindow
        extend OpenAI::Internal::Type::Enum
@@ -75,9 +75,9 @@ module CompletionWindow
      end

      # The endpoint to be used for all requests in the batch. Currently
-      #   `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
-      #   are supported. Note that `/v1/embeddings` batches are also restricted to a
-      #   maximum of 50,000 embedding inputs across all requests in the batch.
+      # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+      # are supported. Note that `/v1/embeddings` batches are also restricted to a
+      # maximum of 50,000 embedding inputs across all requests in the batch.
      module Endpoint
        extend OpenAI::Internal::Type::Enum
diff --git a/lib/openai/models/batch_list_params.rb b/lib/openai/models/batch_list_params.rb
index 651111c7..675cc802 100644
--- a/lib/openai/models/batch_list_params.rb
+++ b/lib/openai/models/batch_list_params.rb
@@ -10,9 +10,9 @@ class BatchListParams < OpenAI::Internal::Type::BaseModel

      # @!attribute [r] after
      #   A cursor for use in pagination. `after` is an object ID that defines your place
-      #     in the list. For instance, if you make a list request and receive 100 objects,
-      #     ending with obj_foo, your subsequent call can include after=obj_foo in order to
-      #     fetch the next page of the list.
+      #   in the list. For instance, if you make a list request and receive 100 objects,
+      #   ending with obj_foo, your subsequent call can include after=obj_foo in order to
+      #   fetch the next page of the list.
      #
      #   @return [String, nil]
      optional :after, String
@@ -23,7 +23,7 @@ class BatchListParams < OpenAI::Internal::Type::BaseModel

      # @!attribute [r] limit
      #   A limit on the number of objects to be returned. Limit can range between 1 and
-      #     100, and the default is 20.
+      #   100, and the default is 20.
      #
      #   @return [Integer, nil]
      optional :limit, Integer
diff --git a/lib/openai/models/beta/assistant.rb b/lib/openai/models/beta/assistant.rb
index 727f24f2..bd04e326 100644
--- a/lib/openai/models/beta/assistant.rb
+++ b/lib/openai/models/beta/assistant.rb
@@ -25,28 +25,28 @@ class Assistant < OpenAI::Internal::Type::BaseModel

        # @!attribute instructions
        #   The system instructions that the assistant uses. The maximum length is 256,000
-        #     characters.
+        #   characters.
        #
        #   @return [String, nil]
        required :instructions, String, nil?: true

        # @!attribute metadata
        #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-        #     for storing additional information about the object in a structured format, and
-        #     querying for objects via API or the dashboard.
+        #   for storing additional information about the object in a structured format, and
+        #   querying for objects via API or the dashboard.
        #
-        #     Keys are strings with a maximum length of 64 characters. Values are strings with
-        #     a maximum length of 512 characters.
+        #   Keys are strings with a maximum length of 64 characters. Values are strings with
+        #   a maximum length of 512 characters.
        #
        #   @return [Hash{Symbol=>String}, nil]
        required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

        # @!attribute model
        #   ID of the model to use. You can use the
-        #     [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-        #     see all of your available models, or see our
-        #     [Model overview](https://platform.openai.com/docs/models) for descriptions of
-        #     them.
+        #   [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+        #   see all of your available models, or see our
+        #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
+        #   them.
        #
        #   @return [String]
        required :model, String
@@ -65,60 +65,60 @@ class Assistant < OpenAI::Internal::Type::BaseModel

        # @!attribute tools
        #   A list of tool enabled on the assistant. There can be a maximum of 128 tools per
-        #     assistant. Tools can be of types `code_interpreter`, `file_search`, or
-        #     `function`.
+        #   assistant. Tools can be of types `code_interpreter`, `file_search`, or
+        #   `function`.
        #
        #   @return [Array<OpenAI::Models::Beta::AssistantTool>]
        required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Beta::AssistantTool] }

        # @!attribute response_format
        #   Specifies the format that the model must output. Compatible with
-        #     [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-        #     [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-        #     and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-        #
-        #     Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-        #     Outputs which ensures the model will match your supplied JSON schema. Learn more
-        #     in the
-        #     [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-        #
-        #     Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-        #     message the model generates is valid JSON.
-        #
-        #     **Important:** when using JSON mode, you **must** also instruct the model to
-        #     produce JSON yourself via a system or user message. Without this, the model may
-        #     generate an unending stream of whitespace until the generation reaches the token
-        #     limit, resulting in a long-running and seemingly "stuck" request. Also note that
-        #     the message content may be partially cut off if `finish_reason="length"`, which
-        #     indicates the generation exceeded `max_tokens` or the conversation exceeded the
-        #     max context length.
+        #   [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+        #   [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+        #   and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+        #
+        #   Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+        #   Outputs which ensures the model will match your supplied JSON schema. Learn more
+        #   in the
+        #   [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        #
+        #   Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+        #   message the model generates is valid JSON.
+        #
+        #   **Important:** when using JSON mode, you **must** also instruct the model to
+        #   produce JSON yourself via a system or user message. Without this, the model may
+        #   generate an unending stream of whitespace until the generation reaches the token
+        #   limit, resulting in a long-running and seemingly "stuck" request. Also note that
+        #   the message content may be partially cut off if `finish_reason="length"`, which
+        #   indicates the generation exceeded `max_tokens` or the conversation exceeded the
+        #   max context length.
        #
        #   @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
        optional :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true

        # @!attribute temperature
        #   What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-        #     make the output more random, while lower values like 0.2 will make it more
-        #     focused and deterministic.
+        #   make the output more random, while lower values like 0.2 will make it more
+        #   focused and deterministic.
        #
        #   @return [Float, nil]
        optional :temperature, Float, nil?: true

        # @!attribute tool_resources
        #   A set of resources that are used by the assistant's tools. The resources are
-        #     specific to the type of tool. For example, the `code_interpreter` tool requires
-        #     a list of file IDs, while the `file_search` tool requires a list of vector store
-        #     IDs.
+        #   specific to the type of tool. For example, the `code_interpreter` tool requires
+        #   a list of file IDs, while the `file_search` tool requires a list of vector store
+        #   IDs.
        #
        #   @return [OpenAI::Models::Beta::Assistant::ToolResources, nil]
        optional :tool_resources, -> { OpenAI::Models::Beta::Assistant::ToolResources }, nil?: true

        # @!attribute top_p
        #   An alternative to sampling with temperature, called nucleus sampling, where the
-        #     model considers the results of the tokens with top_p probability mass. So 0.1
-        #     means only the tokens comprising the top 10% probability mass are considered.
+        #   model considers the results of the tokens with top_p probability mass. So 0.1
+        #   means only the tokens comprising the top 10% probability mass are considered.
        #
-        #     We generally recommend altering this or temperature but not both.
+        #   We generally recommend altering this or temperature but not both.
        #
        #   @return [Float, nil]
        optional :top_p, Float, nil?: true
@@ -183,9 +183,9 @@ class ToolResources < OpenAI::Internal::Type::BaseModel

          # @!parse
          #   # A set of resources that are used by the assistant's tools. The resources are
-          #   #   specific to the type of tool. For example, the `code_interpreter` tool requires
-          #   #   a list of file IDs, while the `file_search` tool requires a list of vector store
-          #   #   IDs.
+          #   # specific to the type of tool. For example, the `code_interpreter` tool requires
+          #   # a list of file IDs, while the `file_search` tool requires a list of vector store
+          #   # IDs.
          #   #
          #   # @param code_interpreter [OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter]
          #   # @param file_search [OpenAI::Models::Beta::Assistant::ToolResources::FileSearch]
@@ -198,8 +198,8 @@ class ToolResources < OpenAI::Internal::Type::BaseModel
          class CodeInterpreter < OpenAI::Internal::Type::BaseModel
            # @!attribute [r] file_ids
            #   A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-            #     available to the `code_interpreter`` tool. There can be a maximum of 20 files
-            #     associated with the tool.
+            #   available to the `code_interpreter` tool. There can be a maximum of 20 files
+            #   associated with the tool.
            #
            #   @return [Array<String>, nil]
            optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
@@ -220,9 +220,9 @@ class CodeInterpreter < OpenAI::Internal::Type::BaseModel
          class FileSearch < OpenAI::Internal::Type::BaseModel
            # @!attribute [r] vector_store_ids
            #   The ID of the
-            #     [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-            #     attached to this assistant. There can be a maximum of 1 vector store attached to
-            #     the assistant.
+            #   [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+            #   attached to this assistant. There can be a maximum of 1 vector store attached to
+            #   the assistant.
            #
            #   @return [Array<String>, nil]
            optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]
diff --git a/lib/openai/models/beta/assistant_create_params.rb b/lib/openai/models/beta/assistant_create_params.rb
index 2b426970..3e3fd1d0 100644
--- a/lib/openai/models/beta/assistant_create_params.rb
+++ b/lib/openai/models/beta/assistant_create_params.rb
@@ -11,10 +11,10 @@ class AssistantCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute model
        #   ID of the model to use. You can use the
-        #     [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-        #     see all of your available models, or see our
-        #     [Model overview](https://platform.openai.com/docs/models) for descriptions of
-        #     them.
+        #   [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+        #   see all of your available models, or see our
+        #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
+        #   them.
        #
        #   @return [String, Symbol, OpenAI::Models::ChatModel]
        required :model, union: -> { OpenAI::Models::Beta::AssistantCreateParams::Model }
@@ -27,18 +27,18 @@ class AssistantCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute instructions
        #   The system instructions that the assistant uses. The maximum length is 256,000
-        #     characters.
+        #   characters.
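+        #
+        #   @example
+        #     # Hedged sketch of the call shape (not from the upstream docs); the
+        #     # values are illustrative only.
+        #     client.beta.assistants.create(
+        #       model: "gpt-4o",
+        #       instructions: "You are a personal math tutor.",
+        #       tools: [{type: :code_interpreter}]
+        #     )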
        #
        #   @return [String, nil]
        optional :instructions, String, nil?: true

        # @!attribute metadata
        #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-        #     for storing additional information about the object in a structured format, and
-        #     querying for objects via API or the dashboard.
+        #   for storing additional information about the object in a structured format, and
+        #   querying for objects via API or the dashboard.
        #
-        #     Keys are strings with a maximum length of 64 characters. Values are strings with
-        #     a maximum length of 512 characters.
+        #   Keys are strings with a maximum length of 64 characters. Values are strings with
+        #   a maximum length of 512 characters.
        #
        #   @return [Hash{Symbol=>String}, nil]
        optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
@@ -52,60 +52,60 @@ class AssistantCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute reasoning_effort
        #   **o-series models only**
        #
-        #     Constrains effort on reasoning for
-        #     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        #     supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-        #     result in faster responses and fewer tokens used on reasoning in a response.
+        #   Constrains effort on reasoning for
+        #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+        #   supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+        #   result in faster responses and fewer tokens used on reasoning in a response.
        #
        #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
        optional :reasoning_effort, enum: -> { OpenAI::Models::ReasoningEffort }, nil?: true

        # @!attribute response_format
        #   Specifies the format that the model must output. Compatible with
-        #     [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-        #     [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-        #     and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+        #   [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+        #   [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+        #   and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
        #
-        #     Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-        #     Outputs which ensures the model will match your supplied JSON schema. Learn more
-        #     in the
-        #     [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        #   Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+        #   Outputs which ensures the model will match your supplied JSON schema. Learn more
+        #   in the
+        #   [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
        #
-        #     Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-        #     message the model generates is valid JSON.
+        #   Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+        #   message the model generates is valid JSON.
        #
-        #     **Important:** when using JSON mode, you **must** also instruct the model to
-        #     produce JSON yourself via a system or user message. Without this, the model may
-        #     generate an unending stream of whitespace until the generation reaches the token
-        #     limit, resulting in a long-running and seemingly "stuck" request. Also note that
-        #     the message content may be partially cut off if `finish_reason="length"`, which
-        #     indicates the generation exceeded `max_tokens` or the conversation exceeded the
-        #     max context length.
+        #   **Important:** when using JSON mode, you **must** also instruct the model to
+        #   produce JSON yourself via a system or user message. Without this, the model may
+        #   generate an unending stream of whitespace until the generation reaches the token
+        #   limit, resulting in a long-running and seemingly "stuck" request. Also note that
+        #   the message content may be partially cut off if `finish_reason="length"`, which
+        #   indicates the generation exceeded `max_tokens` or the conversation exceeded the
+        #   max context length.
        #
        #   @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
        optional :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true

        # @!attribute temperature
        #   What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-        #     make the output more random, while lower values like 0.2 will make it more
-        #     focused and deterministic.
+        #   make the output more random, while lower values like 0.2 will make it more
+        #   focused and deterministic.
        #
        #   @return [Float, nil]
        optional :temperature, Float, nil?: true

        # @!attribute tool_resources
        #   A set of resources that are used by the assistant's tools. The resources are
-        #     specific to the type of tool. For example, the `code_interpreter` tool requires
-        #     a list of file IDs, while the `file_search` tool requires a list of vector store
-        #     IDs.
+        #   specific to the type of tool. For example, the `code_interpreter` tool requires
+        #   a list of file IDs, while the `file_search` tool requires a list of vector store
+        #   IDs.
        #
        #   @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources, nil]
        optional :tool_resources, -> { OpenAI::Models::Beta::AssistantCreateParams::ToolResources }, nil?: true

        # @!attribute [r] tools
        #   A list of tool enabled on the assistant. There can be a maximum of 128 tools per
-        #     assistant. Tools can be of types `code_interpreter`, `file_search`, or
-        #     `function`.
+        #   assistant. Tools can be of types `code_interpreter`, `file_search`, or
+        #   `function`.
        #
        #   @return [Array<OpenAI::Models::Beta::AssistantTool>, nil]
        optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Beta::AssistantTool] }
@@ -116,10 +116,10 @@ class AssistantCreateParams < OpenAI::Internal::Type::BaseModel

        # @!attribute top_p
        #   An alternative to sampling with temperature, called nucleus sampling, where the
-        #     model considers the results of the tokens with top_p probability mass. So 0.1
-        #     means only the tokens comprising the top 10% probability mass are considered.
+        #   model considers the results of the tokens with top_p probability mass. So 0.1
+        #   means only the tokens comprising the top 10% probability mass are considered.
        #
-        #     We generally recommend altering this or temperature but not both.
+        #   We generally recommend altering this or temperature but not both.
        #
        #   @return [Float, nil]
        optional :top_p, Float, nil?: true
@@ -159,10 +159,10 @@ class AssistantCreateParams < OpenAI::Internal::Type::BaseModel
        #   # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void

        # ID of the model to use. You can use the
-        #   [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-        #   see all of your available models, or see our
-        #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
-        #   them.
+        # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+        # see all of your available models, or see our
+        # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+        # them.
        module Model
          extend OpenAI::Internal::Type::Union
@@ -198,9 +198,9 @@ class ToolResources < OpenAI::Internal::Type::BaseModel

          # @!parse
          #   # A set of resources that are used by the assistant's tools. The resources are
-          #   #   specific to the type of tool. For example, the `code_interpreter` tool requires
-          #   #   a list of file IDs, while the `file_search` tool requires a list of vector store
-          #   #   IDs.
+          #   # specific to the type of tool. For example, the `code_interpreter` tool requires
+          #   # a list of file IDs, while the `file_search` tool requires a list of vector store
+          #   # IDs.
          #   #
          #   # @param code_interpreter [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter]
          #   # @param file_search [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch]
@@ -213,8 +213,8 @@ class ToolResources < OpenAI::Internal::Type::BaseModel
          class CodeInterpreter < OpenAI::Internal::Type::BaseModel
            # @!attribute [r] file_ids
            #   A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-            #     available to the `code_interpreter` tool. There can be a maximum of 20 files
-            #     associated with the tool.
+            #   available to the `code_interpreter` tool. There can be a maximum of 20 files
+            #   associated with the tool.
            #
            #   @return [Array<String>, nil]
            optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
@@ -235,9 +235,9 @@ class CodeInterpreter < OpenAI::Internal::Type::BaseModel
          class FileSearch < OpenAI::Internal::Type::BaseModel
            # @!attribute [r] vector_store_ids
            #   The
-            #     [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-            #     attached to this assistant. There can be a maximum of 1 vector store attached to
-            #     the assistant.
+            #   [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+            #   attached to this assistant. There can be a maximum of 1 vector store attached to
+            #   the assistant.
            #
            #   @return [Array<String>, nil]
            optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]
@@ -248,9 +248,9 @@ class FileSearch < OpenAI::Internal::Type::BaseModel

            # @!attribute [r] vector_stores
            #   A helper to create a
-            #     [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-            #     with file_ids and attach it to this assistant. There can be a maximum of 1
-            #     vector store attached to the assistant.
+            #   [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+            #   with file_ids and attach it to this assistant. There can be a maximum of 1
+            #   vector store attached to the assistant.
            #
            #   @return [Array, nil]
            optional :vector_stores,
@@ -271,7 +271,7 @@ class FileSearch < OpenAI::Internal::Type::BaseModel
            class VectorStore < OpenAI::Internal::Type::BaseModel
              # @!attribute [r] chunking_strategy
              #   The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-              #     strategy.
+              #   strategy.
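+              #
+              #   @example
+              #     # Illustrative payload, not from the upstream docs: an explicit
+              #     # static strategy that mirrors the documented `auto` defaults.
+              #     chunking_strategy = {
+              #       type: :static,
+              #       static: {max_chunk_size_tokens: 800, chunk_overlap_tokens: 400}
+              #     }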
              #
              #   @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, nil]
              optional :chunking_strategy,
@@ -283,8 +283,8 @@ class VectorStore < OpenAI::Internal::Type::BaseModel

              # @!attribute [r] file_ids
              #   A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
-              #     add to the vector store. There can be a maximum of 10000 files in a vector
-              #     store.
+              #   add to the vector store. There can be a maximum of 10000 files in a vector
+              #   store.
              #
              #   @return [Array<String>, nil]
              optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
@@ -295,11 +295,11 @@ class VectorStore < OpenAI::Internal::Type::BaseModel

              # @!attribute metadata
              #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-              #     for storing additional information about the object in a structured format, and
-              #     querying for objects via API or the dashboard.
+              #   for storing additional information about the object in a structured format, and
+              #   querying for objects via API or the dashboard.
              #
-              #     Keys are strings with a maximum length of 64 characters. Values are strings with
-              #     a maximum length of 512 characters.
+              #   Keys are strings with a maximum length of 64 characters. Values are strings with
+              #   a maximum length of 512 characters.
              #
              #   @return [Hash{Symbol=>String}, nil]
              optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
@@ -314,7 +314,7 @@ class VectorStore < OpenAI::Internal::Type::BaseModel
              #   # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void

              # The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-              #   strategy.
+              # strategy.
              #
              # @see OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore#chunking_strategy
              module ChunkingStrategy
@@ -338,7 +338,7 @@ class Auto < OpenAI::Internal::Type::BaseModel

                  # @!parse
                  #   # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
-                  #   #   `800` and `chunk_overlap_tokens` of `400`.
+                  #   # `800` and `chunk_overlap_tokens` of `400`.
                  #   #
                  #   # @param type [Symbol, :auto]
                  #   #
@@ -373,14 +373,14 @@ class Static < OpenAI::Internal::Type::BaseModel

                  # @!attribute chunk_overlap_tokens
                  #   The number of tokens that overlap between chunks. The default value is `400`.
                  #
-                  #     Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+                  #   Note that the overlap must not exceed half of `max_chunk_size_tokens`.
                  #
                  #   @return [Integer]
                  required :chunk_overlap_tokens, Integer

                  # @!attribute max_chunk_size_tokens
                  #   The maximum number of tokens in each chunk. The default value is `800`. The
-                  #     minimum value is `100` and the maximum value is `4096`.
+                  #   minimum value is `100` and the maximum value is `4096`.
                  #
                  #   @return [Integer]
                  required :max_chunk_size_tokens, Integer
diff --git a/lib/openai/models/beta/assistant_list_params.rb b/lib/openai/models/beta/assistant_list_params.rb
index c01b9f64..d46562ae 100644
--- a/lib/openai/models/beta/assistant_list_params.rb
+++ b/lib/openai/models/beta/assistant_list_params.rb
@@ -11,9 +11,9 @@ class AssistantListParams < OpenAI::Internal::Type::BaseModel

        # @!attribute [r] after
        #   A cursor for use in pagination. `after` is an object ID that defines your place
-        #     in the list. For instance, if you make a list request and receive 100 objects,
-        #     ending with obj_foo, your subsequent call can include after=obj_foo in order to
-        #     fetch the next page of the list.
+ # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String @@ -24,9 +24,9 @@ class AssistantListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String @@ -37,7 +37,7 @@ class AssistantListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer @@ -48,7 +48,7 @@ class AssistantListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::Beta::AssistantListParams::Order, nil] optional :order, enum: -> { OpenAI::Models::Beta::AssistantListParams::Order } @@ -69,7 +69,7 @@ class AssistantListParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/beta/assistant_response_format_option.rb b/lib/openai/models/beta/assistant_response_format_option.rb index 2c797ac5..f1e5fc86 100644 --- a/lib/openai/models/beta/assistant_response_format_option.rb +++ b/lib/openai/models/beta/assistant_response_format_option.rb @@ -4,25 +4,25 @@ module OpenAI module Models module Beta # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. 
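The `after`/`before` cursors described above compose into a simple pagination loop. A hedged sketch, assuming a client built with `OpenAI::Client.new` and page accessors named `data` and `has_more` (both assumptions, not confirmed SDK surface):

require "openai"

# Hedged sketch of manual cursor pagination over assistants.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

after = nil
loop do
  params = { limit: 20, order: :desc } # limit may range from 1 to 100; default is 20
  params[:after] = after if after
  page = client.beta.assistants.list(**params)
  page.data.each { |assistant| puts assistant.id }
  break unless page.has_more
  after = page.data.last.id # cursor: the last object ID seen on this page
end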
+ # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. module AssistantResponseFormatOption extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/beta/assistant_stream_event.rb b/lib/openai/models/beta/assistant_stream_event.rb index b46d6568..5663b14f 100644 --- a/lib/openai/models/beta/assistant_stream_event.rb +++ b/lib/openai/models/beta/assistant_stream_event.rb @@ -5,24 +5,24 @@ module Models module Beta # Represents an event emitted when streaming a Run. # - # Each event in a server-sent events stream has an `event` and `data` property: + # Each event in a server-sent events stream has an `event` and `data` property: # - # ``` - # event: thread.created - # data: {"id": "thread_123", "object": "thread", ...} - # ``` + # ``` + # event: thread.created + # data: {"id": "thread_123", "object": "thread", ...} + # ``` # - # We emit events whenever a new object is created, transitions to a new state, or - # is being streamed in parts (deltas). For example, we emit `thread.run.created` - # when a new run is created, `thread.run.completed` when a run completes, and so - # on. When an Assistant chooses to create a message during a run, we emit a - # `thread.message.created event`, a `thread.message.in_progress` event, many - # `thread.message.delta` events, and finally a `thread.message.completed` event. + # We emit events whenever a new object is created, transitions to a new state, or + # is being streamed in parts (deltas). For example, we emit `thread.run.created` + # when a new run is created, `thread.run.completed` when a run completes, and so + # on. When an Assistant chooses to create a message during a run, we emit a + # `thread.message.created event`, a `thread.message.in_progress` event, many + # `thread.message.delta` events, and finally a `thread.message.completed` event. # - # We may add additional events over time, so we recommend handling unknown events - # gracefully in your code. See the - # [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview) - # to learn how to integrate the Assistants API with streaming. + # We may add additional events over time, so we recommend handling unknown events + # gracefully in your code. See the + # [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview) + # to learn how to integrate the Assistants API with streaming. 
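Since each frame in the stream carries an `event` name and a `data` JSON payload as shown above, a tolerant consumer can be sketched in a few lines of plain Ruby; the known-event list below is purely illustrative:

require "json"

KNOWN_EVENTS = %w[thread.created thread.run.created thread.message.delta].freeze

# Parse one SSE frame of the documented `event:` / `data:` shape and skip
# anything unrecognized, since new event types may be added over time.
def handle_sse_frame(frame)
  event = frame[/^event: (.+)$/, 1]
  data  = JSON.parse(frame[/^data: (.+)$/m, 1])
  if KNOWN_EVENTS.include?(event)
    puts "#{event}: #{data["id"]}"
  else
    warn "skipping unknown event #{event}"
  end
end

handle_sse_frame(%(event: thread.created\ndata: {"id": "thread_123", "object": "thread"}))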
module AssistantStreamEvent extend OpenAI::Internal::Type::Union @@ -112,7 +112,7 @@ module AssistantStreamEvent class ThreadCreated < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a thread that contains - # [messages](https://platform.openai.com/docs/api-reference/messages). + # [messages](https://platform.openai.com/docs/api-reference/messages). # # @return [OpenAI::Models::Beta::Thread] required :data, -> { OpenAI::Models::Beta::Thread } @@ -134,8 +134,8 @@ class ThreadCreated < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a new - # # [thread](https://platform.openai.com/docs/api-reference/threads/object) is - # # created. + # # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # # created. # # # # @param data [OpenAI::Models::Beta::Thread] # # @param enabled [Boolean] @@ -149,7 +149,7 @@ class ThreadCreated < OpenAI::Internal::Type::BaseModel class ThreadRunCreated < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -161,7 +161,7 @@ class ThreadRunCreated < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a new - # # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + # # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.created"] @@ -174,7 +174,7 @@ class ThreadRunCreated < OpenAI::Internal::Type::BaseModel class ThreadRunQueued < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -186,7 +186,7 @@ class ThreadRunQueued < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to a `queued` status. + # # moves to a `queued` status. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.queued"] @@ -199,7 +199,7 @@ class ThreadRunQueued < OpenAI::Internal::Type::BaseModel class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -211,7 +211,7 @@ class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to an `in_progress` status. + # # moves to an `in_progress` status. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.in_progress"] @@ -224,7 +224,7 @@ class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -236,7 +236,7 @@ class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to a `requires_action` status. + # # moves to a `requires_action` status. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.requires_action"] @@ -249,7 +249,7 @@ class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -261,7 +261,7 @@ class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # is completed. + # # is completed. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.completed"] @@ -274,7 +274,7 @@ class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -286,7 +286,7 @@ class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # ends with status `incomplete`. + # # ends with status `incomplete`. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.incomplete"] @@ -299,7 +299,7 @@ class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel class ThreadRunFailed < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -311,7 +311,7 @@ class ThreadRunFailed < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # fails. + # # fails. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.failed"] @@ -324,7 +324,7 @@ class ThreadRunFailed < OpenAI::Internal::Type::BaseModel class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -336,7 +336,7 @@ class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to a `cancelling` status. 
+ # # moves to a `cancelling` status. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.cancelling"] @@ -349,7 +349,7 @@ class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -361,7 +361,7 @@ class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # is cancelled. + # # is cancelled. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.cancelled"] @@ -374,7 +374,7 @@ class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel class ThreadRunExpired < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -386,7 +386,7 @@ class ThreadRunExpired < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # expires. + # # expires. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.expired"] @@ -410,8 +410,8 @@ class ThreadRunStepCreated < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # is created. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # is created. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.created"] @@ -435,8 +435,8 @@ class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # moves to an `in_progress` state. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # moves to an `in_progress` state. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.in_progress"] @@ -449,7 +449,7 @@ class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a run step delta i.e. any changed fields on a run step during - # streaming. + # streaming. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent] required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent } @@ -461,8 +461,8 @@ class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when parts of a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # are being streamed. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # are being streamed. 
# # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent] # # @param event [Symbol, :"thread.run.step.delta"] @@ -486,8 +486,8 @@ class ThreadRunStepCompleted < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # is completed. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # is completed. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.completed"] @@ -511,8 +511,8 @@ class ThreadRunStepFailed < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # fails. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # fails. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.failed"] @@ -536,8 +536,8 @@ class ThreadRunStepCancelled < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # is cancelled. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # is cancelled. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.cancelled"] @@ -561,8 +561,8 @@ class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # expires. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # expires. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.expired"] @@ -575,7 +575,7 @@ class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Message] required :data, -> { OpenAI::Models::Beta::Threads::Message } @@ -587,8 +587,8 @@ class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [message](https://platform.openai.com/docs/api-reference/messages/object) is - # # created. + # # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # # created. # # # # @param data [OpenAI::Models::Beta::Threads::Message] # # @param event [Symbol, :"thread.message.created"] @@ -601,7 +601,7 @@ class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Message] required :data, -> { OpenAI::Models::Beta::Threads::Message } @@ -613,8 +613,8 @@ class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [message](https://platform.openai.com/docs/api-reference/messages/object) moves - # # to an `in_progress` state. 
+ # # [message](https://platform.openai.com/docs/api-reference/messages/object) moves + # # to an `in_progress` state. # # # # @param data [OpenAI::Models::Beta::Threads::Message] # # @param event [Symbol, :"thread.message.in_progress"] @@ -627,7 +627,7 @@ class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a message delta i.e. any changed fields on a message during - # streaming. + # streaming. # # @return [OpenAI::Models::Beta::Threads::MessageDeltaEvent] required :data, -> { OpenAI::Models::Beta::Threads::MessageDeltaEvent } @@ -639,8 +639,8 @@ class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when parts of a - # # [Message](https://platform.openai.com/docs/api-reference/messages/object) are - # # being streamed. + # # [Message](https://platform.openai.com/docs/api-reference/messages/object) are + # # being streamed. # # # # @param data [OpenAI::Models::Beta::Threads::MessageDeltaEvent] # # @param event [Symbol, :"thread.message.delta"] @@ -653,7 +653,7 @@ class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Message] required :data, -> { OpenAI::Models::Beta::Threads::Message } @@ -665,8 +665,8 @@ class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [message](https://platform.openai.com/docs/api-reference/messages/object) is - # # completed. + # # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # # completed. # # # # @param data [OpenAI::Models::Beta::Threads::Message] # # @param event [Symbol, :"thread.message.completed"] @@ -679,7 +679,7 @@ class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Message] required :data, -> { OpenAI::Models::Beta::Threads::Message } @@ -691,8 +691,8 @@ class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [message](https://platform.openai.com/docs/api-reference/messages/object) ends - # # before it is completed. + # # [message](https://platform.openai.com/docs/api-reference/messages/object) ends + # # before it is completed. # # # # @param data [OpenAI::Models::Beta::Threads::Message] # # @param event [Symbol, :"thread.message.incomplete"] @@ -715,8 +715,8 @@ class ErrorEvent < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when an - # # [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. - # # This can happen due to an internal server error or a timeout. + # # [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. + # # This can happen due to an internal server error or a timeout. 
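The event variants enumerated in these hunks map naturally onto a `case`/`when` dispatch. A hedged sketch: only the model class names come from this patch, and how the SDK yields parsed events to the caller is an assumption:

# Hedged sketch: dispatching parsed stream events by class.
def consume(events)
  events.each do |event|
    case event
    when OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted
      puts "run finished: #{event.data.id}"
    when OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta
      puts "delta: #{event.data.inspect}" # carries only the changed message fields
    when OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent
      warn "stream error: #{event.data.inspect}" # internal server error or timeout
    else
      # New event types may appear over time; ignore unknowns gracefully.
    end
  end
end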
# # # # @param data [OpenAI::Models::ErrorObject] # # @param event [Symbol, :error] diff --git a/lib/openai/models/beta/assistant_tool_choice.rb b/lib/openai/models/beta/assistant_tool_choice.rb index 0c1c8cb9..1dff1877 100644 --- a/lib/openai/models/beta/assistant_tool_choice.rb +++ b/lib/openai/models/beta/assistant_tool_choice.rb @@ -21,7 +21,7 @@ class AssistantToolChoice < OpenAI::Internal::Type::BaseModel # @!parse # # Specifies a tool the model should use. Use to force the model to call a specific - # # tool. + # # tool. # # # # @param type [Symbol, OpenAI::Models::Beta::AssistantToolChoice::Type] # # @param function [OpenAI::Models::Beta::AssistantToolChoiceFunction] diff --git a/lib/openai/models/beta/assistant_tool_choice_option.rb b/lib/openai/models/beta/assistant_tool_choice_option.rb index 5445f1a6..69293cbc 100644 --- a/lib/openai/models/beta/assistant_tool_choice_option.rb +++ b/lib/openai/models/beta/assistant_tool_choice_option.rb @@ -4,12 +4,12 @@ module OpenAI module Models module Beta # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. module AssistantToolChoiceOption extend OpenAI::Internal::Type::Union @@ -20,9 +20,9 @@ module AssistantToolChoiceOption variant -> { OpenAI::Models::Beta::AssistantToolChoice } # `none` means the model will not call any tools and instead generates a message. - # `auto` means the model can pick between generating a message or calling one or - # more tools. `required` means the model must call one or more tools before - # responding to the user. + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools before + # responding to the user. module Auto extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/beta/assistant_update_params.rb b/lib/openai/models/beta/assistant_update_params.rb index 2fe97acb..299fb194 100644 --- a/lib/openai/models/beta/assistant_update_params.rb +++ b/lib/openai/models/beta/assistant_update_params.rb @@ -17,28 +17,28 @@ class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel # @!attribute instructions # The system instructions that the assistant uses. The maximum length is 256,000 - # characters. + # characters. # # @return [String, nil] optional :instructions, String, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. 
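For reference, the accepted `tool_choice` shapes described in the `AssistantToolChoiceOption` hunk above look like this in Ruby; the values are taken directly from the comment text, while the symbol spelling and the function name are assumptions:

# Hedged sketch of the three `tool_choice` forms documented above.
tool_choice_auto     = :auto      # model may answer directly or call tools (default)
tool_choice_required = :required  # model must call at least one tool first
tool_choice_forced   = {
  type: :function,
  function: { name: "my_function" } # force this specific hypothetical tool
}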
+ # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute [r] model # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. # # @return [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model, nil] optional :model, union: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model } @@ -56,60 +56,60 @@ class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::Models::ReasoningEffort }, nil?: true # @!attribute response_format # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. 
Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. # # @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] optional :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. # # @return [Float, nil] optional :temperature, Float, nil?: true # @!attribute tool_resources # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. # # @return [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources, nil] optional :tool_resources, -> { OpenAI::Models::Beta::AssistantUpdateParams::ToolResources }, nil?: true # @!attribute [r] tools # A list of tool enabled on the assistant. There can be a maximum of 128 tools per - # assistant. Tools can be of types `code_interpreter`, `file_search`, or - # `function`. + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. # # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Beta::AssistantTool] } @@ -120,10 +120,10 @@ class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. # # @return [Float, nil] optional :top_p, Float, nil?: true @@ -163,10 +163,10 @@ class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # ID of the model to use. 
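Putting the `response_format` and sampling notes above together, a hedged update sketch; the `client.beta.assistants.update` call and positional ID are inferred from the params class in this patch, not confirmed here:

# Hedged sketch: switching an assistant to JSON mode. With `json_object` you
# must also instruct the model to produce JSON, or it may stream whitespace
# until the token limit; "asst_abc123" is a hypothetical ID.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

client.beta.assistants.update(
  "asst_abc123",
  instructions: "You are a helpful assistant. Always reply with valid JSON.",
  response_format: { type: :json_object },
  temperature: 0.2 # tune this or top_p, but generally not both
)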
You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. module Model extend OpenAI::Internal::Type::Union @@ -294,9 +294,9 @@ class ToolResources < OpenAI::Internal::Type::BaseModel # @!parse # # A set of resources that are used by the assistant's tools. The resources are - # # specific to the type of tool. For example, the `code_interpreter` tool requires - # # a list of file IDs, while the `file_search` tool requires a list of vector store - # # IDs. + # # specific to the type of tool. For example, the `code_interpreter` tool requires + # # a list of file IDs, while the `file_search` tool requires a list of vector store + # # IDs. # # # # @param code_interpreter [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter] # # @param file_search [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch] @@ -309,9 +309,9 @@ class ToolResources < OpenAI::Internal::Type::BaseModel class CodeInterpreter < OpenAI::Internal::Type::BaseModel # @!attribute [r] file_ids # Overrides the list of - # [file](https://platform.openai.com/docs/api-reference/files) IDs made available - # to the `code_interpreter` tool. There can be a maximum of 20 files associated - # with the tool. + # [file](https://platform.openai.com/docs/api-reference/files) IDs made available + # to the `code_interpreter` tool. There can be a maximum of 20 files associated + # with the tool. # # @return [Array, nil] optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -332,9 +332,9 @@ class CodeInterpreter < OpenAI::Internal::Type::BaseModel class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute [r] vector_store_ids # Overrides the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this assistant. There can be a maximum of 1 vector store attached to - # the assistant. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. # # @return [Array, nil] optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String] diff --git a/lib/openai/models/beta/file_search_tool.rb b/lib/openai/models/beta/file_search_tool.rb index 2428a1a0..76343e2f 100644 --- a/lib/openai/models/beta/file_search_tool.rb +++ b/lib/openai/models/beta/file_search_tool.rb @@ -32,13 +32,13 @@ class FileSearchTool < OpenAI::Internal::Type::BaseModel class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute [r] max_num_results # The maximum number of results the file search tool should output. The default is - # 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between - # 1 and 50 inclusive. + # 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between + # 1 and 50 inclusive. # - # Note that the file search tool may output fewer than `max_num_results` results. - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. 
+ # Note that the file search tool may output fewer than `max_num_results` results. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. # # @return [Integer, nil] optional :max_num_results, Integer @@ -49,11 +49,11 @@ class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute [r] ranking_options # The ranking options for the file search. If not specified, the file search tool - # will use the `auto` ranker and a score_threshold of 0. + # will use the `auto` ranker and a score_threshold of 0. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. # # @return [OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions, nil] optional :ranking_options, -> { OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions } @@ -76,14 +76,14 @@ class FileSearch < OpenAI::Internal::Type::BaseModel class RankingOptions < OpenAI::Internal::Type::BaseModel # @!attribute score_threshold # The score threshold for the file search. All values must be a floating point - # number between 0 and 1. + # number between 0 and 1. # # @return [Float] required :score_threshold, Float # @!attribute [r] ranker # The ranker to use for the file search. If not specified will use the `auto` - # ranker. + # ranker. # # @return [Symbol, OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker, nil] optional :ranker, enum: -> { OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker } @@ -94,11 +94,11 @@ class RankingOptions < OpenAI::Internal::Type::BaseModel # @!parse # # The ranking options for the file search. If not specified, the file search tool - # # will use the `auto` ranker and a score_threshold of 0. + # # will use the `auto` ranker and a score_threshold of 0. # # - # # See the - # # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # # for more information. + # # See the + # # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # # for more information. # # # # @param score_threshold [Float] # # @param ranker [Symbol, OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker] @@ -108,7 +108,7 @@ class RankingOptions < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The ranker to use for the file search. If not specified will use the `auto` - # ranker. + # ranker. # # @see OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions#ranker module Ranker diff --git a/lib/openai/models/beta/message_stream_event.rb b/lib/openai/models/beta/message_stream_event.rb index 0657f158..09590507 100644 --- a/lib/openai/models/beta/message_stream_event.rb +++ b/lib/openai/models/beta/message_stream_event.rb @@ -4,8 +4,8 @@ module OpenAI module Models module Beta # Occurs when a - # [message](https://platform.openai.com/docs/api-reference/messages/object) is - # created. + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. 
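A hedged sketch of a `file_search` tool definition using the limits documented in the `FileSearchTool` hunks above; the hash nesting mirrors the attribute names in this patch, and attaching the tool to an assistant is left out:

# Hedged sketch of a file_search tool with explicit ranking options.
file_search_tool = {
  type: :file_search,
  file_search: {
    max_num_results: 10, # must be between 1 and 50 inclusive
    ranking_options: {
      ranker: :auto,        # the default ranker when unspecified
      score_threshold: 0.5  # floating point number between 0 and 1
    }
  }
}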
module MessageStreamEvent extend OpenAI::Internal::Type::Union @@ -32,7 +32,7 @@ module MessageStreamEvent class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Message] required :data, -> { OpenAI::Models::Beta::Threads::Message } @@ -44,8 +44,8 @@ class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [message](https://platform.openai.com/docs/api-reference/messages/object) is - # # created. + # # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # # created. # # # # @param data [OpenAI::Models::Beta::Threads::Message] # # @param event [Symbol, :"thread.message.created"] @@ -58,7 +58,7 @@ class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Message] required :data, -> { OpenAI::Models::Beta::Threads::Message } @@ -70,8 +70,8 @@ class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [message](https://platform.openai.com/docs/api-reference/messages/object) moves - # # to an `in_progress` state. + # # [message](https://platform.openai.com/docs/api-reference/messages/object) moves + # # to an `in_progress` state. # # # # @param data [OpenAI::Models::Beta::Threads::Message] # # @param event [Symbol, :"thread.message.in_progress"] @@ -84,7 +84,7 @@ class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a message delta i.e. any changed fields on a message during - # streaming. + # streaming. # # @return [OpenAI::Models::Beta::Threads::MessageDeltaEvent] required :data, -> { OpenAI::Models::Beta::Threads::MessageDeltaEvent } @@ -96,8 +96,8 @@ class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when parts of a - # # [Message](https://platform.openai.com/docs/api-reference/messages/object) are - # # being streamed. + # # [Message](https://platform.openai.com/docs/api-reference/messages/object) are + # # being streamed. # # # # @param data [OpenAI::Models::Beta::Threads::MessageDeltaEvent] # # @param event [Symbol, :"thread.message.delta"] @@ -110,7 +110,7 @@ class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Message] required :data, -> { OpenAI::Models::Beta::Threads::Message } @@ -122,8 +122,8 @@ class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [message](https://platform.openai.com/docs/api-reference/messages/object) is - # # completed. + # # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # # completed. 
# # # # @param data [OpenAI::Models::Beta::Threads::Message] # # @param event [Symbol, :"thread.message.completed"] @@ -136,7 +136,7 @@ class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Message] required :data, -> { OpenAI::Models::Beta::Threads::Message } @@ -148,8 +148,8 @@ class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [message](https://platform.openai.com/docs/api-reference/messages/object) ends - # # before it is completed. + # # [message](https://platform.openai.com/docs/api-reference/messages/object) ends + # # before it is completed. # # # # @param data [OpenAI::Models::Beta::Threads::Message] # # @param event [Symbol, :"thread.message.incomplete"] diff --git a/lib/openai/models/beta/run_step_stream_event.rb b/lib/openai/models/beta/run_step_stream_event.rb index 93f240f7..444add26 100644 --- a/lib/openai/models/beta/run_step_stream_event.rb +++ b/lib/openai/models/beta/run_step_stream_event.rb @@ -4,8 +4,8 @@ module OpenAI module Models module Beta # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # is created. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. module RunStepStreamEvent extend OpenAI::Internal::Type::Union @@ -49,8 +49,8 @@ class ThreadRunStepCreated < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # is created. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # is created. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.created"] @@ -74,8 +74,8 @@ class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # moves to an `in_progress` state. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # moves to an `in_progress` state. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.in_progress"] @@ -88,7 +88,7 @@ class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a run step delta i.e. any changed fields on a run step during - # streaming. + # streaming. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent] required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent } @@ -100,8 +100,8 @@ class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when parts of a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # are being streamed. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # are being streamed. 
# # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent] # # @param event [Symbol, :"thread.run.step.delta"] @@ -125,8 +125,8 @@ class ThreadRunStepCompleted < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # is completed. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # is completed. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.completed"] @@ -150,8 +150,8 @@ class ThreadRunStepFailed < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # fails. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # fails. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.failed"] @@ -175,8 +175,8 @@ class ThreadRunStepCancelled < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # is cancelled. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # is cancelled. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.cancelled"] @@ -200,8 +200,8 @@ class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # expires. + # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # # expires. # # # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] # # @param event [Symbol, :"thread.run.step.expired"] diff --git a/lib/openai/models/beta/run_stream_event.rb b/lib/openai/models/beta/run_stream_event.rb index 14e5177f..90552346 100644 --- a/lib/openai/models/beta/run_stream_event.rb +++ b/lib/openai/models/beta/run_stream_event.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta # Occurs when a new - # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. module RunStreamEvent extend OpenAI::Internal::Type::Union @@ -44,7 +44,7 @@ module RunStreamEvent class ThreadRunCreated < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -56,7 +56,7 @@ class ThreadRunCreated < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a new - # # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + # # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.created"] @@ -69,7 +69,7 @@ class ThreadRunCreated < OpenAI::Internal::Type::BaseModel class ThreadRunQueued < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). 
# # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -81,7 +81,7 @@ class ThreadRunQueued < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to a `queued` status. + # # moves to a `queued` status. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.queued"] @@ -94,7 +94,7 @@ class ThreadRunQueued < OpenAI::Internal::Type::BaseModel class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -106,7 +106,7 @@ class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to an `in_progress` status. + # # moves to an `in_progress` status. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.in_progress"] @@ -119,7 +119,7 @@ class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -131,7 +131,7 @@ class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to a `requires_action` status. + # # moves to a `requires_action` status. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.requires_action"] @@ -144,7 +144,7 @@ class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -156,7 +156,7 @@ class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # is completed. + # # is completed. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.completed"] @@ -169,7 +169,7 @@ class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -181,7 +181,7 @@ class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # ends with status `incomplete`. + # # ends with status `incomplete`. 
# # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.incomplete"] @@ -194,7 +194,7 @@ class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel class ThreadRunFailed < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -206,7 +206,7 @@ class ThreadRunFailed < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # fails. + # # fails. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.failed"] @@ -219,7 +219,7 @@ class ThreadRunFailed < OpenAI::Internal::Type::BaseModel class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -231,7 +231,7 @@ class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to a `cancelling` status. + # # moves to a `cancelling` status. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.cancelling"] @@ -244,7 +244,7 @@ class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -256,7 +256,7 @@ class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # is cancelled. + # # is cancelled. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.cancelled"] @@ -269,7 +269,7 @@ class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel class ThreadRunExpired < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] required :data, -> { OpenAI::Models::Beta::Threads::Run } @@ -281,7 +281,7 @@ class ThreadRunExpired < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # expires. + # # expires. # # # # @param data [OpenAI::Models::Beta::Threads::Run] # # @param event [Symbol, :"thread.run.expired"] diff --git a/lib/openai/models/beta/thread.rb b/lib/openai/models/beta/thread.rb index ab9384f2..5f08e2ff 100644 --- a/lib/openai/models/beta/thread.rb +++ b/lib/openai/models/beta/thread.rb @@ -19,11 +19,11 @@ class Thread < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. 
This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -36,16 +36,16 @@ class Thread < OpenAI::Internal::Type::BaseModel # @!attribute tool_resources # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. # # @return [OpenAI::Models::Beta::Thread::ToolResources, nil] required :tool_resources, -> { OpenAI::Models::Beta::Thread::ToolResources }, nil?: true # @!parse # # Represents a thread that contains - # # [messages](https://platform.openai.com/docs/api-reference/messages). + # # [messages](https://platform.openai.com/docs/api-reference/messages). # # # # @param id [String] # # @param created_at [Integer] @@ -79,9 +79,9 @@ class ToolResources < OpenAI::Internal::Type::BaseModel # @!parse # # A set of resources that are made available to the assistant's tools in this - # # thread. The resources are specific to the type of tool. For example, the - # # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # # tool requires a list of vector store IDs. + # # thread. The resources are specific to the type of tool. For example, the + # # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # # tool requires a list of vector store IDs. # # # # @param code_interpreter [OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter] # # @param file_search [OpenAI::Models::Beta::Thread::ToolResources::FileSearch] @@ -94,8 +94,8 @@ class ToolResources < OpenAI::Internal::Type::BaseModel class CodeInterpreter < OpenAI::Internal::Type::BaseModel # @!attribute [r] file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. # # @return [Array, nil] optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -116,9 +116,9 @@ class CodeInterpreter < OpenAI::Internal::Type::BaseModel class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute [r] vector_store_ids # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this thread. There can be a maximum of 1 vector store attached to - # the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. 
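
A minimal sketch of reading the `vector_store_ids` field documented above back off a retrieved thread; the client setup and thread ID are illustrative placeholders, not part of this API's guarantees.

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
thread = client.beta.threads.retrieve("thread_abc") # placeholder thread ID

# Both levels are nil-able, hence the safe navigation; at most one
# vector store can be attached to a thread.
ids = thread.tool_resources&.file_search&.vector_store_ids
puts ids.inspect
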
# # @return [Array, nil] optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String] diff --git a/lib/openai/models/beta/thread_create_and_run_params.rb b/lib/openai/models/beta/thread_create_and_run_params.rb index 38c89eea..4612ebd8 100644 --- a/lib/openai/models/beta/thread_create_and_run_params.rb +++ b/lib/openai/models/beta/thread_create_and_run_params.rb @@ -13,63 +13,63 @@ class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel # @!attribute assistant_id # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. # # @return [String] required :assistant_id, String # @!attribute instructions # Override the default system message of the assistant. This is useful for - # modifying the behavior on a per-run basis. + # modifying the behavior on a per-run basis. # # @return [String, nil] optional :instructions, String, nil?: true # @!attribute max_completion_tokens # The maximum number of completion tokens that may be used over the course of the - # run. The run will make a best effort to use only the number of completion tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # completion tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. # # @return [Integer, nil] optional :max_completion_tokens, Integer, nil?: true # @!attribute max_prompt_tokens # The maximum number of prompt tokens that may be used over the course of the run. - # The run will make a best effort to use only the number of prompt tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # prompt tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. # # @return [Integer, nil] optional :max_prompt_tokens, Integer, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute model # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. 
If not, the model associated with the - # assistant will be used. + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. # # @return [String, Symbol, OpenAI::Models::ChatModel, nil] optional :model, union: -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Model }, nil?: true # @!attribute [r] parallel_tool_calls # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. # # @return [Boolean, nil] optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean @@ -80,40 +80,40 @@ class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel # @!attribute response_format # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. 
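
A minimal sketch of supplying the `response_format` described above through `create_and_run`, assuming a configured client; the assistant ID is an illustrative placeholder, and note that JSON mode still requires asking the model for JSON in a message.

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
run = client.beta.threads.create_and_run(
  assistant_id: "asst_123",                # placeholder assistant ID
  response_format: {type: "json_object"},  # JSON mode
  thread: {
    # JSON mode requires instructing the model to produce JSON:
    messages: [{role: :user, content: "Summarize today's plan as a JSON object."}]
  }
)
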
# # @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] optional :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. # # @return [Float, nil] optional :temperature, Float, nil?: true # @!attribute [r] thread # Options to create a new thread. If no thread is provided when running a request, - # an empty thread will be created. + # an empty thread will be created. # # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, nil] optional :thread, -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread } @@ -124,28 +124,28 @@ class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel # @!attribute tool_choice # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. # # @return [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] optional :tool_choice, union: -> { OpenAI::Models::Beta::AssistantToolChoiceOption }, nil?: true # @!attribute tool_resources # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. # # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] optional :tool_resources, -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources }, nil?: true # @!attribute tools # Override the tools the assistant can use for this run. This is useful for - # modifying the behavior on a per-run basis. + # modifying the behavior on a per-run basis. # # @return [Array, nil] optional :tools, @@ -154,17 +154,17 @@ class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. 
+ # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. # # @return [Float, nil] optional :top_p, Float, nil?: true # @!attribute truncation_strategy # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the initial context window of the run. # # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] optional :truncation_strategy, @@ -214,9 +214,9 @@ class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. module Model extend OpenAI::Internal::Type::Union @@ -233,7 +233,7 @@ module Model class Thread < OpenAI::Internal::Type::BaseModel # @!attribute [r] messages # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to - # start the thread with. + # start the thread with. # # @return [Array, nil] optional :messages, @@ -245,20 +245,20 @@ class Thread < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute tool_resources # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # tool requires a list of vector store IDs. # # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources, nil] optional :tool_resources, @@ -267,7 +267,7 @@ class Thread < OpenAI::Internal::Type::BaseModel # @!parse # # Options to create a new thread. If no thread is provided when running a request, - # # an empty thread will be created. + # # an empty thread will be created.
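
A minimal sketch of the per-run overrides documented above (`model`, `top_p`, `truncation_strategy`), assuming a configured client; the assistant ID is a placeholder, and omitting `thread:` creates an empty thread.

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
run = client.beta.threads.create_and_run(
  assistant_id: "asst_123",  # placeholder assistant ID
  model: "gpt-4o",           # overrides the assistant's default model for this run
  top_p: 0.2,                # alter this or temperature, not both
  truncation_strategy: {type: :last_messages, last_messages: 10}
)
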
# # # # @param messages [Array] # # @param metadata [Hash{Symbol=>String}, nil] @@ -287,10 +287,10 @@ class Message < OpenAI::Internal::Type::BaseModel # @!attribute role # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. # # @return [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Role] required :role, enum: -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Role } @@ -305,11 +305,11 @@ class Message < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -346,10 +346,10 @@ module Content # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. # # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message#role module Role @@ -451,9 +451,9 @@ class ToolResources < OpenAI::Internal::Type::BaseModel # @!parse # # A set of resources that are made available to the assistant's tools in this - # # thread. The resources are specific to the type of tool. For example, the - # # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # # tool requires a list of vector store IDs. + # # thread. The resources are specific to the type of tool. For example, the + # # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # # tool requires a list of vector store IDs. 
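
A minimal sketch of the thread-level `tool_resources` shape described above; the file and vector store IDs are illustrative placeholders.

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
run = client.beta.threads.create_and_run(
  assistant_id: "asst_123",  # placeholder assistant ID
  thread: {
    tool_resources: {
      code_interpreter: {file_ids: ["file_abc"]},  # up to 20 files
      file_search: {vector_store_ids: ["vs_abc"]}  # at most 1 vector store
    }
  }
)
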
# # # # @param code_interpreter [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter] # # @param file_search [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch] @@ -466,8 +466,8 @@ class ToolResources < OpenAI::Internal::Type::BaseModel class CodeInterpreter < OpenAI::Internal::Type::BaseModel # @!attribute [r] file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. # # @return [Array, nil] optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -488,9 +488,9 @@ class CodeInterpreter < OpenAI::Internal::Type::BaseModel class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute [r] vector_store_ids # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this thread. There can be a maximum of 1 vector store attached to - # the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. # # @return [Array, nil] optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -501,9 +501,9 @@ class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute [r] vector_stores # A helper to create a - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # with file_ids and attach it to this thread. There can be a maximum of 1 vector - # store attached to the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. # # @return [Array, nil] optional :vector_stores, @@ -524,7 +524,7 @@ class FileSearch < OpenAI::Internal::Type::BaseModel class VectorStore < OpenAI::Internal::Type::BaseModel # @!attribute [r] chunking_strategy # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. # # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, nil] optional :chunking_strategy, @@ -536,8 +536,8 @@ class VectorStore < OpenAI::Internal::Type::BaseModel # @!attribute [r] file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to - # add to the vector store. There can be a maximum of 10000 files in a vector - # store. + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. # # @return [Array, nil] optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -548,11 +548,11 @@ class VectorStore < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. 
Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -567,7 +567,7 @@ class VectorStore < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. # # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore#chunking_strategy module ChunkingStrategy @@ -591,7 +591,7 @@ class Auto < OpenAI::Internal::Type::BaseModel # @!parse # # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - # # `800` and `chunk_overlap_tokens` of `400`. + # # `800` and `chunk_overlap_tokens` of `400`. # # # # @param type [Symbol, :auto] # # @@ -626,14 +626,14 @@ class Static < OpenAI::Internal::Type::BaseModel # @!attribute chunk_overlap_tokens # The number of tokens that overlap between chunks. The default value is `400`. # - # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. # # @return [Integer] required :chunk_overlap_tokens, Integer # @!attribute max_chunk_size_tokens # The maximum number of tokens in each chunk. The default value is `800`. The - # minimum value is `100` and the maximum value is `4096`. + # minimum value is `100` and the maximum value is `4096`. # # @return [Integer] required :max_chunk_size_tokens, Integer @@ -679,9 +679,9 @@ class ToolResources < OpenAI::Internal::Type::BaseModel # @!parse # # A set of resources that are used by the assistant's tools. The resources are - # # specific to the type of tool. For example, the `code_interpreter` tool requires - # # a list of file IDs, while the `file_search` tool requires a list of vector store - # # IDs. + # # specific to the type of tool. For example, the `code_interpreter` tool requires + # # a list of file IDs, while the `file_search` tool requires a list of vector store + # # IDs. # # # # @param code_interpreter [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter] # # @param file_search [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch] @@ -694,8 +694,8 @@ class ToolResources < OpenAI::Internal::Type::BaseModel class CodeInterpreter < OpenAI::Internal::Type::BaseModel # @!attribute [r] file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. # # @return [Array, nil] optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -716,9 +716,9 @@ class CodeInterpreter < OpenAI::Internal::Type::BaseModel class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute [r] vector_store_ids # The ID of the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this assistant. There can be a maximum of 1 vector store attached to - # the assistant. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. 
There can be a maximum of 1 vector store attached to + # the assistant. # # @return [Array, nil] optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -753,23 +753,23 @@ module Tool class TruncationStrategy < OpenAI::Internal::Type::BaseModel # @!attribute type # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. # # @return [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type] required :type, enum: -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type } # @!attribute last_messages # The number of most recent messages from the thread when constructing the context - # for the run. + # for the run. # # @return [Integer, nil] optional :last_messages, Integer, nil?: true # @!parse # # Controls for how a thread will be truncated prior to the run. Use this to - # # control the intial context window of the run. + # # control the initial context window of the run. # # # # @param type [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type] # # @param last_messages [Integer, nil] @@ -779,9 +779,9 @@ class TruncationStrategy < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. # # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy#type module Type diff --git a/lib/openai/models/beta/thread_create_params.rb b/lib/openai/models/beta/thread_create_params.rb index 6e46487b..08818856 100644 --- a/lib/openai/models/beta/thread_create_params.rb +++ b/lib/openai/models/beta/thread_create_params.rb @@ -11,7 +11,7 @@ class ThreadCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] messages # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to - # start the thread with. + # start the thread with. # # @return [Array, nil] optional :messages, @@ -23,20 +23,20 @@ class ThreadCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute tool_resources # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. # # @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources, nil] optional :tool_resources, -> { OpenAI::Models::Beta::ThreadCreateParams::ToolResources }, nil?: true @@ -61,10 +61,10 @@ class Message < OpenAI::Internal::Type::BaseModel # @!attribute role # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. # # @return [Symbol, OpenAI::Models::Beta::ThreadCreateParams::Message::Role] required :role, enum: -> { OpenAI::Models::Beta::ThreadCreateParams::Message::Role } @@ -79,11 +79,11 @@ class Message < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -120,10 +120,10 @@ module Content # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. 
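
A minimal sketch of seeding a new thread with both message roles described above, assuming a configured client; the conversation content is illustrative only.

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
thread = client.beta.threads.create(
  messages: [
    {role: :user, content: "What is a vector store?"},
    # `assistant` inserts a prior assistant turn into the conversation:
    {role: :assistant, content: "A searchable index built from uploaded files."},
    {role: :user, content: "How many can be attached to a thread?"}
  ]
)
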
# # @see OpenAI::Models::Beta::ThreadCreateParams::Message#role module Role @@ -223,9 +223,9 @@ class ToolResources < OpenAI::Internal::Type::BaseModel # @!parse # # A set of resources that are made available to the assistant's tools in this - # # thread. The resources are specific to the type of tool. For example, the - # # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # # tool requires a list of vector store IDs. + # # thread. The resources are specific to the type of tool. For example, the + # # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # # tool requires a list of vector store IDs. # # # # @param code_interpreter [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter] # # @param file_search [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch] @@ -238,8 +238,8 @@ class ToolResources < OpenAI::Internal::Type::BaseModel class CodeInterpreter < OpenAI::Internal::Type::BaseModel # @!attribute [r] file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. # # @return [Array, nil] optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -260,9 +260,9 @@ class CodeInterpreter < OpenAI::Internal::Type::BaseModel class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute [r] vector_store_ids # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this thread. There can be a maximum of 1 vector store attached to - # the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. # # @return [Array, nil] optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -273,9 +273,9 @@ class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute [r] vector_stores # A helper to create a - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # with file_ids and attach it to this thread. There can be a maximum of 1 vector - # store attached to the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. # # @return [Array, nil] optional :vector_stores, @@ -296,7 +296,7 @@ class FileSearch < OpenAI::Internal::Type::BaseModel class VectorStore < OpenAI::Internal::Type::BaseModel # @!attribute [r] chunking_strategy # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. # # @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, nil] optional :chunking_strategy, @@ -308,8 +308,8 @@ class VectorStore < OpenAI::Internal::Type::BaseModel # @!attribute [r] file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to - # add to the vector store. There can be a maximum of 10000 files in a vector - # store. + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. 
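
A minimal sketch of the `vector_stores` helper described above, combining `file_ids` with an explicit chunking strategy; the IDs are placeholders, the nesting follows the documented REST shape, and the static values mirror the documented defaults (overlap at most half the chunk size).

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
thread = client.beta.threads.create(
  tool_resources: {
    file_search: {
      vector_stores: [{
        file_ids: ["file_abc", "file_def"],  # up to 10000 files per vector store
        chunking_strategy: {
          type: :static,
          static: {max_chunk_size_tokens: 800, chunk_overlap_tokens: 400}
        }
      }]
    }
  }
)
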
# # @return [Array, nil] optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -320,11 +320,11 @@ class VectorStore < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -339,7 +339,7 @@ class VectorStore < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. # # @see OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore#chunking_strategy module ChunkingStrategy @@ -363,7 +363,7 @@ class Auto < OpenAI::Internal::Type::BaseModel # @!parse # # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - # # `800` and `chunk_overlap_tokens` of `400`. + # # `800` and `chunk_overlap_tokens` of `400`. # # # # @param type [Symbol, :auto] # # @@ -398,14 +398,14 @@ class Static < OpenAI::Internal::Type::BaseModel # @!attribute chunk_overlap_tokens # The number of tokens that overlap between chunks. The default value is `400`. # - # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. # # @return [Integer] required :chunk_overlap_tokens, Integer # @!attribute max_chunk_size_tokens # The maximum number of tokens in each chunk. The default value is `800`. The - # minimum value is `100` and the maximum value is `4096`. + # minimum value is `100` and the maximum value is `4096`. # # @return [Integer] required :max_chunk_size_tokens, Integer diff --git a/lib/openai/models/beta/thread_stream_event.rb b/lib/openai/models/beta/thread_stream_event.rb index 416358fd..e67ecf5f 100644 --- a/lib/openai/models/beta/thread_stream_event.rb +++ b/lib/openai/models/beta/thread_stream_event.rb @@ -6,7 +6,7 @@ module Beta class ThreadStreamEvent < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a thread that contains - # [messages](https://platform.openai.com/docs/api-reference/messages). + # [messages](https://platform.openai.com/docs/api-reference/messages). # # @return [OpenAI::Models::Beta::Thread] required :data, -> { OpenAI::Models::Beta::Thread } @@ -28,8 +28,8 @@ class ThreadStreamEvent < OpenAI::Internal::Type::BaseModel # @!parse # # Occurs when a new - # # [thread](https://platform.openai.com/docs/api-reference/threads/object) is - # # created. + # # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # # created. 
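
A minimal sketch of dispatching on stream events such as the `thread.created` event modeled above; how the stream is obtained is elided here, and `stream` is simply assumed to yield these event models.

# `stream` is assumed to be an Enumerable of the event models above.
stream.each do |event|
  case event.event
  when :"thread.created"
    puts "thread #{event.data.id} created"
  when :"thread.run.completed"
    puts "run #{event.data.id} finished as #{event.data.status}"
  end
end
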
# # # # @param data [OpenAI::Models::Beta::Thread] # # @param enabled [Boolean] diff --git a/lib/openai/models/beta/thread_update_params.rb b/lib/openai/models/beta/thread_update_params.rb index 10ec2820..07938465 100644 --- a/lib/openai/models/beta/thread_update_params.rb +++ b/lib/openai/models/beta/thread_update_params.rb @@ -11,20 +11,20 @@ class ThreadUpdateParams < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute tool_resources # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. # # @return [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources, nil] optional :tool_resources, -> { OpenAI::Models::Beta::ThreadUpdateParams::ToolResources }, nil?: true @@ -60,9 +60,9 @@ class ToolResources < OpenAI::Internal::Type::BaseModel # @!parse # # A set of resources that are made available to the assistant's tools in this - # # thread. The resources are specific to the type of tool. For example, the - # # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # # tool requires a list of vector store IDs. + # # thread. The resources are specific to the type of tool. For example, the + # # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # # tool requires a list of vector store IDs. # # # # @param code_interpreter [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter] # # @param file_search [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch] @@ -75,8 +75,8 @@ class ToolResources < OpenAI::Internal::Type::BaseModel class CodeInterpreter < OpenAI::Internal::Type::BaseModel # @!attribute [r] file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. # # @return [Array, nil] optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -97,9 +97,9 @@ class CodeInterpreter < OpenAI::Internal::Type::BaseModel class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute [r] vector_store_ids # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this thread. There can be a maximum of 1 vector store attached to - # the thread. 
+ # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. # # @return [Array, nil] optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String] diff --git a/lib/openai/models/beta/threads/annotation.rb b/lib/openai/models/beta/threads/annotation.rb index 0086d3c1..10511e12 100644 --- a/lib/openai/models/beta/threads/annotation.rb +++ b/lib/openai/models/beta/threads/annotation.rb @@ -5,8 +5,8 @@ module Models module Beta module Threads # A citation within the message that points to a specific quote from a specific - # File associated with the assistant or the message. Generated when the assistant - # uses the "file_search" tool to search files. + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. module Annotation extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/beta/threads/annotation_delta.rb b/lib/openai/models/beta/threads/annotation_delta.rb index b5a749fd..726c91ad 100644 --- a/lib/openai/models/beta/threads/annotation_delta.rb +++ b/lib/openai/models/beta/threads/annotation_delta.rb @@ -5,8 +5,8 @@ module Models module Beta module Threads # A citation within the message that points to a specific quote from a specific - # File associated with the assistant or the message. Generated when the assistant - # uses the "file_search" tool to search files. + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. module AnnotationDelta extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/beta/threads/file_citation_annotation.rb b/lib/openai/models/beta/threads/file_citation_annotation.rb index 404d9340..cf4eab2d 100644 --- a/lib/openai/models/beta/threads/file_citation_annotation.rb +++ b/lib/openai/models/beta/threads/file_citation_annotation.rb @@ -34,8 +34,8 @@ class FileCitationAnnotation < OpenAI::Internal::Type::BaseModel # @!parse # # A citation within the message that points to a specific quote from a specific - # # File associated with the assistant or the message. Generated when the assistant - # # uses the "file_search" tool to search files. + # # File associated with the assistant or the message. Generated when the assistant + # # uses the "file_search" tool to search files. # # # # @param end_index [Integer] # # @param file_citation [OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation] diff --git a/lib/openai/models/beta/threads/file_citation_delta_annotation.rb b/lib/openai/models/beta/threads/file_citation_delta_annotation.rb index 5077a210..d1ac99c2 100644 --- a/lib/openai/models/beta/threads/file_citation_delta_annotation.rb +++ b/lib/openai/models/beta/threads/file_citation_delta_annotation.rb @@ -56,8 +56,8 @@ class FileCitationDeltaAnnotation < OpenAI::Internal::Type::BaseModel # @!parse # # A citation within the message that points to a specific quote from a specific - # # File associated with the assistant or the message. Generated when the assistant - # # uses the "file_search" tool to search files. + # # File associated with the assistant or the message. Generated when the assistant + # # uses the "file_search" tool to search files. 
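
A minimal sketch of walking the annotations described above on a retrieved message; it assumes the usual text content block shape (`type`, `text.value`, `text.annotations`) and a `message` object fetched elsewhere.

# `message` is assumed to be a retrieved OpenAI::Models::Beta::Threads::Message.
message.content.each do |block|
  next unless block.type == :text
  block.text.annotations.each do |annotation|
    # `file_citation` annotations come from the `file_search` tool;
    # `file_path` annotations come from `code_interpreter` output files.
    puts "#{annotation.type}: #{annotation.text}"
  end
end
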
# # # # @param index [Integer] # # @param end_index [Integer] diff --git a/lib/openai/models/beta/threads/file_path_annotation.rb b/lib/openai/models/beta/threads/file_path_annotation.rb index 9bf12486..c1a51ef6 100644 --- a/lib/openai/models/beta/threads/file_path_annotation.rb +++ b/lib/openai/models/beta/threads/file_path_annotation.rb @@ -34,7 +34,7 @@ class FilePathAnnotation < OpenAI::Internal::Type::BaseModel # @!parse # # A URL for the file that's generated when the assistant used the - # # `code_interpreter` tool to generate a file. + # # `code_interpreter` tool to generate a file. # # # # @param end_index [Integer] # # @param file_path [OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath] diff --git a/lib/openai/models/beta/threads/file_path_delta_annotation.rb b/lib/openai/models/beta/threads/file_path_delta_annotation.rb index cb1c00a6..975bd449 100644 --- a/lib/openai/models/beta/threads/file_path_delta_annotation.rb +++ b/lib/openai/models/beta/threads/file_path_delta_annotation.rb @@ -56,7 +56,7 @@ class FilePathDeltaAnnotation < OpenAI::Internal::Type::BaseModel # @!parse # # A URL for the file that's generated when the assistant used the - # # `code_interpreter` tool to generate a file. + # # `code_interpreter` tool to generate a file. # # # # @param index [Integer] # # @param end_index [Integer] diff --git a/lib/openai/models/beta/threads/image_file.rb b/lib/openai/models/beta/threads/image_file.rb index bd2e1f25..631803f6 100644 --- a/lib/openai/models/beta/threads/image_file.rb +++ b/lib/openai/models/beta/threads/image_file.rb @@ -7,15 +7,15 @@ module Threads class ImageFile < OpenAI::Internal::Type::BaseModel # @!attribute file_id # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image - # in the message content. Set `purpose="vision"` when uploading the File if you - # need to later display the file content. + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. # # @return [String] required :file_id, String # @!attribute [r] detail # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. + # fewer tokens, you can opt in to high resolution using `high`. # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageFile::Detail, nil] optional :detail, enum: -> { OpenAI::Models::Beta::Threads::ImageFile::Detail } @@ -33,7 +33,7 @@ class ImageFile < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. + # fewer tokens, you can opt in to high resolution using `high`. # # @see OpenAI::Models::Beta::Threads::ImageFile#detail module Detail diff --git a/lib/openai/models/beta/threads/image_file_content_block.rb b/lib/openai/models/beta/threads/image_file_content_block.rb index 60fc0073..4a5a487e 100644 --- a/lib/openai/models/beta/threads/image_file_content_block.rb +++ b/lib/openai/models/beta/threads/image_file_content_block.rb @@ -18,7 +18,7 @@ class ImageFileContentBlock < OpenAI::Internal::Type::BaseModel # @!parse # # References an image [File](https://platform.openai.com/docs/api-reference/files) - # # in the content of a message. + # # in the content of a message. 
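
A minimal sketch of sending the image-file content block modeled above; the thread and file IDs are placeholders, and the File is assumed to have been uploaded with `purpose: "vision"`.

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
message = client.beta.threads.messages.create(
  "thread_abc",  # placeholder thread ID
  role: :user,
  content: [
    {type: :image_file, image_file: {file_id: "file_abc", detail: :low}},
    {type: :text, text: "Describe this image."}
  ]
)
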
# # # # @param image_file [OpenAI::Models::Beta::Threads::ImageFile] # # @param type [Symbol, :image_file] diff --git a/lib/openai/models/beta/threads/image_file_delta.rb b/lib/openai/models/beta/threads/image_file_delta.rb index 4d654fe4..219cfba9 100644 --- a/lib/openai/models/beta/threads/image_file_delta.rb +++ b/lib/openai/models/beta/threads/image_file_delta.rb @@ -7,7 +7,7 @@ module Threads class ImageFileDelta < OpenAI::Internal::Type::BaseModel # @!attribute [r] detail # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. + # fewer tokens, you can opt in to high resolution using `high`. # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageFileDelta::Detail, nil] optional :detail, enum: -> { OpenAI::Models::Beta::Threads::ImageFileDelta::Detail } @@ -18,8 +18,8 @@ class ImageFileDelta < OpenAI::Internal::Type::BaseModel # @!attribute [r] file_id # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image - # in the message content. Set `purpose="vision"` when uploading the File if you - # need to later display the file content. + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. # # @return [String, nil] optional :file_id, String @@ -37,7 +37,7 @@ class ImageFileDelta < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. + # fewer tokens, you can opt in to high resolution using `high`. # # @see OpenAI::Models::Beta::Threads::ImageFileDelta#detail module Detail diff --git a/lib/openai/models/beta/threads/image_file_delta_block.rb b/lib/openai/models/beta/threads/image_file_delta_block.rb index 51118d76..9a5625ad 100644 --- a/lib/openai/models/beta/threads/image_file_delta_block.rb +++ b/lib/openai/models/beta/threads/image_file_delta_block.rb @@ -28,7 +28,7 @@ class ImageFileDeltaBlock < OpenAI::Internal::Type::BaseModel # @!parse # # References an image [File](https://platform.openai.com/docs/api-reference/files) - # # in the content of a message. + # # in the content of a message. # # # # @param index [Integer] # # @param image_file [OpenAI::Models::Beta::Threads::ImageFileDelta] diff --git a/lib/openai/models/beta/threads/image_url.rb b/lib/openai/models/beta/threads/image_url.rb index c5db8e23..a76f8467 100644 --- a/lib/openai/models/beta/threads/image_url.rb +++ b/lib/openai/models/beta/threads/image_url.rb @@ -7,14 +7,14 @@ module Threads class ImageURL < OpenAI::Internal::Type::BaseModel # @!attribute url # The external URL of the image, must be a supported image types: jpeg, jpg, png, - # gif, webp. + # gif, webp. # # @return [String] required :url, String # @!attribute [r] detail # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. Default value is `auto` + # to high resolution using `high`. Default value is `auto` # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageURL::Detail, nil] optional :detail, enum: -> { OpenAI::Models::Beta::Threads::ImageURL::Detail } @@ -32,7 +32,7 @@ class ImageURL < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. 
Default value is `auto` + # to high resolution using `high`. Default value is `auto` # # @see OpenAI::Models::Beta::Threads::ImageURL#detail module Detail diff --git a/lib/openai/models/beta/threads/image_url_delta.rb b/lib/openai/models/beta/threads/image_url_delta.rb index 0c69ce94..4ae3e547 100644 --- a/lib/openai/models/beta/threads/image_url_delta.rb +++ b/lib/openai/models/beta/threads/image_url_delta.rb @@ -7,7 +7,7 @@ module Threads class ImageURLDelta < OpenAI::Internal::Type::BaseModel # @!attribute [r] detail # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. + # to high resolution using `high`. # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageURLDelta::Detail, nil] optional :detail, enum: -> { OpenAI::Models::Beta::Threads::ImageURLDelta::Detail } @@ -18,7 +18,7 @@ class ImageURLDelta < OpenAI::Internal::Type::BaseModel # @!attribute [r] url # The URL of the image, must be a supported image types: jpeg, jpg, png, gif, - # webp. + # webp. # # @return [String, nil] optional :url, String @@ -36,7 +36,7 @@ class ImageURLDelta < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. + # to high resolution using `high`. # # @see OpenAI::Models::Beta::Threads::ImageURLDelta#detail module Detail diff --git a/lib/openai/models/beta/threads/message.rb b/lib/openai/models/beta/threads/message.rb index 4987ff90..355140de 100644 --- a/lib/openai/models/beta/threads/message.rb +++ b/lib/openai/models/beta/threads/message.rb @@ -14,8 +14,8 @@ class Message < OpenAI::Internal::Type::BaseModel # @!attribute assistant_id # If applicable, the ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) that - # authored this message. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) that + # authored this message. # # @return [String, nil] required :assistant_id, String, nil?: true @@ -61,11 +61,11 @@ class Message < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -84,29 +84,29 @@ class Message < OpenAI::Internal::Type::BaseModel # @!attribute run_id # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) - # associated with the creation of this message. Value is `null` when messages are - # created manually using the create message or create thread endpoints. + # associated with the creation of this message. Value is `null` when messages are + # created manually using the create message or create thread endpoints. 
+ # associated with the creation of this message. Value is `null` when messages are
+ # created manually using the create message or create thread endpoints.
# # @return [String, nil] required :run_id, String, nil?: true # @!attribute status # The status of the message, which can be either `in_progress`, `incomplete`, or - # `completed`. + # `completed`. # # @return [Symbol, OpenAI::Models::Beta::Threads::Message::Status] required :status, enum: -> { OpenAI::Models::Beta::Threads::Message::Status } # @!attribute thread_id # The [thread](https://platform.openai.com/docs/api-reference/threads) ID that - # this message belongs to. + # this message belongs to. # # @return [String] required :thread_id, String # @!parse # # Represents a message within a - # # [thread](https://platform.openai.com/docs/api-reference/threads). + # # [thread](https://platform.openai.com/docs/api-reference/threads). # # # # @param id [String] # # @param assistant_id [String, nil] @@ -257,7 +257,7 @@ module Role end # The status of the message, which can be either `in_progress`, `incomplete`, or - # `completed`. + # `completed`. # # @see OpenAI::Models::Beta::Threads::Message#status module Status diff --git a/lib/openai/models/beta/threads/message_content.rb b/lib/openai/models/beta/threads/message_content.rb index 793bbb1d..b6ed040c 100644 --- a/lib/openai/models/beta/threads/message_content.rb +++ b/lib/openai/models/beta/threads/message_content.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. + # in the content of a message. module MessageContent extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/beta/threads/message_content_delta.rb b/lib/openai/models/beta/threads/message_content_delta.rb index 0d6aafd8..a69eadda 100644 --- a/lib/openai/models/beta/threads/message_content_delta.rb +++ b/lib/openai/models/beta/threads/message_content_delta.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. + # in the content of a message. module MessageContentDelta extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/beta/threads/message_content_part_param.rb b/lib/openai/models/beta/threads/message_content_part_param.rb index 33049950..faab6ef1 100644 --- a/lib/openai/models/beta/threads/message_content_part_param.rb +++ b/lib/openai/models/beta/threads/message_content_part_param.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. + # in the content of a message. module MessageContentPartParam extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/beta/threads/message_create_params.rb b/lib/openai/models/beta/threads/message_create_params.rb index dfabf003..ea41994f 100644 --- a/lib/openai/models/beta/threads/message_create_params.rb +++ b/lib/openai/models/beta/threads/message_create_params.rb @@ -19,10 +19,10 @@ class MessageCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute role # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. 
+ # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. # # @return [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Role] required :role, enum: -> { OpenAI::Models::Beta::Threads::MessageCreateParams::Role } @@ -37,11 +37,11 @@ class MessageCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -77,10 +77,10 @@ module Content # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. module Role extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/beta/threads/message_delta_event.rb b/lib/openai/models/beta/threads/message_delta_event.rb index 11ba7a44..0e313696 100644 --- a/lib/openai/models/beta/threads/message_delta_event.rb +++ b/lib/openai/models/beta/threads/message_delta_event.rb @@ -25,7 +25,7 @@ class MessageDeltaEvent < OpenAI::Internal::Type::BaseModel # @!parse # # Represents a message delta i.e. any changed fields on a message during - # # streaming. + # # streaming. # # # # @param id [String] # # @param delta [OpenAI::Models::Beta::Threads::MessageDelta] diff --git a/lib/openai/models/beta/threads/message_list_params.rb b/lib/openai/models/beta/threads/message_list_params.rb index 9a7d62f5..b30bcf8a 100644 --- a/lib/openai/models/beta/threads/message_list_params.rb +++ b/lib/openai/models/beta/threads/message_list_params.rb @@ -12,9 +12,9 @@ class MessageListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. 
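The `after` cursor described above supports a simple fetch loop. A sketch, assuming the conventional `OpenAI::Client` entry point, a positional thread ID (the ID below is a placeholder), and a page object exposing `data`:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

params = {limit: 100}
loop do
  # Fetch one page at a time; seed the next request with the last ID seen.
  page = client.beta.threads.messages.list("thread_abc123", **params)
  break if page.data.empty?
  page.data.each { |message| puts message.id }
  params[:after] = page.data.last.id
end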
# # @return [String, nil] optional :after, String @@ -25,9 +25,9 @@ class MessageListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String @@ -38,7 +38,7 @@ class MessageListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer @@ -49,7 +49,7 @@ class MessageListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::Beta::Threads::MessageListParams::Order, nil] optional :order, enum: -> { OpenAI::Models::Beta::Threads::MessageListParams::Order } @@ -81,7 +81,7 @@ class MessageListParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/beta/threads/message_update_params.rb b/lib/openai/models/beta/threads/message_update_params.rb index c83b973f..b02d01a6 100644 --- a/lib/openai/models/beta/threads/message_update_params.rb +++ b/lib/openai/models/beta/threads/message_update_params.rb @@ -17,11 +17,11 @@ class MessageUpdateParams < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true diff --git a/lib/openai/models/beta/threads/required_action_function_tool_call.rb b/lib/openai/models/beta/threads/required_action_function_tool_call.rb index d79d1249..bd40aadc 100644 --- a/lib/openai/models/beta/threads/required_action_function_tool_call.rb +++ b/lib/openai/models/beta/threads/required_action_function_tool_call.rb @@ -7,9 +7,9 @@ module Threads class RequiredActionFunctionToolCall < OpenAI::Internal::Type::BaseModel # @!attribute id # The ID of the tool call. 
This ID must be referenced when you submit the tool - # outputs in using the - # [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) - # endpoint. + # outputs in using the + # [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # endpoint. # # @return [String] required :id, String @@ -22,7 +22,7 @@ class RequiredActionFunctionToolCall < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of tool call the output is required for. For now, this is always - # `function`. + # `function`. # # @return [Symbol, :function] required :type, const: :function diff --git a/lib/openai/models/beta/threads/run.rb b/lib/openai/models/beta/threads/run.rb index 3c22831f..c6436c0d 100644 --- a/lib/openai/models/beta/threads/run.rb +++ b/lib/openai/models/beta/threads/run.rb @@ -16,8 +16,8 @@ class Run < OpenAI::Internal::Type::BaseModel # @!attribute assistant_id # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # execution of this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # execution of this run. # # @return [String] required :assistant_id, String @@ -54,15 +54,15 @@ class Run < OpenAI::Internal::Type::BaseModel # @!attribute incomplete_details # Details on why the run is incomplete. Will be `null` if the run is not - # incomplete. + # incomplete. # # @return [OpenAI::Models::Beta::Threads::Run::IncompleteDetails, nil] required :incomplete_details, -> { OpenAI::Models::Beta::Threads::Run::IncompleteDetails }, nil?: true # @!attribute instructions # The instructions that the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. # # @return [String] required :instructions, String @@ -75,33 +75,33 @@ class Run < OpenAI::Internal::Type::BaseModel # @!attribute max_completion_tokens # The maximum number of completion tokens specified to have been used over the - # course of the run. + # course of the run. # # @return [Integer, nil] required :max_completion_tokens, Integer, nil?: true # @!attribute max_prompt_tokens # The maximum number of prompt tokens specified to have been used over the course - # of the run. + # of the run. # # @return [Integer, nil] required :max_prompt_tokens, Integer, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute model # The model that the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. 
# # @return [String] required :model, String @@ -114,40 +114,40 @@ class Run < OpenAI::Internal::Type::BaseModel # @!attribute parallel_tool_calls # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. # # @return [Boolean] required :parallel_tool_calls, OpenAI::Internal::Type::Boolean # @!attribute required_action # Details on the action required to continue the run. Will be `null` if no action - # is required. + # is required. # # @return [OpenAI::Models::Beta::Threads::Run::RequiredAction, nil] required :required_action, -> { OpenAI::Models::Beta::Threads::Run::RequiredAction }, nil?: true # @!attribute response_format # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. 
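As the warning above stresses, `{ "type": "json_object" }` only constrains decoding; the prompt itself must still ask for JSON. A sketch of setting both when creating a run, assuming the same client setup and placeholder IDs as the earlier sketches:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

run = client.beta.threads.runs.create(
  "thread_abc123",                # placeholder thread ID
  assistant_id: "asst_abc123",    # placeholder assistant ID
  # JSON mode requires the prompt to request JSON explicitly,
  # otherwise the model may stream whitespace until the token limit.
  instructions: "Respond ONLY with a single JSON object.",
  response_format: {type: :json_object}
)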
# # @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] required :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true @@ -160,49 +160,49 @@ class Run < OpenAI::Internal::Type::BaseModel # @!attribute status # The status of the run, which can be either `queued`, `in_progress`, - # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, - # `incomplete`, or `expired`. + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. # # @return [Symbol, OpenAI::Models::Beta::Threads::RunStatus] required :status, enum: -> { OpenAI::Models::Beta::Threads::RunStatus } # @!attribute thread_id # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) - # that was executed on as a part of this run. + # that was executed on as a part of this run. # # @return [String] required :thread_id, String # @!attribute tool_choice # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. # # @return [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] required :tool_choice, union: -> { OpenAI::Models::Beta::AssistantToolChoiceOption }, nil?: true # @!attribute tools # The list of tools that the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. # # @return [Array] required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Beta::AssistantTool] } # @!attribute truncation_strategy # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the intial context window of the run. # # @return [OpenAI::Models::Beta::Threads::Run::TruncationStrategy, nil] required :truncation_strategy, -> { OpenAI::Models::Beta::Threads::Run::TruncationStrategy }, nil?: true # @!attribute usage # Usage statistics related to the run. This value will be `null` if the run is not - # in a terminal state (i.e. `in_progress`, `queued`, etc.). + # in a terminal state (i.e. `in_progress`, `queued`, etc.). # # @return [OpenAI::Models::Beta::Threads::Run::Usage, nil] required :usage, -> { OpenAI::Models::Beta::Threads::Run::Usage }, nil?: true @@ -221,7 +221,7 @@ class Run < OpenAI::Internal::Type::BaseModel # @!parse # # Represents an execution run on a - # # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ # # [thread](https://platform.openai.com/docs/api-reference/threads). # # # # @param id [String] # # @param assistant_id [String] @@ -290,7 +290,7 @@ class Run < OpenAI::Internal::Type::BaseModel class IncompleteDetails < OpenAI::Internal::Type::BaseModel # @!attribute [r] reason # The reason why the run is incomplete. This will point to which specific token - # limit was reached over the course of the run. + # limit was reached over the course of the run. # # @return [Symbol, OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason, nil] optional :reason, enum: -> { OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason } @@ -301,7 +301,7 @@ class IncompleteDetails < OpenAI::Internal::Type::BaseModel # @!parse # # Details on why the run is incomplete. Will be `null` if the run is not - # # incomplete. + # # incomplete. # # # # @param reason [Symbol, OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason] # # @@ -310,7 +310,7 @@ class IncompleteDetails < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The reason why the run is incomplete. This will point to which specific token - # limit was reached over the course of the run. + # limit was reached over the course of the run. # # @see OpenAI::Models::Beta::Threads::Run::IncompleteDetails#reason module Reason @@ -386,7 +386,7 @@ class RequiredAction < OpenAI::Internal::Type::BaseModel # @!parse # # Details on the action required to continue the run. Will be `null` if no action - # # is required. + # # is required. # # # # @param submit_tool_outputs [OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs] # # @param type [Symbol, :submit_tool_outputs] @@ -419,23 +419,23 @@ class SubmitToolOutputs < OpenAI::Internal::Type::BaseModel class TruncationStrategy < OpenAI::Internal::Type::BaseModel # @!attribute type # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. # # @return [Symbol, OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type] required :type, enum: -> { OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type } # @!attribute last_messages # The number of most recent messages from the thread when constructing the context - # for the run. + # for the run. # # @return [Integer, nil] optional :last_messages, Integer, nil?: true # @!parse # # Controls for how a thread will be truncated prior to the run. Use this to - # # control the intial context window of the run. + # # control the intial context window of the run. # # # # @param type [Symbol, OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type] # # @param last_messages [Integer, nil] @@ -445,9 +445,9 @@ class TruncationStrategy < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. 
When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. # # @see OpenAI::Models::Beta::Threads::Run::TruncationStrategy#type module Type @@ -486,7 +486,7 @@ class Usage < OpenAI::Internal::Type::BaseModel # @!parse # # Usage statistics related to the run. This value will be `null` if the run is not - # # in a terminal state (i.e. `in_progress`, `queued`, etc.). + # # in a terminal state (i.e. `in_progress`, `queued`, etc.). # # # # @param completion_tokens [Integer] # # @param prompt_tokens [Integer] diff --git a/lib/openai/models/beta/threads/run_create_params.rb b/lib/openai/models/beta/threads/run_create_params.rb index bb191666..a058e78e 100644 --- a/lib/openai/models/beta/threads/run_create_params.rb +++ b/lib/openai/models/beta/threads/run_create_params.rb @@ -14,20 +14,20 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute assistant_id # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. # # @return [String] required :assistant_id, String # @!attribute [r] include # A list of additional fields to include in the response. Currently the only - # supported value is `step_details.tool_calls[*].file_search.results[*].content` - # to fetch the file search result content. + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. # # @return [Array, nil] optional :include, @@ -39,8 +39,8 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute additional_instructions # Appends additional instructions at the end of the instructions for the run. This - # is useful for modifying the behavior on a per-run basis without overriding other - # instructions. + # is useful for modifying the behavior on a per-run basis without overriding other + # instructions. # # @return [String, nil] optional :additional_instructions, String, nil?: true @@ -55,56 +55,56 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute instructions # Overrides the - # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) - # of the assistant. This is useful for modifying the behavior on a per-run basis. + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. # # @return [String, nil] optional :instructions, String, nil?: true # @!attribute max_completion_tokens # The maximum number of completion tokens that may be used over the course of the - # run. The run will make a best effort to use only the number of completion tokens - # specified, across multiple turns of the run. 
If the run exceeds the number of - # completion tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. # # @return [Integer, nil] optional :max_completion_tokens, Integer, nil?: true # @!attribute max_prompt_tokens # The maximum number of prompt tokens that may be used over the course of the run. - # The run will make a best effort to use only the number of prompt tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # prompt tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. # # @return [Integer, nil] optional :max_prompt_tokens, Integer, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute model # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. # # @return [String, Symbol, OpenAI::Models::ChatModel, nil] optional :model, union: -> { OpenAI::Models::Beta::Threads::RunCreateParams::Model }, nil?: true # @!attribute [r] parallel_tool_calls # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. # # @return [Boolean, nil] optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean @@ -116,62 +116,62 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. 
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ # result in faster responses and fewer tokens used on reasoning in a response.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::Models::ReasoningEffort }, nil?: true
 # @!attribute response_format
 # Specifies the format that the model must output. Compatible with
- # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
- # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
- # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+ # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 #
- # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- # Outputs which ensures the model will match your supplied JSON schema. Learn more
- # in the
- # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ # Outputs which ensures the model will match your supplied JSON schema. Learn more
+ # in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 #
- # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- # message the model generates is valid JSON.
+ # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+ # message the model generates is valid JSON.
 #
- # **Important:** when using JSON mode, you **must** also instruct the model to
- # produce JSON yourself via a system or user message. Without this, the model may
- # generate an unending stream of whitespace until the generation reaches the token
- # limit, resulting in a long-running and seemingly "stuck" request. Also note that
- # the message content may be partially cut off if `finish_reason="length"`, which
- # indicates the generation exceeded `max_tokens` or the conversation exceeded the
- # max context length.
+ # **Important:** when using JSON mode, you **must** also instruct the model to
+ # produce JSON yourself via a system or user message. Without this, the model may
+ # generate an unending stream of whitespace until the generation reaches the token
+ # limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ # the message content may be partially cut off if `finish_reason="length"`, which
+ # indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ # max context length.
 #
 # @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
 optional :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true
 # @!attribute temperature
 # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- # make the output more random, while lower values like 0.2 will make it more
- # focused and deterministic.
+ # make the output more random, while lower values like 0.2 will make it more
+ # focused and deterministic.
 #
 # @return [Float, nil]
 optional :temperature, Float, nil?: true
 # @!attribute tool_choice
 # Controls which (if any) tool is called by the model. `none` means the model will
- # not call any tools and instead generates a message. `auto` is the default value
- # and means the model can pick between generating a message or calling one or more
- # tools. `required` means the model must call one or more tools before responding
- # to the user. Specifying a particular tool like `{"type": "file_search"}` or
- # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- # call that tool.
+ # not call any tools and instead generates a message. `auto` is the default value
+ # and means the model can pick between generating a message or calling one or more
+ # tools. `required` means the model must call one or more tools before responding
+ # to the user. Specifying a particular tool like `{"type": "file_search"}` or
+ # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ # call that tool.
 #
 # @return [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil]
 optional :tool_choice, union: -> { OpenAI::Models::Beta::AssistantToolChoiceOption }, nil?: true
 # @!attribute tools
 # Override the tools the assistant can use for this run. This is useful for
- # modifying the behavior on a per-run basis.
+ # modifying the behavior on a per-run basis.
 #
 # @return [Array, nil]
 optional :tools,
@@ -180,17 +180,17 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel
 # @!attribute top_p
 # An alternative to sampling with temperature, called nucleus sampling, where the
- # model considers the results of the tokens with top_p probability mass. So 0.1
- # means only the tokens comprising the top 10% probability mass are considered.
+ # model considers the results of the tokens with top_p probability mass. So 0.1
+ # means only the tokens comprising the top 10% probability mass are considered.
 #
- # We generally recommend altering this or temperature but not both.
+ # We generally recommend altering this or temperature but not both.
 #
 # @return [Float, nil]
 optional :top_p, Float, nil?: true
 # @!attribute truncation_strategy
 # Controls for how a thread will be truncated prior to the run. Use this to
- # control the intial context window of the run.
+ # control the intial context window of the run.
 #
 # @return [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil]
 optional :truncation_strategy,
@@ -254,10 +254,10 @@ class AdditionalMessage < OpenAI::Internal::Type::BaseModel
 # @!attribute role
 # The role of the entity that is creating the message. Allowed values include:
 #
- # - `user`: Indicates the message is sent by an actual user and should be used in
- # most cases to represent user-generated messages.
- # - `assistant`: Indicates the message is generated by the assistant. Use this
- # value to insert messages from the assistant into the conversation.
+ # - `user`: Indicates the message is sent by an actual user and should be used in
+ # most cases to represent user-generated messages.
+ # - `assistant`: Indicates the message is generated by the assistant. Use this
+ # value to insert messages from the assistant into the conversation.
 #
 # @return [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Role]
 required :role, enum: -> { OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Role }
@@ -272,11 +272,11 @@ class AdditionalMessage < OpenAI::Internal::Type::BaseModel
 # @!attribute metadata
 # Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
 #
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
 #
 # @return [Hash{Symbol=>String}, nil]
 optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
@@ -313,10 +313,10 @@ module Content
 # The role of the entity that is creating the message. Allowed values include:
 #
- # - `user`: Indicates the message is sent by an actual user and should be used in
- # most cases to represent user-generated messages.
- # - `assistant`: Indicates the message is generated by the assistant. Use this
- # value to insert messages from the assistant into the conversation.
+ # - `user`: Indicates the message is sent by an actual user and should be used in
+ # most cases to represent user-generated messages.
+ # - `assistant`: Indicates the message is generated by the assistant. Use this
+ # value to insert messages from the assistant into the conversation.
 #
 # @see OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage#role
 module Role
@@ -395,9 +395,9 @@ class FileSearch < OpenAI::Internal::Type::BaseModel
 end
 # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
- # be used to execute this run. If a value is provided here, it will override the
- # model associated with the assistant. If not, the model associated with the
- # assistant will be used.
+ # be used to execute this run. If a value is provided here, it will override the
+ # model associated with the assistant. If not, the model associated with the
+ # assistant will be used.
 module Model
 extend OpenAI::Internal::Type::Union
@@ -414,23 +414,23 @@ module Model
 class TruncationStrategy < OpenAI::Internal::Type::BaseModel
 # @!attribute type
 # The truncation strategy to use for the thread. The default is `auto`. If set to
- # `last_messages`, the thread will be truncated to the n most recent messages in
- # the thread. When set to `auto`, messages in the middle of the thread will be
- # dropped to fit the context length of the model, `max_prompt_tokens`.
+ # `last_messages`, the thread will be truncated to the n most recent messages in
+ # the thread. When set to `auto`, messages in the middle of the thread will be
+ # dropped to fit the context length of the model, `max_prompt_tokens`.
 #
 # @return [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type]
 required :type, enum: -> { OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type }
 # @!attribute last_messages
 # The number of most recent messages from the thread when constructing the context
- # for the run.
+ # for the run.
 #
 # @return [Integer, nil]
 optional :last_messages, Integer, nil?: true
 # @!parse
 # # Controls for how a thread will be truncated prior to the run. Use this to
- # # control the intial context window of the run.
+ # # control the intial context window of the run.
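A short sketch of the `last_messages` strategy documented above, which pins the run's context to the tail of the thread (same assumptions about client setup and placeholder IDs as the earlier sketches):

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

run = client.beta.threads.runs.create(
  "thread_abc123",
  assistant_id: "asst_abc123",
  # Keep only the ten most recent messages in context; `auto` would
  # instead drop messages from the middle of the thread as needed.
  truncation_strategy: {type: :last_messages, last_messages: 10}
)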
# # # # @param type [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type] # # @param last_messages [Integer, nil] @@ -440,9 +440,9 @@ class TruncationStrategy < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. # # @see OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy#type module Type diff --git a/lib/openai/models/beta/threads/run_list_params.rb b/lib/openai/models/beta/threads/run_list_params.rb index f2b9f759..3d4e377b 100644 --- a/lib/openai/models/beta/threads/run_list_params.rb +++ b/lib/openai/models/beta/threads/run_list_params.rb @@ -12,9 +12,9 @@ class RunListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String @@ -25,9 +25,9 @@ class RunListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String @@ -38,7 +38,7 @@ class RunListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer @@ -49,7 +49,7 @@ class RunListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::Beta::Threads::RunListParams::Order, nil] optional :order, enum: -> { OpenAI::Models::Beta::Threads::RunListParams::Order } @@ -70,7 +70,7 @@ class RunListParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. 
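Listing runs newest-first with the `order` and `limit` parameters above; `auto_paging_each` is assumed to follow the `after` cursor automatically, as the SDK's other list endpoints do:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Newest runs first, twenty per page; pagination is handled for us.
client.beta.threads.runs
      .list("thread_abc123", order: :desc, limit: 20)
      .auto_paging_each { |run| puts "#{run.id} #{run.status}" }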
 module Order
 extend OpenAI::Internal::Type::Enum
diff --git a/lib/openai/models/beta/threads/run_status.rb b/lib/openai/models/beta/threads/run_status.rb
index efb9f2b6..d17f7dc6 100644
--- a/lib/openai/models/beta/threads/run_status.rb
+++ b/lib/openai/models/beta/threads/run_status.rb
@@ -5,8 +5,8 @@ module Models
 module Beta
 module Threads
 # The status of the run, which can be either `queued`, `in_progress`,
- # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
- # `incomplete`, or `expired`.
+ # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
+ # `incomplete`, or `expired`.
 module RunStatus
 extend OpenAI::Internal::Type::Enum
diff --git a/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb b/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb
index 4e4733e6..2243cb1d 100644
--- a/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb
+++ b/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb
@@ -46,7 +46,7 @@ class ToolOutput < OpenAI::Internal::Type::BaseModel
 # @!attribute [r] tool_call_id
 # The ID of the tool call in the `required_action` object within the run object
- # the output is being submitted for.
+ # the output is being submitted for.
 #
 # @return [String, nil]
 optional :tool_call_id, String
diff --git a/lib/openai/models/beta/threads/run_update_params.rb b/lib/openai/models/beta/threads/run_update_params.rb
index 97c387fc..68b6536b 100644
--- a/lib/openai/models/beta/threads/run_update_params.rb
+++ b/lib/openai/models/beta/threads/run_update_params.rb
@@ -17,11 +17,11 @@ class RunUpdateParams < OpenAI::Internal::Type::BaseModel
 # @!attribute metadata
 # Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
 #
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
 #
 # @return [Hash{Symbol=>String}, nil]
 optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
diff --git a/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rb b/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rb
index 1fbdc55b..ab77db4e 100644
--- a/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rb
+++ b/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rb
@@ -40,7 +40,7 @@ class CodeInterpreterOutputImage < OpenAI::Internal::Type::BaseModel
 class Image < OpenAI::Internal::Type::BaseModel
 # @!attribute [r] file_id
 # The [file](https://platform.openai.com/docs/api-reference/files) ID of the
- # image.
+ # image.
 #
 # @return [String, nil]
 optional :file_id, String
diff --git a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb
index 4daf4c57..60262e17 100644
--- a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb
+++ b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb
@@ -21,7 +21,7 @@ class CodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel
 # @!attribute type
 # The type of tool call. This is always going to be `code_interpreter` for this
- # type of tool call.
+ # type of tool call.
 #
 # @return [Symbol, :code_interpreter]
 required :type, const: :code_interpreter
@@ -47,8 +47,8 @@ class CodeInterpreter < OpenAI::Internal::Type::BaseModel
 # @!attribute outputs
 # The outputs from the Code Interpreter tool call. Code Interpreter can output one
- # or more items, including text (`logs`) or images (`image`). Each of these are
- # represented by a different object type.
+ # or more items, including text (`logs`) or images (`image`). Each of these are
+ # represented by a different object type.
 #
 # @return [Array]
 required :outputs,
@@ -126,7 +126,7 @@ class Image < OpenAI::Internal::Type::BaseModel
 class Image < OpenAI::Internal::Type::BaseModel
 # @!attribute file_id
 # The [file](https://platform.openai.com/docs/api-reference/files) ID of the
- # image.
+ # image.
 #
 # @return [String]
 required :file_id, String
diff --git a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb
index 6573f533..a9cbef61 100644
--- a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb
+++ b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb
@@ -14,7 +14,7 @@ class CodeInterpreterToolCallDelta < OpenAI::Internal::Type::BaseModel
 # @!attribute type
 # The type of tool call. This is always going to be `code_interpreter` for this
- # type of tool call.
+ # type of tool call.
 #
 # @return [Symbol, :code_interpreter]
 required :type, const: :code_interpreter
@@ -66,8 +66,8 @@ class CodeInterpreter < OpenAI::Internal::Type::BaseModel
 # @!attribute [r] outputs
 # The outputs from the Code Interpreter tool call. Code Interpreter can output one
- # or more items, including text (`logs`) or images (`image`). Each of these are
- # represented by a different object type.
+ # or more items, including text (`logs`) or images (`image`). Each of these are
+ # represented by a different object type.
 #
 # @return [Array, nil]
 optional :outputs,
diff --git a/lib/openai/models/beta/threads/runs/file_search_tool_call.rb b/lib/openai/models/beta/threads/runs/file_search_tool_call.rb
index c8331a7b..3c6c423a 100644
--- a/lib/openai/models/beta/threads/runs/file_search_tool_call.rb
+++ b/lib/openai/models/beta/threads/runs/file_search_tool_call.rb
@@ -20,7 +20,7 @@ class FileSearchToolCall < OpenAI::Internal::Type::BaseModel
 # @!attribute type
 # The type of tool call. This is always going to be `file_search` for this type of
- # tool call.
+ # tool call.
 #
 # @return [Symbol, :file_search]
 required :type, const: :file_search
@@ -72,7 +72,7 @@ class FileSearch < OpenAI::Internal::Type::BaseModel
 class RankingOptions < OpenAI::Internal::Type::BaseModel
 # @!attribute ranker
 # The ranker to use for the file search. If not specified will use the `auto`
- # ranker.
+ # ranker.
 #
 # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker]
 required :ranker,
@@ -80,7 +80,7 @@ class RankingOptions < OpenAI::Internal::Type::BaseModel
 # @!attribute score_threshold
 # The score threshold for the file search. All values must be a floating point
- # number between 0 and 1.
+ # number between 0 and 1.
 #
 # @return [Float]
 required :score_threshold, Float
@@ -96,7 +96,7 @@ class RankingOptions < OpenAI::Internal::Type::BaseModel
 # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
 # The ranker to use for the file search. If not specified will use the `auto`
- # ranker.
+ # ranker.
 #
 # @see OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions#ranker
 module Ranker
@@ -128,14 +128,14 @@ class Result < OpenAI::Internal::Type::BaseModel
 # @!attribute score
 # The score of the result. All values must be a floating point number between 0
- # and 1.
+ # and 1.
 #
 # @return [Float]
 required :score, Float
 # @!attribute [r] content
 # The content of the result that was found. The content is only included if
- # requested via the include query parameter.
+ # requested via the include query parameter.
 #
 # @return [Array, nil]
 optional :content,
diff --git a/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rb b/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rb
index eb5776d0..4fa2dc05 100644
--- a/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rb
+++ b/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rb
@@ -20,7 +20,7 @@ class FileSearchToolCallDelta < OpenAI::Internal::Type::BaseModel
 # @!attribute type
 # The type of tool call. This is always going to be `file_search` for this type of
- # tool call.
+ # tool call.
 #
 # @return [Symbol, :file_search]
 required :type, const: :file_search
diff --git a/lib/openai/models/beta/threads/runs/function_tool_call.rb b/lib/openai/models/beta/threads/runs/function_tool_call.rb
index f0677ac7..cc1eb09a 100644
--- a/lib/openai/models/beta/threads/runs/function_tool_call.rb
+++ b/lib/openai/models/beta/threads/runs/function_tool_call.rb
@@ -20,7 +20,7 @@ class FunctionToolCall < OpenAI::Internal::Type::BaseModel
 # @!attribute type
 # The type of tool call. This is always going to be `function` for this type of
- # tool call.
+ # tool call.
 #
 # @return [Symbol, :function]
 required :type, const: :function
@@ -50,8 +50,8 @@ class Function < OpenAI::Internal::Type::BaseModel
 # @!attribute output
 # The output of the function. This will be `null` if the outputs have not been
- # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
- # yet.
+ # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+ # yet.
 #
 # @return [String, nil]
 required :output, String, nil?: true
diff --git a/lib/openai/models/beta/threads/runs/function_tool_call_delta.rb b/lib/openai/models/beta/threads/runs/function_tool_call_delta.rb
index c4bc767e..6a164004 100644
--- a/lib/openai/models/beta/threads/runs/function_tool_call_delta.rb
+++ b/lib/openai/models/beta/threads/runs/function_tool_call_delta.rb
@@ -14,7 +14,7 @@ class FunctionToolCallDelta < OpenAI::Internal::Type::BaseModel
 # @!attribute type
 # The type of tool call. This is always going to be `function` for this type of
- # tool call.
+ # tool call.
 #
 # @return [Symbol, :function]
 required :type, const: :function
@@ -73,8 +73,8 @@ class Function < OpenAI::Internal::Type::BaseModel
 # @!attribute output
 # The output of the function. This will be `null` if the outputs have not been
- # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
- # yet.
+ # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+ # yet.
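Tying the pieces above together: a function tool call's `output` stays `null` until results are sent through the submit-tool-outputs endpoint. A sketch of that round trip; the method names follow the resource layout implied by these models, the IDs are placeholders, and `handle_tool` stands in for application dispatch logic:

require "openai"
require "json"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

run = client.beta.threads.runs.retrieve("run_abc123", thread_id: "thread_abc123")

if run.status == :requires_action
  tool_calls = run.required_action.submit_tool_outputs.tool_calls
  outputs = tool_calls.map do |call|
    args = JSON.parse(call.function.arguments)
    # `handle_tool` is a hypothetical application-side dispatcher.
    {tool_call_id: call.id, output: handle_tool(call.function.name, args).to_json}
  end
  client.beta.threads.runs.submit_tool_outputs(
    "run_abc123",
    thread_id: "thread_abc123",
    tool_outputs: outputs
  )
end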
 #
 # @return [String, nil]
 optional :output, String, nil?: true
diff --git a/lib/openai/models/beta/threads/runs/run_step.rb b/lib/openai/models/beta/threads/runs/run_step.rb
index 9261d7c5..a4111bd4 100644
--- a/lib/openai/models/beta/threads/runs/run_step.rb
+++ b/lib/openai/models/beta/threads/runs/run_step.rb
@@ -15,8 +15,8 @@ class RunStep < OpenAI::Internal::Type::BaseModel
 # @!attribute assistant_id
 # The ID of the
- # [assistant](https://platform.openai.com/docs/api-reference/assistants)
- # associated with the run step.
+ # [assistant](https://platform.openai.com/docs/api-reference/assistants)
+ # associated with the run step.
 #
 # @return [String]
 required :assistant_id, String
@@ -41,7 +41,7 @@ class RunStep < OpenAI::Internal::Type::BaseModel
 # @!attribute expired_at
 # The Unix timestamp (in seconds) for when the run step expired. A step is
- # considered expired if the parent run is expired.
+ # considered expired if the parent run is expired.
 #
 # @return [Integer, nil]
 required :expired_at, Integer, nil?: true
@@ -54,18 +54,18 @@ class RunStep < OpenAI::Internal::Type::BaseModel
 # @!attribute last_error
 # The last error associated with this run step. Will be `null` if there are no
- # errors.
+ # errors.
 #
 # @return [OpenAI::Models::Beta::Threads::Runs::RunStep::LastError, nil]
 required :last_error, -> { OpenAI::Models::Beta::Threads::Runs::RunStep::LastError }, nil?: true
 # @!attribute metadata
 # Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
 #
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
 #
 # @return [Hash{Symbol=>String}, nil]
 required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
@@ -78,14 +78,14 @@ class RunStep < OpenAI::Internal::Type::BaseModel
 # @!attribute run_id
 # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that
- # this run step is a part of.
+ # this run step is a part of.
 #
 # @return [String]
 required :run_id, String
 # @!attribute status
 # The status of the run step, which can be either `in_progress`, `cancelled`,
- # `failed`, `completed`, or `expired`.
+ # `failed`, `completed`, or `expired`.
 #
 # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Status]
 required :status, enum: -> { OpenAI::Models::Beta::Threads::Runs::RunStep::Status }
@@ -98,7 +98,7 @@ class RunStep < OpenAI::Internal::Type::BaseModel
 # @!attribute thread_id
 # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
- # that was run.
+ # that was run.
 #
 # @return [String]
 required :thread_id, String
@@ -111,7 +111,7 @@ class RunStep < OpenAI::Internal::Type::BaseModel
 # @!attribute usage
 # Usage statistics related to the run step. This value will be `null` while the
- # run step's status is `in_progress`.
+ # run step's status is `in_progress`.
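Because `usage` is `null` for any step still `in_progress`, totals should skip non-terminal steps. A sketch under the same client assumptions; `total_tokens` is assumed to accompany the prompt and completion counts on the usage object:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

steps = client.beta.threads.runs.steps.list("run_abc123", thread_id: "thread_abc123")

# Only terminal steps carry usage; in-progress steps report nil,
# which `&.` plus `to_i` folds to zero.
total = steps.data.sum { |step| step.usage&.total_tokens.to_i }
puts "tokens so far: #{total}"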
# # @return [OpenAI::Models::Beta::Threads::Runs::RunStep::Usage, nil] required :usage, -> { OpenAI::Models::Beta::Threads::Runs::RunStep::Usage }, nil?: true @@ -176,7 +176,7 @@ class LastError < OpenAI::Internal::Type::BaseModel # @!parse # # The last error associated with this run step. Will be `null` if there are no - # # errors. + # # errors. # # # # @param code [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::Code] # # @param message [String] @@ -203,7 +203,7 @@ module Code end # The status of the run step, which can be either `in_progress`, `cancelled`, - # `failed`, `completed`, or `expired`. + # `failed`, `completed`, or `expired`. # # @see OpenAI::Models::Beta::Threads::Runs::RunStep#status module Status @@ -279,7 +279,7 @@ class Usage < OpenAI::Internal::Type::BaseModel # @!parse # # Usage statistics related to the run step. This value will be `null` while the - # # run step's status is `in_progress`. + # # run step's status is `in_progress`. # # # # @param completion_tokens [Integer] # # @param prompt_tokens [Integer] diff --git a/lib/openai/models/beta/threads/runs/run_step_delta_event.rb b/lib/openai/models/beta/threads/runs/run_step_delta_event.rb index 027d0490..a71b9858 100644 --- a/lib/openai/models/beta/threads/runs/run_step_delta_event.rb +++ b/lib/openai/models/beta/threads/runs/run_step_delta_event.rb @@ -26,7 +26,7 @@ class RunStepDeltaEvent < OpenAI::Internal::Type::BaseModel # @!parse # # Represents a run step delta i.e. any changed fields on a run step during - # # streaming. + # # streaming. # # # # @param id [String] # # @param delta [OpenAI::Models::Beta::Threads::Runs::RunStepDelta] diff --git a/lib/openai/models/beta/threads/runs/step_list_params.rb b/lib/openai/models/beta/threads/runs/step_list_params.rb index a7c48d03..1b281161 100644 --- a/lib/openai/models/beta/threads/runs/step_list_params.rb +++ b/lib/openai/models/beta/threads/runs/step_list_params.rb @@ -18,9 +18,9 @@ class StepListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String @@ -31,9 +31,9 @@ class StepListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String @@ -44,12 +44,12 @@ class StepListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] include # A list of additional fields to include in the response. Currently the only - # supported value is `step_details.tool_calls[*].file_search.results[*].content` - # to fetch the file search result content. 
+ # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. # # @return [Array, nil] optional :include, @@ -61,7 +61,7 @@ class StepListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer @@ -72,7 +72,7 @@ class StepListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::StepListParams::Order, nil] optional :order, enum: -> { OpenAI::Models::Beta::Threads::Runs::StepListParams::Order } @@ -95,7 +95,7 @@ class StepListParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/beta/threads/runs/step_retrieve_params.rb b/lib/openai/models/beta/threads/runs/step_retrieve_params.rb index efbf4aaf..a02c8ce0 100644 --- a/lib/openai/models/beta/threads/runs/step_retrieve_params.rb +++ b/lib/openai/models/beta/threads/runs/step_retrieve_params.rb @@ -23,12 +23,12 @@ class StepRetrieveParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] include # A list of additional fields to include in the response. Currently the only - # supported value is `step_details.tool_calls[*].file_search.results[*].content` - # to fetch the file search result content. + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. # # @return [Array, nil] optional :include, diff --git a/lib/openai/models/beta/threads/runs/tool_call_delta_object.rb b/lib/openai/models/beta/threads/runs/tool_call_delta_object.rb index 7f61ee05..8c1394b6 100644 --- a/lib/openai/models/beta/threads/runs/tool_call_delta_object.rb +++ b/lib/openai/models/beta/threads/runs/tool_call_delta_object.rb @@ -14,8 +14,8 @@ class ToolCallDeltaObject < OpenAI::Internal::Type::BaseModel # @!attribute [r] tool_calls # An array of tool calls the run step was involved in. These can be associated - # with one of three types of tools: `code_interpreter`, `file_search`, or - # `function`. + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. 
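The `after`/`before` cursors compose with `limit` and `order` exactly as described above. A sketch of paging through run steps, assuming a generated `client.beta.threads.runs.steps.list` method that returns a page object with a `data` array; IDs are placeholders:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # First page: up to 20 steps (the documented default), newest first.
    page = client.beta.threads.runs.steps.list(
      "run_123",
      thread_id: "thread_123",
      limit: 20,
      order: :desc
    )

    # `after` picks up where the previous page ended (obj_foo in the docs above).
    last_id = page.data.last&.id
    next_page = client.beta.threads.runs.steps.list(
      "run_123",
      thread_id: "thread_123",
      after: last_id
    )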
# # @return [Array, nil] optional :tool_calls, diff --git a/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb b/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb index bd9aa901..1ac644f3 100644 --- a/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb +++ b/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb @@ -8,8 +8,8 @@ module Runs class ToolCallsStepDetails < OpenAI::Internal::Type::BaseModel # @!attribute tool_calls # An array of tool calls the run step was involved in. These can be associated - # with one of three types of tools: `code_interpreter`, `file_search`, or - # `function`. + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. # # @return [Array] required :tool_calls, diff --git a/lib/openai/models/chat/chat_completion.rb b/lib/openai/models/chat/chat_completion.rb index 0f9f79a6..97ee34db 100644 --- a/lib/openai/models/chat/chat_completion.rb +++ b/lib/openai/models/chat/chat_completion.rb @@ -15,7 +15,7 @@ class ChatCompletion < OpenAI::Internal::Type::BaseModel # @!attribute choices # A list of chat completion choices. Can be more than one if `n` is greater - # than 1. + # than 1. # # @return [Array] required :choices, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Chat::ChatCompletion::Choice] } @@ -47,8 +47,8 @@ class ChatCompletion < OpenAI::Internal::Type::BaseModel # @!attribute [r] system_fingerprint # This fingerprint represents the backend configuration that the model runs with. # - # Can be used in conjunction with the `seed` request parameter to understand when - # backend changes have been made that might impact determinism. + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. # # @return [String, nil] optional :system_fingerprint, String @@ -69,7 +69,7 @@ class ChatCompletion < OpenAI::Internal::Type::BaseModel # @!parse # # Represents a chat completion response returned by model, based on the provided - # # input. + # # input. # # # # @param id [String] # # @param choices [Array] @@ -99,11 +99,11 @@ class ChatCompletion < OpenAI::Internal::Type::BaseModel class Choice < OpenAI::Internal::Type::BaseModel # @!attribute finish_reason # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. # # @return [Symbol, OpenAI::Models::Chat::ChatCompletion::Choice::FinishReason] required :finish_reason, enum: -> { OpenAI::Models::Chat::ChatCompletion::Choice::FinishReason } @@ -137,11 +137,11 @@ class Choice < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The reason the model stopped generating tokens. 
This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. # # @see OpenAI::Models::Chat::ChatCompletion::Choice#finish_reason module FinishReason diff --git a/lib/openai/models/chat/chat_completion_assistant_message_param.rb b/lib/openai/models/chat/chat_completion_assistant_message_param.rb index 5ac2d838..992b3818 100644 --- a/lib/openai/models/chat/chat_completion_assistant_message_param.rb +++ b/lib/openai/models/chat/chat_completion_assistant_message_param.rb @@ -12,14 +12,14 @@ class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute audio # Data about a previous audio response from the model. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # [Learn more](https://platform.openai.com/docs/guides/audio). # # @return [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio, nil] optional :audio, -> { OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio }, nil?: true # @!attribute content # The contents of the assistant message. Required unless `tool_calls` or - # `function_call` is specified. + # `function_call` is specified. # # @return [String, Array, nil] optional :content, @@ -28,7 +28,7 @@ class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute function_call # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. # # @return [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall, nil] optional :function_call, @@ -37,7 +37,7 @@ class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute [r] name # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. # # @return [String, nil] optional :name, String @@ -99,7 +99,7 @@ class Audio < OpenAI::Internal::Type::BaseModel # @!parse # # Data about a previous audio response from the model. - # # [Learn more](https://platform.openai.com/docs/guides/audio). + # # [Learn more](https://platform.openai.com/docs/guides/audio). # # # # @param id [String] # # @@ -109,7 +109,7 @@ class Audio < OpenAI::Internal::Type::BaseModel end # The contents of the assistant message. Required unless `tool_calls` or - # `function_call` is specified. + # `function_call` is specified. # # @see OpenAI::Models::Chat::ChatCompletionAssistantMessageParam#content module Content @@ -122,7 +122,7 @@ module Content variant -> { OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::ArrayOfContentPartArray } # Learn about - # [text inputs](https://platform.openai.com/docs/guides/text-generation). + # [text inputs](https://platform.openai.com/docs/guides/text-generation). 
+ # [text inputs](https://platform.openai.com/docs/guides/text-generation).
module ArrayOfContentPart extend OpenAI::Internal::Type::Union @@ -152,9 +152,9 @@ module ArrayOfContentPart class FunctionCall < OpenAI::Internal::Type::BaseModel # @!attribute arguments # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. # # @return [String] required :arguments, String @@ -167,7 +167,7 @@ class FunctionCall < OpenAI::Internal::Type::BaseModel # @!parse # # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # # that should be called, as generated by the model. + # # that should be called, as generated by the model. # # # # @param arguments [String] # # @param name [String] diff --git a/lib/openai/models/chat/chat_completion_audio.rb b/lib/openai/models/chat/chat_completion_audio.rb index e5c8bb97..238b8a6b 100644 --- a/lib/openai/models/chat/chat_completion_audio.rb +++ b/lib/openai/models/chat/chat_completion_audio.rb @@ -12,14 +12,14 @@ class ChatCompletionAudio < OpenAI::Internal::Type::BaseModel # @!attribute data # Base64 encoded audio bytes generated by the model, in the format specified in - # the request. + # the request. # # @return [String] required :data, String # @!attribute expires_at # The Unix timestamp (in seconds) for when this audio response will no longer be - # accessible on the server for use in multi-turn conversations. + # accessible on the server for use in multi-turn conversations. # # @return [Integer] required :expires_at, Integer @@ -32,8 +32,8 @@ class ChatCompletionAudio < OpenAI::Internal::Type::BaseModel # @!parse # # If the audio output modality is requested, this object contains data about the - # # audio response from the model. - # # [Learn more](https://platform.openai.com/docs/guides/audio). + # # audio response from the model. + # # [Learn more](https://platform.openai.com/docs/guides/audio). # # # # @param id [String] # # @param data [String] diff --git a/lib/openai/models/chat/chat_completion_audio_param.rb b/lib/openai/models/chat/chat_completion_audio_param.rb index b1ede0ce..07ca649d 100644 --- a/lib/openai/models/chat/chat_completion_audio_param.rb +++ b/lib/openai/models/chat/chat_completion_audio_param.rb @@ -6,22 +6,22 @@ module Chat class ChatCompletionAudioParam < OpenAI::Internal::Type::BaseModel # @!attribute format_ # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, - # or `pcm16`. + # or `pcm16`. # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Format] required :format_, enum: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Format }, api_name: :format # @!attribute voice # The voice the model uses to respond. Supported voices are `alloy`, `ash`, - # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. + # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. # # @return [String, Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice] required :voice, union: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice } # @!parse # # Parameters for audio output. Required when audio output is requested with - # # `modalities: ["audio"]`. 
- # # [Learn more](https://platform.openai.com/docs/guides/audio). + # # `modalities: ["audio"]`. + # # [Learn more](https://platform.openai.com/docs/guides/audio). # # # # @param format_ [Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Format] # # @param voice [String, Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice] @@ -31,7 +31,7 @@ class ChatCompletionAudioParam < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, - # or `pcm16`. + # or `pcm16`. # # @see OpenAI::Models::Chat::ChatCompletionAudioParam#format_ module Format @@ -51,7 +51,7 @@ module Format end # The voice the model uses to respond. Supported voices are `alloy`, `ash`, - # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. + # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. # # @see OpenAI::Models::Chat::ChatCompletionAudioParam#voice module Voice diff --git a/lib/openai/models/chat/chat_completion_chunk.rb b/lib/openai/models/chat/chat_completion_chunk.rb index 5c693f05..b9c033b3 100644 --- a/lib/openai/models/chat/chat_completion_chunk.rb +++ b/lib/openai/models/chat/chat_completion_chunk.rb @@ -12,8 +12,8 @@ class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel # @!attribute choices # A list of chat completion choices. Can contain more than one elements if `n` is - # greater than 1. Can also be empty for the last chunk if you set - # `stream_options: {"include_usage": true}`. + # greater than 1. Can also be empty for the last chunk if you set + # `stream_options: {"include_usage": true}`. # # @return [Array] required :choices, @@ -21,7 +21,7 @@ class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel # @!attribute created # The Unix timestamp (in seconds) of when the chat completion was created. Each - # chunk has the same timestamp. + # chunk has the same timestamp. # # @return [Integer] required :created, Integer @@ -46,8 +46,8 @@ class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel # @!attribute [r] system_fingerprint # This fingerprint represents the backend configuration that the model runs with. - # Can be used in conjunction with the `seed` request parameter to understand when - # backend changes have been made that might impact determinism. + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. # # @return [String, nil] optional :system_fingerprint, String @@ -58,20 +58,20 @@ class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel # @!attribute usage # An optional field that will only be present when you set - # `stream_options: {"include_usage": true}` in your request. When present, it - # contains a null value **except for the last chunk** which contains the token - # usage statistics for the entire request. + # `stream_options: {"include_usage": true}` in your request. When present, it + # contains a null value **except for the last chunk** which contains the token + # usage statistics for the entire request. # - # **NOTE:** If the stream is interrupted or cancelled, you may not receive the - # final usage chunk which contains the total token usage for the request. + # **NOTE:** If the stream is interrupted or cancelled, you may not receive the + # final usage chunk which contains the total token usage for the request. 
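The usage-chunk behavior documented here is easiest to see in code. A hedged sketch of consuming a stream with `stream_options: {include_usage: true}`; the exact streaming helper differs across SDK versions (`stream_raw` is assumed here), and every chunk except the final one carries `usage: nil`:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    stream = client.chat.completions.stream_raw(
      model: "gpt-4o",
      messages: [{role: "user", content: "Write a haiku about Ruby."}],
      stream_options: {include_usage: true}
    )

    stream.each do |chunk|
      # Content deltas arrive on non-final chunks; the last chunk has empty choices.
      print chunk.choices.first.delta.content.to_s unless chunk.choices.empty?
      # Only the final chunk carries the token usage for the whole request.
      puts "\ntotal tokens: #{chunk.usage.total_tokens}" if chunk.usage
    end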
# # @return [OpenAI::Models::CompletionUsage, nil] optional :usage, -> { OpenAI::Models::CompletionUsage }, nil?: true # @!parse # # Represents a streamed chunk of a chat completion response returned by the model, - # # based on the provided input. - # # [Learn more](https://platform.openai.com/docs/guides/streaming-responses). + # # based on the provided input. + # # [Learn more](https://platform.openai.com/docs/guides/streaming-responses). # # # # @param id [String] # # @param choices [Array] @@ -107,11 +107,11 @@ class Choice < OpenAI::Internal::Type::BaseModel # @!attribute finish_reason # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::FinishReason, nil] required :finish_reason, @@ -150,7 +150,7 @@ class Delta < OpenAI::Internal::Type::BaseModel # @!attribute [r] function_call # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. # # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall, nil] optional :function_call, -> { OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall } @@ -204,9 +204,9 @@ class Delta < OpenAI::Internal::Type::BaseModel class FunctionCall < OpenAI::Internal::Type::BaseModel # @!attribute [r] arguments # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. # # @return [String, nil] optional :arguments, String @@ -227,7 +227,7 @@ class FunctionCall < OpenAI::Internal::Type::BaseModel # @!parse # # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # # that should be called, as generated by the model. + # # that should be called, as generated by the model. # # # # @param arguments [String] # # @param name [String] @@ -305,9 +305,9 @@ class ToolCall < OpenAI::Internal::Type::BaseModel class Function < OpenAI::Internal::Type::BaseModel # @!attribute [r] arguments # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. 
Note that the model does not always generate valid JSON, and may
- # hallucinate parameters not defined by your function schema. Validate the
- # arguments in your code before calling your function.
+ # format. Note that the model does not always generate valid JSON, and may
+ # hallucinate parameters not defined by your function schema. Validate the
+ # arguments in your code before calling your function.
#
# @return [String, nil]
optional :arguments, String
@@ -353,11 +353,11 @@ module Type end
# The reason the model stopped generating tokens. This will be `stop` if the model
- # hit a natural stop point or a provided stop sequence, `length` if the maximum
- # number of tokens specified in the request was reached, `content_filter` if
- # content was omitted due to a flag from our content filters, `tool_calls` if the
- # model called a tool, or `function_call` (deprecated) if the model called a
- # function.
+ # hit a natural stop point or a provided stop sequence, `length` if the maximum
+ # number of tokens specified in the request was reached, `content_filter` if
+ # content was omitted due to a flag from our content filters, `tool_calls` if the
+ # model called a tool, or `function_call` (deprecated) if the model called a
+ # function.
#
# @see OpenAI::Models::Chat::ChatCompletionChunk::Choice#finish_reason
module FinishReason
diff --git a/lib/openai/models/chat/chat_completion_content_part.rb b/lib/openai/models/chat/chat_completion_content_part.rb
index 30da9605..31635ff4 100644
--- a/lib/openai/models/chat/chat_completion_content_part.rb
+++ b/lib/openai/models/chat/chat_completion_content_part.rb
@@ -4,7 +4,7 @@ module OpenAI module Models module Chat
# Learn about
- # [text inputs](https://platform.openai.com/docs/guides/text-generation).
+ # [text inputs](https://platform.openai.com/docs/guides/text-generation).
module ChatCompletionContentPart extend OpenAI::Internal::Type::Union
@@ -36,7 +36,7 @@ class File < OpenAI::Internal::Type::BaseModel
# @!parse
# # Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text
- # # generation.
+ # # generation.
# #
# # @param file [OpenAI::Models::Chat::ChatCompletionContentPart::File::File]
# # @param type [Symbol, :file]
# #
@@ -49,7 +49,7 @@ class File < OpenAI::Internal::Type::BaseModel
class File < OpenAI::Internal::Type::BaseModel
# @!attribute [r] file_data
# The base64 encoded file data, used when passing the file to the model as a
- # string.
+ # string.
#
# @return [String, nil]
optional :file_data, String
diff --git a/lib/openai/models/chat/chat_completion_content_part_image.rb b/lib/openai/models/chat/chat_completion_content_part_image.rb
index 04a9152b..a22c144e 100644
--- a/lib/openai/models/chat/chat_completion_content_part_image.rb
+++ b/lib/openai/models/chat/chat_completion_content_part_image.rb
@@ -35,7 +35,7 @@ class ImageURL < OpenAI::Internal::Type::BaseModel
# @!attribute [r] detail
# Specifies the detail level of the image. Learn more in the
- # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
+ # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
#
# @return [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::Detail, nil]
optional :detail, enum: -> { OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::Detail }
@@ -53,7 +53,7 @@ class ImageURL < OpenAI::Internal::Type::BaseModel
# def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
# Specifies the detail level of the image. Learn more in the
- # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
+ # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). # # @see OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL#detail module Detail diff --git a/lib/openai/models/chat/chat_completion_content_part_text.rb b/lib/openai/models/chat/chat_completion_content_part_text.rb index e286368a..e1975859 100644 --- a/lib/openai/models/chat/chat_completion_content_part_text.rb +++ b/lib/openai/models/chat/chat_completion_content_part_text.rb @@ -18,7 +18,7 @@ class ChatCompletionContentPartText < OpenAI::Internal::Type::BaseModel # @!parse # # Learn about - # # [text inputs](https://platform.openai.com/docs/guides/text-generation). + # # [text inputs](https://platform.openai.com/docs/guides/text-generation). # # # # @param text [String] # # @param type [Symbol, :text] diff --git a/lib/openai/models/chat/chat_completion_developer_message_param.rb b/lib/openai/models/chat/chat_completion_developer_message_param.rb index 1fe2ecc1..ecd1e5bd 100644 --- a/lib/openai/models/chat/chat_completion_developer_message_param.rb +++ b/lib/openai/models/chat/chat_completion_developer_message_param.rb @@ -18,7 +18,7 @@ class ChatCompletionDeveloperMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute [r] name # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. # # @return [String, nil] optional :name, String @@ -29,8 +29,8 @@ class ChatCompletionDeveloperMessageParam < OpenAI::Internal::Type::BaseModel # @!parse # # Developer-provided instructions that the model should follow, regardless of - # # messages sent by the user. With o1 models and newer, `developer` messages - # # replace the previous `system` messages. + # # messages sent by the user. With o1 models and newer, `developer` messages + # # replace the previous `system` messages. # # # # @param content [String, Array] # # @param name [String] diff --git a/lib/openai/models/chat/chat_completion_function_call_option.rb b/lib/openai/models/chat/chat_completion_function_call_option.rb index 9434599e..3ae8526b 100644 --- a/lib/openai/models/chat/chat_completion_function_call_option.rb +++ b/lib/openai/models/chat/chat_completion_function_call_option.rb @@ -12,7 +12,7 @@ class ChatCompletionFunctionCallOption < OpenAI::Internal::Type::BaseModel # @!parse # # Specifying a particular function via `{"name": "my_function"}` forces the model - # # to call that function. + # # to call that function. # # # # @param name [String] # # diff --git a/lib/openai/models/chat/chat_completion_message.rb b/lib/openai/models/chat/chat_completion_message.rb index 9b1828e2..b36682ef 100644 --- a/lib/openai/models/chat/chat_completion_message.rb +++ b/lib/openai/models/chat/chat_completion_message.rb @@ -24,7 +24,7 @@ class ChatCompletionMessage < OpenAI::Internal::Type::BaseModel # @!attribute [r] annotations # Annotations for the message, when applicable, as when using the - # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). # # @return [Array, nil] optional :annotations, @@ -36,15 +36,15 @@ class ChatCompletionMessage < OpenAI::Internal::Type::BaseModel # @!attribute audio # If the audio output modality is requested, this object contains data about the - # audio response from the model. 
- # [Learn more](https://platform.openai.com/docs/guides/audio). + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). # # @return [OpenAI::Models::Chat::ChatCompletionAudio, nil] optional :audio, -> { OpenAI::Models::Chat::ChatCompletionAudio }, nil?: true # @!attribute [r] function_call # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. # # @return [OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall, nil] optional :function_call, -> { OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall } @@ -159,9 +159,9 @@ class URLCitation < OpenAI::Internal::Type::BaseModel class FunctionCall < OpenAI::Internal::Type::BaseModel # @!attribute arguments # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. # # @return [String] required :arguments, String @@ -174,7 +174,7 @@ class FunctionCall < OpenAI::Internal::Type::BaseModel # @!parse # # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # # that should be called, as generated by the model. + # # that should be called, as generated by the model. # # # # @param arguments [String] # # @param name [String] diff --git a/lib/openai/models/chat/chat_completion_message_param.rb b/lib/openai/models/chat/chat_completion_message_param.rb index 6796ea10..ed72d515 100644 --- a/lib/openai/models/chat/chat_completion_message_param.rb +++ b/lib/openai/models/chat/chat_completion_message_param.rb @@ -4,8 +4,8 @@ module OpenAI module Models module Chat # Developer-provided instructions that the model should follow, regardless of - # messages sent by the user. With o1 models and newer, `developer` messages - # replace the previous `system` messages. + # messages sent by the user. With o1 models and newer, `developer` messages + # replace the previous `system` messages. module ChatCompletionMessageParam extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/chat/chat_completion_message_tool_call.rb b/lib/openai/models/chat/chat_completion_message_tool_call.rb index 508bc7ca..cca6cc4e 100644 --- a/lib/openai/models/chat/chat_completion_message_tool_call.rb +++ b/lib/openai/models/chat/chat_completion_message_tool_call.rb @@ -35,9 +35,9 @@ class ChatCompletionMessageToolCall < OpenAI::Internal::Type::BaseModel class Function < OpenAI::Internal::Type::BaseModel # @!attribute arguments # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. 
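The repeated "validate the arguments" warning above is worth making concrete. A self-contained sketch (no API calls; `get_weather` and its expected keys are illustrative) of defensively parsing a tool call's `arguments` string:

    require "json"

    # `arguments` is model-generated JSON: it may be malformed, or carry
    # keys the function schema never defined.
    def parse_get_weather_arguments(arguments)
      parsed = JSON.parse(arguments)
      location = parsed.fetch("location") # raise early if a required key is absent
      unit = parsed.fetch("unit", "celsius")
      raise ArgumentError, "unsupported unit #{unit.inspect}" unless %w[celsius fahrenheit].include?(unit)
      {location: location, unit: unit}
    rescue JSON::ParserError => e
      # Report malformed JSON back to the model rather than crashing.
      {error: "invalid arguments: #{e.message}"}
    end

    p parse_get_weather_arguments('{"location": "Paris"}')
    p parse_get_weather_arguments("not json")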
# # @return [String] required :arguments, String diff --git a/lib/openai/models/chat/chat_completion_named_tool_choice.rb b/lib/openai/models/chat/chat_completion_named_tool_choice.rb index 9058c083..c887906f 100644 --- a/lib/openai/models/chat/chat_completion_named_tool_choice.rb +++ b/lib/openai/models/chat/chat_completion_named_tool_choice.rb @@ -17,7 +17,7 @@ class ChatCompletionNamedToolChoice < OpenAI::Internal::Type::BaseModel # @!parse # # Specifies a tool the model should use. Use to force the model to call a specific - # # function. + # # function. # # # # @param function [OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function] # # @param type [Symbol, :function] diff --git a/lib/openai/models/chat/chat_completion_prediction_content.rb b/lib/openai/models/chat/chat_completion_prediction_content.rb index dd3150fd..0cc7df62 100644 --- a/lib/openai/models/chat/chat_completion_prediction_content.rb +++ b/lib/openai/models/chat/chat_completion_prediction_content.rb @@ -6,22 +6,22 @@ module Chat class ChatCompletionPredictionContent < OpenAI::Internal::Type::BaseModel # @!attribute content # The content that should be matched when generating a model response. If - # generated tokens would match this content, the entire model response can be - # returned much more quickly. + # generated tokens would match this content, the entire model response can be + # returned much more quickly. # # @return [String, Array] required :content, union: -> { OpenAI::Models::Chat::ChatCompletionPredictionContent::Content } # @!attribute type # The type of the predicted content you want to provide. This type is currently - # always `content`. + # always `content`. # # @return [Symbol, :content] required :type, const: :content # @!parse # # Static predicted output content, such as the content of a text file that is - # # being regenerated. + # # being regenerated. # # # # @param content [String, Array] # # @param type [Symbol, :content] @@ -31,8 +31,8 @@ class ChatCompletionPredictionContent < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The content that should be matched when generating a model response. If - # generated tokens would match this content, the entire model response can be - # returned much more quickly. + # generated tokens would match this content, the entire model response can be + # returned much more quickly. # # @see OpenAI::Models::Chat::ChatCompletionPredictionContent#content module Content diff --git a/lib/openai/models/chat/chat_completion_stream_options.rb b/lib/openai/models/chat/chat_completion_stream_options.rb index 7b23b7a2..cb29b9a6 100644 --- a/lib/openai/models/chat/chat_completion_stream_options.rb +++ b/lib/openai/models/chat/chat_completion_stream_options.rb @@ -6,12 +6,12 @@ module Chat class ChatCompletionStreamOptions < OpenAI::Internal::Type::BaseModel # @!attribute [r] include_usage # If set, an additional chunk will be streamed before the `data: [DONE]` message. - # The `usage` field on this chunk shows the token usage statistics for the entire - # request, and the `choices` field will always be an empty array. + # The `usage` field on this chunk shows the token usage statistics for the entire + # request, and the `choices` field will always be an empty array. # - # All other chunks will also include a `usage` field, but with a null value. - # **NOTE:** If the stream is interrupted, you may not receive the final usage - # chunk which contains the total token usage for the request. 
+ # All other chunks will also include a `usage` field, but with a null value. + # **NOTE:** If the stream is interrupted, you may not receive the final usage + # chunk which contains the total token usage for the request. # # @return [Boolean, nil] optional :include_usage, OpenAI::Internal::Type::Boolean diff --git a/lib/openai/models/chat/chat_completion_system_message_param.rb b/lib/openai/models/chat/chat_completion_system_message_param.rb index d1d9b2f7..44d1d207 100644 --- a/lib/openai/models/chat/chat_completion_system_message_param.rb +++ b/lib/openai/models/chat/chat_completion_system_message_param.rb @@ -18,7 +18,7 @@ class ChatCompletionSystemMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute [r] name # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. # # @return [String, nil] optional :name, String @@ -29,8 +29,8 @@ class ChatCompletionSystemMessageParam < OpenAI::Internal::Type::BaseModel # @!parse # # Developer-provided instructions that the model should follow, regardless of - # # messages sent by the user. With o1 models and newer, use `developer` messages - # # for this purpose instead. + # # messages sent by the user. With o1 models and newer, use `developer` messages + # # for this purpose instead. # # # # @param content [String, Array] # # @param name [String] diff --git a/lib/openai/models/chat/chat_completion_token_logprob.rb b/lib/openai/models/chat/chat_completion_token_logprob.rb index 8b6d0019..a9f0bc0d 100644 --- a/lib/openai/models/chat/chat_completion_token_logprob.rb +++ b/lib/openai/models/chat/chat_completion_token_logprob.rb @@ -12,25 +12,25 @@ class ChatCompletionTokenLogprob < OpenAI::Internal::Type::BaseModel # @!attribute bytes # A list of integers representing the UTF-8 bytes representation of the token. - # Useful in instances where characters are represented by multiple tokens and - # their byte representations must be combined to generate the correct text - # representation. Can be `null` if there is no bytes representation for the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. # # @return [Array, nil] required :bytes, OpenAI::Internal::Type::ArrayOf[Integer], nil?: true # @!attribute logprob # The log probability of this token, if it is within the top 20 most likely - # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very - # unlikely. + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. # # @return [Float] required :logprob, Float # @!attribute top_logprobs # List of the most likely tokens and their log probability, at this token - # position. In rare cases, there may be fewer than the number of requested - # `top_logprobs` returned. + # position. In rare cases, there may be fewer than the number of requested + # `top_logprobs` returned. # # @return [Array] required :top_logprobs, @@ -55,17 +55,17 @@ class TopLogprob < OpenAI::Internal::Type::BaseModel # @!attribute bytes # A list of integers representing the UTF-8 bytes representation of the token. - # Useful in instances where characters are represented by multiple tokens and - # their byte representations must be combined to generate the correct text - # representation. 
Can be `null` if there is no bytes representation for the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. # # @return [Array, nil] required :bytes, OpenAI::Internal::Type::ArrayOf[Integer], nil?: true # @!attribute logprob # The log probability of this token, if it is within the top 20 most likely - # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very - # unlikely. + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. # # @return [Float] required :logprob, Float diff --git a/lib/openai/models/chat/chat_completion_tool_choice_option.rb b/lib/openai/models/chat/chat_completion_tool_choice_option.rb index bb04bbc8..c57aaf23 100644 --- a/lib/openai/models/chat/chat_completion_tool_choice_option.rb +++ b/lib/openai/models/chat/chat_completion_tool_choice_option.rb @@ -4,14 +4,14 @@ module OpenAI module Models module Chat # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tool and instead generates a message. `auto` means the model can - # pick between generating a message or calling one or more tools. `required` means - # the model must call one or more tools. Specifying a particular tool via - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. # - # `none` is the default when no tools are present. `auto` is the default if tools - # are present. + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. module ChatCompletionToolChoiceOption extend OpenAI::Internal::Type::Union @@ -22,8 +22,8 @@ module ChatCompletionToolChoiceOption variant -> { OpenAI::Models::Chat::ChatCompletionNamedToolChoice } # `none` means the model will not call any tool and instead generates a message. - # `auto` means the model can pick between generating a message or calling one or - # more tools. `required` means the model must call one or more tools. + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools. module Auto extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/chat/chat_completion_user_message_param.rb b/lib/openai/models/chat/chat_completion_user_message_param.rb index ba2c2dbe..6cf8585a 100644 --- a/lib/openai/models/chat/chat_completion_user_message_param.rb +++ b/lib/openai/models/chat/chat_completion_user_message_param.rb @@ -18,7 +18,7 @@ class ChatCompletionUserMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute [r] name # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. 
+ # differentiate between participants of the same role.
# # @return [String, nil] optional :name, String @@ -29,7 +29,7 @@ class ChatCompletionUserMessageParam < OpenAI::Internal::Type::BaseModel # @!parse # # Messages sent by an end user, containing prompts or additional context - # # information. + # # information. # # # # @param content [String, Array] # # @param name [String] diff --git a/lib/openai/models/chat/completion_create_params.rb b/lib/openai/models/chat/completion_create_params.rb index e8aed051..65406125 100644 --- a/lib/openai/models/chat/completion_create_params.rb +++ b/lib/openai/models/chat/completion_create_params.rb @@ -13,11 +13,11 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute messages # A list of messages comprising the conversation so far. Depending on the - # [model](https://platform.openai.com/docs/models) you use, different message - # types (modalities) are supported, like - # [text](https://platform.openai.com/docs/guides/text-generation), - # [images](https://platform.openai.com/docs/guides/vision), and - # [audio](https://platform.openai.com/docs/guides/audio). + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). # # @return [Array] required :messages, @@ -25,26 +25,26 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute model # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. # # @return [String, Symbol, OpenAI::Models::ChatModel] required :model, union: -> { OpenAI::Models::Chat::CompletionCreateParams::Model } # @!attribute audio # Parameters for audio output. Required when audio output is requested with - # `modalities: ["audio"]`. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). # # @return [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] optional :audio, -> { OpenAI::Models::Chat::ChatCompletionAudioParam }, nil?: true # @!attribute frequency_penalty # Number between -2.0 and 2.0. Positive values penalize new tokens based on their - # existing frequency in the text so far, decreasing the model's likelihood to - # repeat the same line verbatim. + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. # # @return [Float, nil] optional :frequency_penalty, Float, nil?: true @@ -52,18 +52,18 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] function_call # Deprecated in favor of `tool_choice`. # - # Controls which (if any) function is called by the model. + # Controls which (if any) function is called by the model. # - # `none` means the model will not call a function and instead generates a message. + # `none` means the model will not call a function and instead generates a message. 
# - # `auto` means the model can pick between generating a message or calling a - # function. + # `auto` means the model can pick between generating a message or calling a + # function. # - # Specifying a particular function via `{"name": "my_function"}` forces the model - # to call that function. + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. # - # `none` is the default when no functions are present. `auto` is the default if - # functions are present. + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. # # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption, nil] optional :function_call, union: -> { OpenAI::Models::Chat::CompletionCreateParams::FunctionCall } @@ -75,7 +75,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] functions # Deprecated in favor of `tools`. # - # A list of functions the model may generate JSON inputs for. + # A list of functions the model may generate JSON inputs for. # # @return [Array, nil] optional :functions, @@ -88,66 +88,66 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute logit_bias # Modify the likelihood of specified tokens appearing in the completion. # - # Accepts a JSON object that maps tokens (specified by their token ID in the - # tokenizer) to an associated bias value from -100 to 100. Mathematically, the - # bias is added to the logits generated by the model prior to sampling. The exact - # effect will vary per model, but values between -1 and 1 should decrease or - # increase likelihood of selection; values like -100 or 100 should result in a ban - # or exclusive selection of the relevant token. + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. # # @return [Hash{Symbol=>Integer}, nil] optional :logit_bias, OpenAI::Internal::Type::HashOf[Integer], nil?: true # @!attribute logprobs # Whether to return log probabilities of the output tokens or not. If true, - # returns the log probabilities of each output token returned in the `content` of - # `message`. + # returns the log probabilities of each output token returned in the `content` of + # `message`. # # @return [Boolean, nil] optional :logprobs, OpenAI::Internal::Type::Boolean, nil?: true # @!attribute max_completion_tokens # An upper bound for the number of tokens that can be generated for a completion, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). # # @return [Integer, nil] optional :max_completion_tokens, Integer, nil?: true # @!attribute max_tokens # The maximum number of [tokens](/tokenizer) that can be generated in the chat - # completion. This value can be used to control - # [costs](https://openai.com/api/pricing/) for text generated via API. + # completion. 
This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. # - # This value is now deprecated in favor of `max_completion_tokens`, and is not - # compatible with - # [o1 series models](https://platform.openai.com/docs/guides/reasoning). + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o1 series models](https://platform.openai.com/docs/guides/reasoning). # # @return [Integer, nil] optional :max_tokens, Integer, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute modalities # Output types that you would like the model to generate. Most models are capable - # of generating text, which is the default: + # of generating text, which is the default: # - # `["text"]` + # `["text"]` # - # The `gpt-4o-audio-preview` model can also be used to - # [generate audio](https://platform.openai.com/docs/guides/audio). To request that - # this model generate both text and audio responses, you can use: + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). To request that + # this model generate both text and audio responses, you can use: # - # `["text", "audio"]` + # `["text", "audio"]` # # @return [Array, nil] optional :modalities, @@ -156,16 +156,16 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute n # How many chat completion choices to generate for each input message. Note that - # you will be charged based on the number of generated tokens across all of the - # choices. Keep `n` as `1` to minimize costs. + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. # # @return [Integer, nil] optional :n, Integer, nil?: true # @!attribute [r] parallel_tool_calls # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. # # @return [Boolean, nil] optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean @@ -176,15 +176,15 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute prediction # Static predicted output content, such as the content of a text file that is - # being regenerated. + # being regenerated. # # @return [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] optional :prediction, -> { OpenAI::Models::Chat::ChatCompletionPredictionContent }, nil?: true # @!attribute presence_penalty # Number between -2.0 and 2.0. 
Positive values penalize new tokens based on - # whether they appear in the text so far, increasing the model's likelihood to - # talk about new topics. + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. # # @return [Float, nil] optional :presence_penalty, Float, nil?: true @@ -192,10 +192,10 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_effort # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::Models::ReasoningEffort }, nil?: true @@ -203,14 +203,14 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] response_format # An object specifying the format that the model must output. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. # # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject, nil] optional :response_format, union: -> { OpenAI::Models::Chat::CompletionCreateParams::ResponseFormat } @@ -221,44 +221,44 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute seed # This feature is in Beta. If specified, our system will make a best effort to - # sample deterministically, such that repeated requests with the same `seed` and - # parameters should return the same result. Determinism is not guaranteed, and you - # should refer to the `system_fingerprint` response parameter to monitor changes - # in the backend. + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. # # @return [Integer, nil] optional :seed, Integer, nil?: true # @!attribute service_tier # Specifies the latency tier to use for processing the request. 
       # @!attribute service_tier
       # Specifies the latency tier to use for processing the request. This parameter is
-      # relevant for customers subscribed to the scale tier service:
+      # relevant for customers subscribed to the scale tier service:
       #
-      # - If set to 'auto', and the Project is Scale tier enabled, the system will
-      #   utilize scale tier credits until they are exhausted.
-      # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-      #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarantee.
-      # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarantee.
-      # - When not set, the default behavior is 'auto'.
+      # - If set to 'auto', and the Project is Scale tier enabled, the system will
+      #   utilize scale tier credits until they are exhausted.
+      # - If set to 'auto', and the Project is not Scale tier enabled, the request will
+      #   be processed using the default service tier with a lower uptime SLA and no
+      #   latency guarantee.
+      # - If set to 'default', the request will be processed using the default service
+      #   tier with a lower uptime SLA and no latency guarantee.
+      # - When not set, the default behavior is 'auto'.
       #
-      # When this parameter is set, the response body will include the `service_tier`
-      # utilized.
+      # When this parameter is set, the response body will include the `service_tier`
+      # utilized.
       #
       # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil]
       optional :service_tier, enum: -> { OpenAI::Models::Chat::CompletionCreateParams::ServiceTier }, nil?: true
       # @!attribute stop
       # Up to 4 sequences where the API will stop generating further tokens. The
-      # returned text will not contain the stop sequence.
+      # returned text will not contain the stop sequence.
       #
       # @return [String, Array, nil]
       optional :stop, union: -> { OpenAI::Models::Chat::CompletionCreateParams::Stop }, nil?: true
       # @!attribute store
       # Whether or not to store the output of this chat completion request for use in
-      # our [model distillation](https://platform.openai.com/docs/guides/distillation)
-      # or [evals](https://platform.openai.com/docs/guides/evals) products.
+      # our [model distillation](https://platform.openai.com/docs/guides/distillation)
+      # or [evals](https://platform.openai.com/docs/guides/evals) products.
       #
       # @return [Boolean, nil]
       optional :store, OpenAI::Internal::Type::Boolean, nil?: true
@@ -271,23 +271,23 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute temperature
       # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-      # make the output more random, while lower values like 0.2 will make it more
-      # focused and deterministic. We generally recommend altering this or `top_p` but
-      # not both.
+      # make the output more random, while lower values like 0.2 will make it more
+      # focused and deterministic. We generally recommend altering this or `top_p` but
+      # not both.
       #
       # @return [Float, nil]
       optional :temperature, Float, nil?: true
       # @!attribute [r] tool_choice
       # Controls which (if any) tool is called by the model. `none` means the model will
-      # not call any tool and instead generates a message. `auto` means the model can
+      # pick between generating a message or calling one or more tools. `required` means
+      # the model must call one or more tools. Specifying a particular tool via
+      # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+      # call that tool.
       #
-      # `none` is the default when no tools are present. `auto` is the default if tools
-      # are present.
+      # `none` is the default when no tools are present. `auto` is the default if tools
+      # are present.
       #
       # @return [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, nil]
       optional :tool_choice, union: -> { OpenAI::Models::Chat::ChatCompletionToolChoiceOption }
@@ -298,8 +298,8 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] tools
       # A list of tools the model may call. Currently, only functions are supported as a
-      # tool. Use this to provide a list of functions the model may generate JSON inputs
-      # for. A max of 128 functions is supported.
+      # tool. Use this to provide a list of functions the model may generate JSON inputs
+      # for. A max of 128 functions is supported.
       #
       # @return [Array, nil]
       optional :tools, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Chat::ChatCompletionTool] }
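To make the `tools`/`tool_choice` interplay concrete, a hedged sketch reusing the `client` from the earlier example (the `get_weather` function and its schema are invented for illustration):

```ruby
weather_tool = {
  type: :function,
  function: {
    name: "get_weather",
    description: "Look up current weather for a city",
    parameters: {
      type: "object",
      properties: {city: {type: "string"}},
      required: ["city"]
    }
  }
}

# Forcing a specific tool; `tool_choice: :auto` would let the model decide.
response = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: "user", content: "Weather in Lisbon?"}],
  tools: [weather_tool],
  tool_choice: {type: :function, function: {name: "get_weather"}}
)
call = response.choices.first.message.tool_calls.first
puts call.function.name      # => "get_weather"
puts call.function.arguments # JSON-encoded arguments generated by the model
```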
@@ -310,26 +310,26 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute top_logprobs
       # An integer between 0 and 20 specifying the number of most likely tokens to
-      # return at each token position, each with an associated log probability.
-      # `logprobs` must be set to `true` if this parameter is used.
+      # return at each token position, each with an associated log probability.
+      # `logprobs` must be set to `true` if this parameter is used.
       #
       # @return [Integer, nil]
       optional :top_logprobs, Integer, nil?: true
       # @!attribute top_p
       # An alternative to sampling with temperature, called nucleus sampling, where the
-      # model considers the results of the tokens with top_p probability mass. So 0.1
-      # means only the tokens comprising the top 10% probability mass are considered.
+      # model considers the results of the tokens with top_p probability mass. So 0.1
+      # means only the tokens comprising the top 10% probability mass are considered.
       #
-      # We generally recommend altering this or `temperature` but not both.
+      # We generally recommend altering this or `temperature` but not both.
       #
       # @return [Float, nil]
       optional :top_p, Float, nil?: true
       # @!attribute [r] user
       # A unique identifier representing your end-user, which can help OpenAI to monitor
-      # and detect abuse.
-      # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+      # and detect abuse.
+      # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
       #
       # @return [String, nil]
       optional :user, String
@@ -340,8 +340,8 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] web_search_options
       # This tool searches the web for relevant results to use in a response. Learn more
-      # about the
-      # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+      # about the
+      # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
       #
       # @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil]
       optional :web_search_options, -> { OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions }
@@ -423,10 +423,10 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       #   def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
       # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
-      # wide range of models with different capabilities, performance characteristics,
-      # and price points. Refer to the
-      # [model guide](https://platform.openai.com/docs/models) to browse and compare
-      # available models.
+      # wide range of models with different capabilities, performance characteristics,
+      # and price points. Refer to the
+      # [model guide](https://platform.openai.com/docs/models) to browse and compare
+      # available models.
       module Model
        extend OpenAI::Internal::Type::Union
@@ -447,18 +447,18 @@ module Model
       #
       # Deprecated in favor of `tool_choice`.
       #
-      # Controls which (if any) function is called by the model.
+      # Controls which (if any) function is called by the model.
       #
-      # `none` means the model will not call a function and instead generates a message.
+      # `none` means the model will not call a function and instead generates a message.
       #
-      # `auto` means the model can pick between generating a message or calling a
-      # function.
+      # `auto` means the model can pick between generating a message or calling a
+      # function.
       #
-      # Specifying a particular function via `{"name": "my_function"}` forces the model
-      # to call that function.
+      # Specifying a particular function via `{"name": "my_function"}` forces the model
+      # to call that function.
       #
-      # `none` is the default when no functions are present. `auto` is the default if
-      # functions are present.
+      # `none` is the default when no functions are present. `auto` is the default if
+      # functions are present.
       module FunctionCall
        extend OpenAI::Internal::Type::Union
@@ -469,8 +469,8 @@ module FunctionCall
       variant -> { OpenAI::Models::Chat::ChatCompletionFunctionCallOption }
       # `none` means the model will not call a function and instead generates a message.
-      # `auto` means the model can pick between generating a message or calling a
-      # function.
+      # `auto` means the model can pick between generating a message or calling a
+      # function.
       module FunctionCallMode
        extend OpenAI::Internal::Type::Enum
@@ -493,14 +493,14 @@ module FunctionCallMode
   class Function < OpenAI::Internal::Type::BaseModel
       # @!attribute name
       # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
-      # underscores and dashes, with a maximum length of 64.
+      # underscores and dashes, with a maximum length of 64.
       #
       # @return [String]
       required :name, String
       # @!attribute [r] description
       # A description of what the function does, used by the model to choose when and
-      # how to call the function.
+      # how to call the function.
       #
       # @return [String, nil]
       optional :description, String
@@ -511,12 +511,12 @@ class Function < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] parameters
       # The parameters the function accepts, described as a JSON Schema object. See the
-      # [guide](https://platform.openai.com/docs/guides/function-calling) for examples,
-      # and the
-      # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-      # documentation about the format.
+      # [guide](https://platform.openai.com/docs/guides/function-calling) for examples,
+      # and the
+      # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+      # documentation about the format.
       #
-      # Omitting `parameters` defines a function with an empty parameter list.
+      # Omitting `parameters` defines a function with an empty parameter list.
       #
       # @return [Hash{Symbol=>Object}, nil]
       optional :parameters, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
@@ -550,14 +550,14 @@ module Modality
       # An object specifying the format that the model must output.
       #
-      # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-      # Outputs which ensures the model will match your supplied JSON schema. Learn more
-      # in the
-      # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+      # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+      # Outputs which ensures the model will match your supplied JSON schema. Learn more
+      # in the
+      # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
       #
-      # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-      # ensures the message the model generates is valid JSON. Using `json_schema` is
-      # preferred for models that support it.
+      # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+      # ensures the message the model generates is valid JSON. Using `json_schema` is
+      # preferred for models that support it.
       module ResponseFormat
        extend OpenAI::Internal::Type::Union
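A hedged sketch of the Structured Outputs path described above (the schema, its `name`, and the prompt are invented; the `json_schema` hash follows the response-format shape documented in these comments):

```ruby
require "json"

response = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: "user", content: "Extract: Ada Lovelace, born 1815."}],
  response_format: {
    type: :json_schema,
    json_schema: {
      name: "person",
      strict: true,
      schema: {
        type: "object",
        properties: {name: {type: "string"}, birth_year: {type: "integer"}},
        required: %w[name birth_year],
        additionalProperties: false
      }
    }
  }
)
person = JSON.parse(response.choices.first.message.content)
```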
@@ -580,19 +580,19 @@ module ResponseFormat
       end
       # Specifies the latency tier to use for processing the request. This parameter is
-      # relevant for customers subscribed to the scale tier service:
-      #
-      # - If set to 'auto', and the Project is Scale tier enabled, the system will
-      #   utilize scale tier credits until they are exhausted.
-      # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-      #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarantee.
-      # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarantee.
-      # - When not set, the default behavior is 'auto'.
-      #
-      # When this parameter is set, the response body will include the `service_tier`
-      # utilized.
+      # relevant for customers subscribed to the scale tier service:
+      #
+      # - If set to 'auto', and the Project is Scale tier enabled, the system will
+      #   utilize scale tier credits until they are exhausted.
+      # - If set to 'auto', and the Project is not Scale tier enabled, the request will
+      #   be processed using the default service tier with a lower uptime SLA and no
+      #   latency guarantee.
+      # - If set to 'default', the request will be processed using the default service
+      #   tier with a lower uptime SLA and no latency guarantee.
+      # - When not set, the default behavior is 'auto'.
+      #
+      # When this parameter is set, the response body will include the `service_tier`
+      # utilized.
       module ServiceTier
        extend OpenAI::Internal::Type::Enum
@@ -607,7 +607,7 @@ module ServiceTier
       end
       # Up to 4 sequences where the API will stop generating further tokens. The
-      # returned text will not contain the stop sequence.
+      # returned text will not contain the stop sequence.
       module Stop
        extend OpenAI::Internal::Type::Union
@@ -625,7 +625,7 @@ module Stop
   class WebSearchOptions < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] search_context_size
       # High level guidance for the amount of context window space to use for the
-      # search. One of `low`, `medium`, or `high`. `medium` is the default.
+      # search. One of `low`, `medium`, or `high`. `medium` is the default.
       #
       # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize, nil]
       optional :search_context_size,
@@ -645,8 +645,8 @@ class WebSearchOptions < OpenAI::Internal::Type::BaseModel
       # @!parse
       #   # This tool searches the web for relevant results to use in a response. Learn more
-      #   # about the
-      #   # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+      #   # about the
+      #   # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
       #   #
       #   # @param search_context_size [Symbol, OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize]
       #   # @param user_location [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation, nil]
@@ -656,7 +656,7 @@ class WebSearchOptions < OpenAI::Internal::Type::BaseModel
       #   def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
       # High level guidance for the amount of context window space to use for the
-      # search. One of `low`, `medium`, or `high`. `medium` is the default.
+      # search. One of `low`, `medium`, or `high`. `medium` is the default.
       #
       # @see OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions#search_context_size
       module SearchContextSize
@@ -712,7 +712,7 @@ class Approximate < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] country
       # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
-      # the user, e.g. `US`.
+      # the user, e.g. `US`.
       #
       # @return [String, nil]
       optional :country, String
@@ -733,7 +733,7 @@ class Approximate < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] timezone
       # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
-      # user, e.g. `America/Los_Angeles`.
+      # user, e.g. `America/Los_Angeles`.
       #
       # @return [String, nil]
       optional :timezone, String
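The `WebSearchOptions` shape reads more easily as a request sketch. Hedged: the search-preview model name and the location values are illustrative, and the feature may not be available on every account:

```ruby
response = client.chat.completions.create(
  model: "gpt-4o-search-preview",
  messages: [{role: "user", content: "What changed in Ruby this week?"}],
  web_search_options: {
    search_context_size: :low,
    user_location: {
      type: :approximate,
      approximate: {country: "US", timezone: "America/Los_Angeles"}
    }
  }
)
```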
diff --git a/lib/openai/models/chat/completion_list_params.rb b/lib/openai/models/chat/completion_list_params.rb
index 5e2f39ec..dede958c 100644
--- a/lib/openai/models/chat/completion_list_params.rb
+++ b/lib/openai/models/chat/completion_list_params.rb
@@ -32,7 +32,7 @@ class CompletionListParams < OpenAI::Internal::Type::BaseModel
       # @!attribute metadata
       # A list of metadata keys to filter the Chat Completions by. Example:
       #
-      # `metadata[key1]=value1&metadata[key2]=value2`
+      # `metadata[key1]=value1&metadata[key2]=value2`
       #
       # @return [Hash{Symbol=>String}, nil]
       optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
@@ -49,7 +49,7 @@ class CompletionListParams < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] order
       # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
-      # `desc` for descending order. Defaults to `asc`.
+      # `desc` for descending order. Defaults to `asc`.
       #
       # @return [Symbol, OpenAI::Models::Chat::CompletionListParams::Order, nil]
       optional :order, enum: -> { OpenAI::Models::Chat::CompletionListParams::Order }
@@ -71,7 +71,7 @@ class CompletionListParams < OpenAI::Internal::Type::BaseModel
       #   def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
       # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
-      # `desc` for descending order. Defaults to `asc`.
+      # `desc` for descending order. Defaults to `asc`.
       module Order
        extend OpenAI::Internal::Type::Enum
diff --git a/lib/openai/models/chat/completion_update_params.rb b/lib/openai/models/chat/completion_update_params.rb
index 54a9a688..0a8504f8 100644
--- a/lib/openai/models/chat/completion_update_params.rb
+++ b/lib/openai/models/chat/completion_update_params.rb
@@ -11,11 +11,11 @@ class CompletionUpdateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute metadata
       # Set of 16 key-value pairs that can be attached to an object. This can be useful
-      # for storing additional information about the object in a structured format, and
-      # querying for objects via API or the dashboard.
+      # for storing additional information about the object in a structured format, and
+      # querying for objects via API or the dashboard.
       #
-      # Keys are strings with a maximum length of 64 characters. Values are strings with
-      # a maximum length of 512 characters.
+      # Keys are strings with a maximum length of 64 characters. Values are strings with
+      # a maximum length of 512 characters.
       #
       # @return [Hash{Symbol=>String}, nil]
       required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
diff --git a/lib/openai/models/chat/completions/message_list_params.rb b/lib/openai/models/chat/completions/message_list_params.rb
index 18970d11..eca322af 100644
--- a/lib/openai/models/chat/completions/message_list_params.rb
+++ b/lib/openai/models/chat/completions/message_list_params.rb
@@ -32,7 +32,7 @@ class MessageListParams < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] order
       # Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
-      # for descending order. Defaults to `asc`.
+      # for descending order. Defaults to `asc`.
       #
       # @return [Symbol, OpenAI::Models::Chat::Completions::MessageListParams::Order, nil]
       optional :order, enum: -> { OpenAI::Models::Chat::Completions::MessageListParams::Order }
@@ -52,7 +52,7 @@ class MessageListParams < OpenAI::Internal::Type::BaseModel
       #   def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
       # Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
-      # for descending order. Defaults to `asc`.
+      # for descending order. Defaults to `asc`.
       module Order
        extend OpenAI::Internal::Type::Enum
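Pulling the three param classes above together, a hedged sketch of listing stored Chat Completions, reading one's messages, and updating its metadata (the completion ID and metadata values are invented):

```ruby
page = client.chat.completions.list(order: :desc, limit: 20)
page.auto_paging_each { |completion| puts completion.id }

client.chat.completions.messages.list("chatcmpl-abc123", order: :asc)
client.chat.completions.update("chatcmpl-abc123", metadata: {project: "docs-demo"})
```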
diff --git a/lib/openai/models/comparison_filter.rb b/lib/openai/models/comparison_filter.rb
index 1c2b3a50..13134827 100644
--- a/lib/openai/models/comparison_filter.rb
+++ b/lib/openai/models/comparison_filter.rb
@@ -12,26 +12,26 @@ class ComparisonFilter < OpenAI::Internal::Type::BaseModel
       # @!attribute type
       # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
       #
-      # - `eq`: equals
-      # - `ne`: not equal
-      # - `gt`: greater than
-      # - `gte`: greater than or equal
-      # - `lt`: less than
-      # - `lte`: less than or equal
+      # - `eq`: equals
+      # - `ne`: not equal
+      # - `gt`: greater than
+      # - `gte`: greater than or equal
+      # - `lt`: less than
+      # - `lte`: less than or equal
       #
       # @return [Symbol, OpenAI::Models::ComparisonFilter::Type]
       required :type, enum: -> { OpenAI::Models::ComparisonFilter::Type }
       # @!attribute value
       # The value to compare against the attribute key; supports string, number, or
-      # boolean types.
       #
       # @return [String, Float, Boolean]
       required :value, union: -> { OpenAI::Models::ComparisonFilter::Value }
       # @!parse
       #   # A filter used to compare a specified attribute key to a given value using a
-      #   # defined comparison operation.
+      #   # defined comparison operation.
       #   #
       #   # @param key [String]
       #   # @param type [Symbol, OpenAI::Models::ComparisonFilter::Type]
@@ -43,12 +43,12 @@ class ComparisonFilter < OpenAI::Internal::Type::BaseModel
       # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
       #
-      # - `eq`: equals
-      # - `ne`: not equal
-      # - `gt`: greater than
-      # - `gte`: greater than or equal
-      # - `lt`: less than
-      # - `lte`: less than or equal
+      # - `eq`: equals
+      # - `ne`: not equal
+      # - `gt`: greater than
+      # - `gte`: greater than or equal
+      # - `lt`: less than
+      # - `lte`: less than or equal
       #
       # @see OpenAI::Models::ComparisonFilter#type
       module Type
@@ -69,7 +69,7 @@ module Type
       end
       # The value to compare against the attribute key; supports string, number, or
-      # boolean types.
+      # boolean types.
       #
       # @see OpenAI::Models::ComparisonFilter#value
       module Value
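In requests these filters travel as plain hashes shaped like the classes above. A hedged sketch combining a `ComparisonFilter` with a `CompoundFilter` (the attribute keys `region` and `year` are invented):

```ruby
region_filter = {type: :eq, key: "region", value: "us"}
year_filter   = {type: :gte, key: "year", value: 2024}

# `CompoundFilter` nests comparison filters under a boolean connective.
combined = {type: :and, filters: [region_filter, year_filter]}
```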
diff --git a/lib/openai/models/completion.rb b/lib/openai/models/completion.rb
index 49f1d4c1..6062d60a 100644
--- a/lib/openai/models/completion.rb
+++ b/lib/openai/models/completion.rb
@@ -39,8 +39,8 @@ class Completion < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] system_fingerprint
       # This fingerprint represents the backend configuration that the model runs with.
       #
-      # Can be used in conjunction with the `seed` request parameter to understand when
-      # backend changes have been made that might impact determinism.
+      # Can be used in conjunction with the `seed` request parameter to understand when
+      # backend changes have been made that might impact determinism.
       #
       # @return [String, nil]
       optional :system_fingerprint, String
@@ -61,7 +61,7 @@ class Completion < OpenAI::Internal::Type::BaseModel
       # @!parse
       #   # Represents a completion response from the API. Note: both the streamed and
-      #   # non-streamed response objects share the same shape (unlike the chat endpoint).
+      #   # non-streamed response objects share the same shape (unlike the chat endpoint).
       #   #
       #   # @param id [String]
       #   # @param choices [Array]
diff --git a/lib/openai/models/completion_choice.rb b/lib/openai/models/completion_choice.rb
index 1a2084f4..07081468 100644
--- a/lib/openai/models/completion_choice.rb
+++ b/lib/openai/models/completion_choice.rb
@@ -5,9 +5,9 @@ module Models
   class CompletionChoice < OpenAI::Internal::Type::BaseModel
       # @!attribute finish_reason
       # The reason the model stopped generating tokens. This will be `stop` if the model
-      # hit a natural stop point or a provided stop sequence, `length` if the maximum
-      # number of tokens specified in the request was reached, or `content_filter` if
-      # content was omitted due to a flag from our content filters.
+      # hit a natural stop point or a provided stop sequence, `length` if the maximum
+      # number of tokens specified in the request was reached, or `content_filter` if
+      # content was omitted due to a flag from our content filters.
       #
       # @return [Symbol, OpenAI::Models::CompletionChoice::FinishReason]
       required :finish_reason, enum: -> { OpenAI::Models::CompletionChoice::FinishReason }
@@ -38,9 +38,9 @@ class CompletionChoice < OpenAI::Internal::Type::BaseModel
       #   def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
       # The reason the model stopped generating tokens. This will be `stop` if the model
-      # hit a natural stop point or a provided stop sequence, `length` if the maximum
-      # number of tokens specified in the request was reached, or `content_filter` if
-      # content was omitted due to a flag from our content filters.
+      # hit a natural stop point or a provided stop sequence, `length` if the maximum
+      # number of tokens specified in the request was reached, or `content_filter` if
+      # content was omitted due to a flag from our content filters.
       #
       # @see OpenAI::Models::CompletionChoice#finish_reason
       module FinishReason
diff --git a/lib/openai/models/completion_create_params.rb b/lib/openai/models/completion_create_params.rb
index c8920c94..a2931ac5 100644
--- a/lib/openai/models/completion_create_params.rb
+++ b/lib/openai/models/completion_create_params.rb
@@ -12,35 +12,35 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute model
       # ID of the model to use. You can use the
-      # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-      # see all of your available models, or see our
-      # [Model overview](https://platform.openai.com/docs/models) for descriptions of
-      # them.
+      # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+      # see all of your available models, or see our
+      # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+      # them.
       #
       # @return [String, Symbol, OpenAI::Models::CompletionCreateParams::Model]
       required :model, union: -> { OpenAI::Models::CompletionCreateParams::Model }
       # @!attribute prompt
       # The prompt(s) to generate completions for, encoded as a string, array of
-      # strings, array of tokens, or array of token arrays.
+      # strings, array of tokens, or array of token arrays.
       #
-      # Note that <|endoftext|> is the document separator that the model sees during
-      # training, so if a prompt is not specified the model will generate as if from the
-      # beginning of a new document.
+      # Note that <|endoftext|> is the document separator that the model sees during
+      # training, so if a prompt is not specified the model will generate as if from the
+      # beginning of a new document.
       #
       # @return [String, Array, Array, Array>, nil]
       required :prompt, union: -> { OpenAI::Models::CompletionCreateParams::Prompt }, nil?: true
       # @!attribute best_of
       # Generates `best_of` completions server-side and returns the "best" (the one with
-      # the highest log probability per token). Results cannot be streamed.
+      # the highest log probability per token). Results cannot be streamed.
       #
-      # When used with `n`, `best_of` controls the number of candidate completions and
-      # `n` specifies how many to return – `best_of` must be greater than `n`.
+      # When used with `n`, `best_of` controls the number of candidate completions and
+      # `n` specifies how many to return – `best_of` must be greater than `n`.
       #
-      # **Note:** Because this parameter generates many completions, it can quickly
-      # consume your token quota. Use carefully and ensure that you have reasonable
-      # settings for `max_tokens` and `stop`.
+      # **Note:** Because this parameter generates many completions, it can quickly
+      # consume your token quota. Use carefully and ensure that you have reasonable
+      # settings for `max_tokens` and `stop`.
       #
       # @return [Integer, nil]
       optional :best_of, Integer, nil?: true
@@ -53,10 +53,10 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute frequency_penalty
       # Number between -2.0 and 2.0. Positive values penalize new tokens based on their
-      # existing frequency in the text so far, decreasing the model's likelihood to
-      # repeat the same line verbatim.
+      # existing frequency in the text so far, decreasing the model's likelihood to
+      # repeat the same line verbatim.
       #
-      # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+      # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
       #
       # @return [Float, nil]
       optional :frequency_penalty, Float, nil?: true
@@ -64,39 +64,39 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute logit_bias
       # Modify the likelihood of specified tokens appearing in the completion.
       #
-      # Accepts a JSON object that maps tokens (specified by their token ID in the GPT
-      # tokenizer) to an associated bias value from -100 to 100. You can use this
-      # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
-      # Mathematically, the bias is added to the logits generated by the model prior to
-      # sampling. The exact effect will vary per model, but values between -1 and 1
-      # should decrease or increase likelihood of selection; values like -100 or 100
-      # should result in a ban or exclusive selection of the relevant token.
+      # Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+      # tokenizer) to an associated bias value from -100 to 100. You can use this
+      # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+      # Mathematically, the bias is added to the logits generated by the model prior to
+      # sampling. The exact effect will vary per model, but values between -1 and 1
+      # should decrease or increase likelihood of selection; values like -100 or 100
+      # should result in a ban or exclusive selection of the relevant token.
       #
-      # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
-      # from being generated.
+      # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+      # from being generated.
       #
       # @return [Hash{Symbol=>Integer}, nil]
       optional :logit_bias, OpenAI::Internal::Type::HashOf[Integer], nil?: true
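The `{"50256": -100}` example above translates directly to the Ruby client. A hedged sketch on the legacy completions endpoint (the prompt and `max_tokens` value are illustrative):

```ruby
# Ban the <|endoftext|> token (ID 50256 in the GPT tokenizer) outright.
response = client.completions.create(
  model: "gpt-3.5-turbo-instruct",
  prompt: "Write one sentence about Ruby.",
  logit_bias: {"50256" => -100},
  max_tokens: 32
)
puts response.choices.first.text
```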
       # @!attribute logprobs
       # Include the log probabilities on the `logprobs` most likely output tokens, as
-      # well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
-      # list of the 5 most likely tokens. The API will always return the `logprob` of
-      # the sampled token, so there may be up to `logprobs+1` elements in the response.
+      # well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+      # list of the 5 most likely tokens. The API will always return the `logprob` of
+      # the sampled token, so there may be up to `logprobs+1` elements in the response.
       #
-      # The maximum value for `logprobs` is 5.
+      # The maximum value for `logprobs` is 5.
       #
       # @return [Integer, nil]
       optional :logprobs, Integer, nil?: true
       # @!attribute max_tokens
       # The maximum number of [tokens](/tokenizer) that can be generated in the
-      # completion.
+      # completion.
       #
-      # The token count of your prompt plus `max_tokens` cannot exceed the model's
-      # context length.
+      # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+      # for counting tokens.
       #
       # @return [Integer, nil]
       optional :max_tokens, Integer, nil?: true
@@ -104,37 +104,37 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute n
       # How many completions to generate for each prompt.
       #
-      # **Note:** Because this parameter generates many completions, it can quickly
-      # consume your token quota. Use carefully and ensure that you have reasonable
-      # settings for `max_tokens` and `stop`.
+      # **Note:** Because this parameter generates many completions, it can quickly
+      # consume your token quota. Use carefully and ensure that you have reasonable
+      # settings for `max_tokens` and `stop`.
       #
       # @return [Integer, nil]
       optional :n, Integer, nil?: true
       # @!attribute presence_penalty
       # Number between -2.0 and 2.0. Positive values penalize new tokens based on
-      # whether they appear in the text so far, increasing the model's likelihood to
-      # talk about new topics.
+      # whether they appear in the text so far, increasing the model's likelihood to
+      # talk about new topics.
       #
-      # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+      # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
       #
       # @return [Float, nil]
       optional :presence_penalty, Float, nil?: true
       # @!attribute seed
       # If specified, our system will make a best effort to sample deterministically,
-      # such that repeated requests with the same `seed` and parameters should return
-      # the same result.
+      # such that repeated requests with the same `seed` and parameters should return
+      # the same result.
       #
-      # Determinism is not guaranteed, and you should refer to the `system_fingerprint`
-      # response parameter to monitor changes in the backend.
+      # Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+      # response parameter to monitor changes in the backend.
       #
       # @return [Integer, nil]
       optional :seed, Integer, nil?: true
       # @!attribute stop
       # Up to 4 sequences where the API will stop generating further tokens. The
-      # returned text will not contain the stop sequence.
+      # returned text will not contain the stop sequence.
       #
       # @return [String, Array, nil]
       optional :stop, union: -> { OpenAI::Models::CompletionCreateParams::Stop }, nil?: true
@@ -148,35 +148,35 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute suffix
       # The suffix that comes after a completion of inserted text.
       #
-      # This parameter is only supported for `gpt-3.5-turbo-instruct`.
+      # This parameter is only supported for `gpt-3.5-turbo-instruct`.
       #
       # @return [String, nil]
       optional :suffix, String, nil?: true
       # @!attribute temperature
       # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-      # make the output more random, while lower values like 0.2 will make it more
-      # focused and deterministic.
+      # make the output more random, while lower values like 0.2 will make it more
+      # focused and deterministic.
       #
-      # We generally recommend altering this or `top_p` but not both.
+      # We generally recommend altering this or `top_p` but not both.
       #
       # @return [Float, nil]
       optional :temperature, Float, nil?: true
       # @!attribute top_p
       # An alternative to sampling with temperature, called nucleus sampling, where the
-      # model considers the results of the tokens with top_p probability mass. So 0.1
-      # means only the tokens comprising the top 10% probability mass are considered.
+      # model considers the results of the tokens with top_p probability mass. So 0.1
+      # means only the tokens comprising the top 10% probability mass are considered.
       #
-      # We generally recommend altering this or `temperature` but not both.
+      # We generally recommend altering this or `temperature` but not both.
       #
       # @return [Float, nil]
       optional :top_p, Float, nil?: true
       # @!attribute [r] user
       # A unique identifier representing your end-user, which can help OpenAI to monitor
-      # and detect abuse.
-      # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+      # and detect abuse.
+      # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
       #
       # @return [String, nil]
       optional :user, String
@@ -232,10 +232,10 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
       #   def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
       # ID of the model to use. You can use the
-      # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-      # see all of your available models, or see our
-      # [Model overview](https://platform.openai.com/docs/models) for descriptions of
-      # them.
+      # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+      # see all of your available models, or see our
+      # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+      # them.
       module Model
        extend OpenAI::Internal::Type::Union
@@ -261,11 +261,11 @@ module Model
       end
       # The prompt(s) to generate completions for, encoded as a string, array of
-      # strings, array of tokens, or array of token arrays.
+      # strings, array of tokens, or array of token arrays.
       #
-      # Note that <|endoftext|> is the document separator that the model sees during
-      # training, so if a prompt is not specified the model will generate as if from the
-      # beginning of a new document.
+      # Note that <|endoftext|> is the document separator that the model sees during
+      # training, so if a prompt is not specified the model will generate as if from the
+      # beginning of a new document.
       module Prompt
        extend OpenAI::Internal::Type::Union
@@ -289,7 +289,7 @@ module Prompt
       end
       # Up to 4 sequences where the API will stop generating further tokens. The
-      # returned text will not contain the stop sequence.
+      # returned text will not contain the stop sequence.
       module Stop
        extend OpenAI::Internal::Type::Union
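The `Prompt` union above accepts several encodings. A hedged sketch of equivalent calls on the legacy endpoint (the model ID and token IDs are illustrative):

```ruby
client.completions.create(model: "gpt-3.5-turbo-instruct", prompt: "Say hello")
client.completions.create(model: "gpt-3.5-turbo-instruct", prompt: ["Say hello", "Say goodbye"])
client.completions.create(model: "gpt-3.5-turbo-instruct", prompt: [3575, 6892]) # pre-tokenized input
```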
diff --git a/lib/openai/models/completion_usage.rb b/lib/openai/models/completion_usage.rb
index bdcff97d..0f098720 100644
--- a/lib/openai/models/completion_usage.rb
+++ b/lib/openai/models/completion_usage.rb
@@ -67,7 +67,7 @@ class CompletionUsage < OpenAI::Internal::Type::BaseModel
   class CompletionTokensDetails < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] accepted_prediction_tokens
       # When using Predicted Outputs, the number of tokens in the prediction that
-      # appeared in the completion.
+      # appeared in the completion.
       #
       # @return [Integer, nil]
       optional :accepted_prediction_tokens, Integer
@@ -98,9 +98,9 @@ class CompletionTokensDetails < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] rejected_prediction_tokens
       # When using Predicted Outputs, the number of tokens in the prediction that did
-      # not appear in the completion. However, like reasoning tokens, these tokens are
+      # still counted in the total completion tokens for purposes of billing, output,
+      # and context window limits.
       #
       # @return [Integer, nil]
       optional :rejected_prediction_tokens, Integer
diff --git a/lib/openai/models/compound_filter.rb b/lib/openai/models/compound_filter.rb
index fa9859e2..57314aa0 100644
--- a/lib/openai/models/compound_filter.rb
+++ b/lib/openai/models/compound_filter.rb
@@ -5,7 +5,7 @@ module Models
   class CompoundFilter < OpenAI::Internal::Type::BaseModel
       # @!attribute filters
       # Array of filters to combine. Items can be `ComparisonFilter` or
-      # `CompoundFilter`.
+      # `CompoundFilter`.
       #
       # @return [Array]
       required :filters, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::CompoundFilter::Filter] }
@@ -27,7 +27,7 @@ class CompoundFilter < OpenAI::Internal::Type::BaseModel
       #   def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
       # A filter used to compare a specified attribute key to a given value using a
-      # defined comparison operation.
+      # defined comparison operation.
       module Filter
        extend OpenAI::Internal::Type::Union
diff --git a/lib/openai/models/embedding.rb b/lib/openai/models/embedding.rb
index d71f4752..75d1aebf 100644
--- a/lib/openai/models/embedding.rb
+++ b/lib/openai/models/embedding.rb
@@ -5,8 +5,8 @@ module Models
   class Embedding < OpenAI::Internal::Type::BaseModel
       # @!attribute embedding
       # The embedding vector, which is a list of floats. The length of the vector depends on
-      # the model as listed in the
-      # [embedding guide](https://platform.openai.com/docs/guides/embeddings).
+      # the model as listed in the
+      # [embedding guide](https://platform.openai.com/docs/guides/embeddings).
       #
       # @return [Array]
       required :embedding, OpenAI::Internal::Type::ArrayOf[Float]
diff --git a/lib/openai/models/embedding_create_params.rb b/lib/openai/models/embedding_create_params.rb
index 4705cd60..3f26541e 100644
--- a/lib/openai/models/embedding_create_params.rb
+++ b/lib/openai/models/embedding_create_params.rb
@@ -10,30 +10,30 @@ class EmbeddingCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute input
       # Input text to embed, encoded as a string or array of tokens. To embed multiple
-      # inputs in a single request, pass an array of strings or array of token arrays.
-      # The input must not exceed the max input tokens for the model (8192 tokens for
-      # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
-      # dimensions or less.
-      # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
-      # for counting tokens. Some models may also impose a limit on total number of
-      # tokens summed across inputs.
+      # inputs in a single request, pass an array of strings or array of token arrays.
+      # The input must not exceed the max input tokens for the model (8192 tokens for
+      # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
+      # dimensions or less.
+      # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+      # for counting tokens. Some models may also impose a limit on total number of
+      # tokens summed across inputs.
       #
       # @return [String, Array, Array, Array>]
       required :input, union: -> { OpenAI::Models::EmbeddingCreateParams::Input }
       # @!attribute model
       # ID of the model to use. You can use the
-      # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-      # see all of your available models, or see our
-      # [Model overview](https://platform.openai.com/docs/models) for descriptions of
-      # them.
+      # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+      # see all of your available models, or see our
+      # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+      # them.
       #
       # @return [String, Symbol, OpenAI::Models::EmbeddingModel]
       required :model, union: -> { OpenAI::Models::EmbeddingCreateParams::Model }
       # @!attribute [r] dimensions
       # The number of dimensions the resulting output embeddings should have. Only
-      # supported in `text-embedding-3` and later models.
+      # supported in `text-embedding-3` and later models.
       #
       # @return [Integer, nil]
       optional :dimensions, Integer
@@ -44,7 +44,7 @@ class EmbeddingCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] encoding_format
       # The format to return the embeddings in. Can be either `float` or
-      # [`base64`](https://pypi.org/project/pybase64/).
+      # [`base64`](https://pypi.org/project/pybase64/).
       #
       # @return [Symbol, OpenAI::Models::EmbeddingCreateParams::EncodingFormat, nil]
       optional :encoding_format, enum: -> { OpenAI::Models::EmbeddingCreateParams::EncodingFormat }
@@ -55,8 +55,8 @@ class EmbeddingCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] user
       # A unique identifier representing your end-user, which can help OpenAI to monitor
-      # and detect abuse.
-      # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+      # and detect abuse.
+      # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
       #
       # @return [String, nil]
       optional :user, String
@@ -78,13 +78,13 @@ class EmbeddingCreateParams < OpenAI::Internal::Type::BaseModel
       #   def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
       # Input text to embed, encoded as a string or array of tokens. To embed multiple
-      # inputs in a single request, pass an array of strings or array of token arrays.
-      # The input must not exceed the max input tokens for the model (8192 tokens for
-      # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
-      # dimensions or less.
-      # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
-      # for counting tokens. Some models may also impose a limit on total number of
-      # tokens summed across inputs.
+      # inputs in a single request, pass an array of strings or array of token arrays.
+      # The input must not exceed the max input tokens for the model (8192 tokens for
+      # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
+      # dimensions or less.
+      # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+      # for counting tokens. Some models may also impose a limit on total number of
+      # tokens summed across inputs.
       module Input
        extend OpenAI::Internal::Type::Union
@@ -112,10 +112,10 @@ module Input
       end
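A hedged usage sketch for the embedding parameters above (the model ID, input text, and dimension count are illustrative):

```ruby
embedding = client.embeddings.create(
  model: "text-embedding-3-small",
  input: "The quick brown fox",
  dimensions: 256,
  encoding_format: :float
)
vector = embedding.data.first.embedding # => Array of 256 Floats
```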
       # ID of the model to use. You can use the
-      # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-      # see all of your available models, or see our
-      # [Model overview](https://platform.openai.com/docs/models) for descriptions of
-      # them.
+      # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+      # see all of your available models, or see our
+      # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+      # them.
       module Model
        extend OpenAI::Internal::Type::Union
@@ -130,7 +130,7 @@ module Model
       end
       # The format to return the embeddings in. Can be either `float` or
-      # [`base64`](https://pypi.org/project/pybase64/).
+      # [`base64`](https://pypi.org/project/pybase64/).
       module EncodingFormat
        extend OpenAI::Internal::Type::Enum
diff --git a/lib/openai/models/file_chunking_strategy_param.rb b/lib/openai/models/file_chunking_strategy_param.rb
index 6652fa42..8a671209 100644
--- a/lib/openai/models/file_chunking_strategy_param.rb
+++ b/lib/openai/models/file_chunking_strategy_param.rb
@@ -3,7 +3,7 @@ module OpenAI
   module Models
       # The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-      # strategy. Only applicable if `file_ids` is non-empty.
+      # strategy. Only applicable if `file_ids` is non-empty.
       module FileChunkingStrategyParam
        extend OpenAI::Internal::Type::Union
diff --git a/lib/openai/models/file_create_params.rb b/lib/openai/models/file_create_params.rb
index 43683821..a3bc12dd 100644
--- a/lib/openai/models/file_create_params.rb
+++ b/lib/openai/models/file_create_params.rb
@@ -16,9 +16,9 @@ class FileCreateParams < OpenAI::Internal::Type::BaseModel
       # @!attribute purpose
       # The intended purpose of the uploaded file. One of: - `assistants`: Used in the
-      # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
-      # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
-      # Flexible file type for any purpose - `evals`: Used for eval data sets
+      # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
+      # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
+      # Flexible file type for any purpose - `evals`: Used for eval data sets
       #
       # @return [Symbol, OpenAI::Models::FilePurpose]
       required :purpose, enum: -> { OpenAI::Models::FilePurpose }
diff --git a/lib/openai/models/file_list_params.rb b/lib/openai/models/file_list_params.rb
index d5a7bce4..ccc569bf 100644
--- a/lib/openai/models/file_list_params.rb
+++ b/lib/openai/models/file_list_params.rb
@@ -10,9 +10,9 @@ class FileListParams < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] after
       # A cursor for use in pagination. `after` is an object ID that defines your place
-      # in the list. For instance, if you make a list request and receive 100 objects,
-      # ending with obj_foo, your subsequent call can include after=obj_foo in order to
-      # fetch the next page of the list.
+      # in the list. For instance, if you make a list request and receive 100 objects,
+      # ending with obj_foo, your subsequent call can include after=obj_foo in order to
+      # fetch the next page of the list.
       #
       # @return [String, nil]
       optional :after, String
@@ -23,7 +23,7 @@ class FileListParams < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] limit
       # A limit on the number of objects to be returned. Limit can range between 1 and
-      # 10,000, and the default is 10,000.
+      # 10,000, and the default is 10,000.
       #
       # @return [Integer, nil]
       optional :limit, Integer
@@ -34,7 +34,7 @@ class FileListParams < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] order
       # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-      # order and `desc` for descending order.
       #
       # @return [Symbol, OpenAI::Models::FileListParams::Order, nil]
       optional :order, enum: -> { OpenAI::Models::FileListParams::Order }
@@ -65,7 +65,7 @@ class FileListParams < OpenAI::Internal::Type::BaseModel
       #   def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
       # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-      # order and `desc` for descending order.
+      # order and `desc` for descending order.
       module Order
        extend OpenAI::Internal::Type::Enum
diff --git a/lib/openai/models/file_object.rb b/lib/openai/models/file_object.rb
index d25bcfa7..d694613d 100644
--- a/lib/openai/models/file_object.rb
+++ b/lib/openai/models/file_object.rb
@@ -36,15 +36,15 @@ class FileObject < OpenAI::Internal::Type::BaseModel
       # @!attribute purpose
       # The intended purpose of the file. Supported values are `assistants`,
-      # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`
-      # and `vision`.
+      # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`
+      # and `vision`.
       #
       # @return [Symbol, OpenAI::Models::FileObject::Purpose]
       required :purpose, enum: -> { OpenAI::Models::FileObject::Purpose }
       # @!attribute status
       # Deprecated. The current status of the file, which can be either `uploaded`,
-      # `processed`, or `error`.
+      # `processed`, or `error`.
       #
       # @return [Symbol, OpenAI::Models::FileObject::Status]
       required :status, enum: -> { OpenAI::Models::FileObject::Status }
@@ -61,7 +61,7 @@ class FileObject < OpenAI::Internal::Type::BaseModel
       # @!attribute [r] status_details
       # Deprecated. For details on why a fine-tuning training file failed validation,
-      # see the `error` field on `fine_tuning.job`.
+      # see the `error` field on `fine_tuning.job`.
       #
       # @return [String, nil]
       optional :status_details, String
@@ -101,8 +101,8 @@ class FileObject < OpenAI::Internal::Type::BaseModel
       #   def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
       # The intended purpose of the file. Supported values are `assistants`,
-      # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`
-      # and `vision`.
+      # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`
+      # and `vision`.
       #
       # @see OpenAI::Models::FileObject#purpose
       module Purpose
@@ -126,7 +126,7 @@ module Purpose
       # @deprecated
       #
       # Deprecated. The current status of the file, which can be either `uploaded`,
-      # `processed`, or `error`.
+      # `processed`, or `error`.
       #
       # @see OpenAI::Models::FileObject#status
       module Status
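A hedged sketch of uploading and listing files with the params above (the local path is invented; `Pathname` inputs follow the SDK's documented file-upload convention):

```ruby
require "pathname"

file = client.files.create(file: Pathname("training.jsonl"), purpose: :"fine-tune")
puts file.id

client.files.list(order: :desc, limit: 5).auto_paging_each do |f|
  puts "#{f.id} #{f.purpose} #{f.status}"
end
```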
diff --git a/lib/openai/models/file_purpose.rb b/lib/openai/models/file_purpose.rb
index 3b9a9976..c11caef0 100644
--- a/lib/openai/models/file_purpose.rb
+++ b/lib/openai/models/file_purpose.rb
@@ -3,9 +3,9 @@ module OpenAI
   module Models
       # The intended purpose of the uploaded file. One of: - `assistants`: Used in the
-      # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
-      # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
-      # Flexible file type for any purpose - `evals`: Used for eval data sets
+      # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
+      # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
+      # Flexible file type for any purpose - `evals`: Used for eval data sets
       module FilePurpose
        extend OpenAI::Internal::Type::Enum
diff --git a/lib/openai/models/fine_tuning/fine_tuning_job.rb b/lib/openai/models/fine_tuning/fine_tuning_job.rb
index 9b72f23f..b92146f5 100644
--- a/lib/openai/models/fine_tuning/fine_tuning_job.rb
+++ b/lib/openai/models/fine_tuning/fine_tuning_job.rb
@@ -19,28 +19,28 @@ class FineTuningJob < OpenAI::Internal::Type::BaseModel
       # @!attribute error
       # For fine-tuning jobs that have `failed`, this will contain more information on
-      # the cause of the failure.
+      # the cause of the failure.
       #
       # @return [OpenAI::Models::FineTuning::FineTuningJob::Error, nil]
       required :error, -> { OpenAI::Models::FineTuning::FineTuningJob::Error }, nil?: true
       # @!attribute fine_tuned_model
       # The name of the fine-tuned model that is being created. The value will be null
-      # if the fine-tuning job is still running.
+      # if the fine-tuning job is still running.
       #
       # @return [String, nil]
       required :fine_tuned_model, String, nil?: true
       # @!attribute finished_at
       # The Unix timestamp (in seconds) for when the fine-tuning job was finished. The
-      # value will be null if the fine-tuning job is still running.
+      # value will be null if the fine-tuning job is still running.
       #
       # @return [Integer, nil]
       required :finished_at, Integer, nil?: true
       # @!attribute hyperparameters
       # The hyperparameters used for the fine-tuning job. This value will only be
-      # returned when running `supervised` jobs.
+      # returned when running `supervised` jobs.
       #
       # @return [OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters]
       required :hyperparameters, -> { OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters }
@@ -65,8 +65,8 @@ class FineTuningJob < OpenAI::Internal::Type::BaseModel
       # @!attribute result_files
       # The compiled results file ID(s) for the fine-tuning job. You can retrieve the
-      # results with the
-      # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
+      # results with the
+      # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
       #
       # @return [Array]
       required :result_files, OpenAI::Internal::Type::ArrayOf[String]
@@ -79,36 +79,36 @@ class FineTuningJob < OpenAI::Internal::Type::BaseModel
       # @!attribute status
       # The current status of the fine-tuning job, which can be either
-      # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
+      # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
       #
       # @return [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Status]
       required :status, enum: -> { OpenAI::Models::FineTuning::FineTuningJob::Status }
       # @!attribute trained_tokens
       # The total number of billable tokens processed by this fine-tuning job. The value
-      # will be null if the fine-tuning job is still running.
+      # will be null if the fine-tuning job is still running.
       #
       # @return [Integer, nil]
       required :trained_tokens, Integer, nil?: true
       # @!attribute training_file
       # The file ID used for training. You can retrieve the training data with the
-      # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
       #
       # @return [String]
       required :training_file, String
       # @!attribute validation_file
       # The file ID used for validation. You can retrieve the validation results with
-      # the
-      # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
+      # the
+      # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
       #
       # @return [String, nil]
       required :validation_file, String, nil?: true
       # @!attribute estimated_finish
       # The Unix timestamp (in seconds) for when the fine-tuning job is estimated to
-      # finish. The value will be null if the fine-tuning job is not running.
+      # finish. The value will be null if the fine-tuning job is not running.
       #
       # @return [Integer, nil]
       optional :estimated_finish, Integer, nil?: true
@@ -123,11 +123,11 @@ class FineTuningJob < OpenAI::Internal::Type::BaseModel
       # @!attribute metadata
       # Set of 16 key-value pairs that can be attached to an object. This can be useful
-      # for storing additional information about the object in a structured format, and
-      # querying for objects via API or the dashboard.
+      # for storing additional information about the object in a structured format, and
+      # querying for objects via API or the dashboard.
       #
-      # Keys are strings with a maximum length of 64 characters. Values are strings with
-      # a maximum length of 512 characters.
+      # Keys are strings with a maximum length of 64 characters. Values are strings with
+      # a maximum length of 512 characters.
       #
       # @return [Hash{Symbol=>String}, nil]
       optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
@@ -144,7 +144,7 @@ class FineTuningJob < OpenAI::Internal::Type::BaseModel
       # @!parse
       #   # The `fine_tuning.job` object represents a fine-tuning job that has been created
-      #   # through the API.
+      #   # through the API.
       #   #
       #   # @param id [String]
       #   # @param created_at [Integer]
@@ -209,14 +209,14 @@ class Error < OpenAI::Internal::Type::BaseModel
       # @!attribute param
       # The parameter that was invalid, usually `training_file` or `validation_file`.
-      # This field will be null if the failure was not parameter-specific.
+      # This field will be null if the failure was not parameter-specific.
       #
       # @return [String, nil]
       required :param, String, nil?: true
       # @!parse
       #   # For fine-tuning jobs that have `failed`, this will contain more information on
-      #   # the cause of the failure.
+      #   # the cause of the failure.
       #   #
       #   # @param code [String]
       #   # @param message [String]
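A hedged sketch of reading a job's lifecycle fields (the job ID is invented; enum values come back as symbols on this model):

```ruby
job = client.fine_tuning.jobs.retrieve("ftjob-abc123")

case job.status
when :succeeded then puts "model: #{job.fine_tuned_model}"
when :failed    then warn "failed on #{job.error&.param}: #{job.error&.message}"
else                 puts "status: #{job.status}, trained tokens: #{job.trained_tokens.inspect}"
end
```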
# # @return [Symbol, :auto, Float, nil] optional :learning_rate_multiplier, @@ -254,7 +254,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] n_epochs # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @return [Symbol, :auto, Integer, nil] optional :n_epochs, union: -> { OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::NEpochs } @@ -265,7 +265,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!parse # # The hyperparameters used for the fine-tuning job. This value will only be - # # returned when running `supervised` jobs. + # # returned when running `supervised` jobs. # # # # @param batch_size [Symbol, :auto, Integer] # # @param learning_rate_multiplier [Symbol, :auto, Float] @@ -276,7 +276,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @see OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters#batch_size module BatchSize @@ -292,7 +292,7 @@ module BatchSize end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @see OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters#learning_rate_multiplier module LearningRateMultiplier @@ -308,7 +308,7 @@ module LearningRateMultiplier end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @see OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters#n_epochs module NEpochs @@ -325,7 +325,7 @@ module NEpochs end # The current status of the fine-tuning job, which can be either - # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. # # @see OpenAI::Models::FineTuning::FineTuningJob#status module Status @@ -413,7 +413,7 @@ class Dpo < OpenAI::Internal::Type::BaseModel class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] batch_size # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @return [Symbol, :auto, Integer, nil] optional :batch_size, @@ -425,7 +425,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] beta # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. + # of the penalty between the policy and reference model. # # @return [Symbol, :auto, Float, nil] optional :beta, @@ -437,7 +437,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] learning_rate_multiplier # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @return [Symbol, :auto, Float, nil] optional :learning_rate_multiplier, @@ -449,7 +449,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] n_epochs # The number of epochs to train the model for. 
An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @return [Symbol, :auto, Integer, nil] optional :n_epochs, @@ -472,7 +472,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @see OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters#batch_size module BatchSize @@ -488,7 +488,7 @@ module BatchSize end # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. + # of the penalty between the policy and reference model. # # @see OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters#beta module Beta @@ -504,7 +504,7 @@ module Beta end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @see OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters#learning_rate_multiplier module LearningRateMultiplier @@ -520,7 +520,7 @@ module LearningRateMultiplier end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @see OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters#n_epochs module NEpochs @@ -563,7 +563,7 @@ class Supervised < OpenAI::Internal::Type::BaseModel class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] batch_size # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @return [Symbol, :auto, Integer, nil] optional :batch_size, @@ -575,7 +575,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] learning_rate_multiplier # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @return [Symbol, :auto, Float, nil] optional :learning_rate_multiplier, @@ -587,7 +587,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] n_epochs # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @return [Symbol, :auto, Integer, nil] optional :n_epochs, @@ -609,7 +609,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @see OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters#batch_size module BatchSize @@ -625,7 +625,7 @@ module BatchSize end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @see OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters#learning_rate_multiplier module LearningRateMultiplier @@ -641,7 +641,7 @@ module LearningRateMultiplier end # The number of epochs to train the model for. 
An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @see OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters#n_epochs module NEpochs diff --git a/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rb b/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rb index 5b1dc8c8..603de792 100644 --- a/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rb +++ b/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rb @@ -12,23 +12,23 @@ class FineTuningJobWandbIntegration < OpenAI::Internal::Type::BaseModel # @!attribute entity # The entity to use for the run. This allows you to set the team or username of - # the WandB user that you would like associated with the run. If not set, the - # default entity for the registered WandB API key is used. + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. # # @return [String, nil] optional :entity, String, nil?: true # @!attribute name # A display name to set for the run. If not set, we will use the Job ID as the - # name. + # name. # # @return [String, nil] optional :name, String, nil?: true # @!attribute [r] tags # A list of tags to be attached to the newly created run. These tags are passed - # through directly to WandB. Some default tags are generated by OpenAI: - # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". # # @return [Array, nil] optional :tags, OpenAI::Internal::Type::ArrayOf[String] @@ -39,9 +39,9 @@ class FineTuningJobWandbIntegration < OpenAI::Internal::Type::BaseModel # @!parse # # The settings for your integration with Weights and Biases. This payload - # # specifies the project that metrics will be sent to. Optionally, you can set an - # # explicit display name for your run, add tags to your run, and set a default - # # entity (team, username, etc) to be associated with your run. + # # specifies the project that metrics will be sent to. Optionally, you can set an + # # explicit display name for your run, add tags to your run, and set a default + # # entity (team, username, etc) to be associated with your run. # # # # @param project [String] # # @param entity [String, nil] diff --git a/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rb b/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rb index 05920c94..3554c944 100644 --- a/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rb +++ b/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rb @@ -12,9 +12,9 @@ class FineTuningJobWandbIntegrationObject < OpenAI::Internal::Type::BaseModel # @!attribute wandb # The settings for your integration with Weights and Biases. This payload - # specifies the project that metrics will be sent to. Optionally, you can set an - # explicit display name for your run, add tags to your run, and set a default - # entity (team, username, etc) to be associated with your run. + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. 
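Tying the `FineTuningJob` fields above together, a minimal usage sketch: create a supervised job, poll the documented `status` values, then read `fine_tuned_model`. The model name, file ID, and the `method:` payload shape are assumptions; only the field names come from the docs in this diff.

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # `hyperparameters` is deprecated in favor of `method`, per the docs above.
    job = client.fine_tuning.jobs.create(
      model: "gpt-4o-mini-2024-07-18",  # hypothetical base model
      training_file: "file-abc123",     # uploaded with purpose `fine-tune`
      method: {
        type: :supervised,
        supervised: {hyperparameters: {n_epochs: 3, batch_size: :auto}}
      }
    )

    # Status is one of: validating_files, queued, running, succeeded, failed, cancelled.
    until %i[succeeded failed cancelled].include?(job.status)
      sleep(30)
      job = client.fine_tuning.jobs.retrieve(job.id)
    end

    puts job.fine_tuned_model # nil unless the job succeeded
    puts job.trained_tokens   # nil while the job is still running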
# # @return [OpenAI::Models::FineTuning::FineTuningJobWandbIntegration] required :wandb, -> { OpenAI::Models::FineTuning::FineTuningJobWandbIntegration } diff --git a/lib/openai/models/fine_tuning/job_create_params.rb b/lib/openai/models/fine_tuning/job_create_params.rb index 89dcb87d..df3de345 100644 --- a/lib/openai/models/fine_tuning/job_create_params.rb +++ b/lib/openai/models/fine_tuning/job_create_params.rb @@ -11,7 +11,7 @@ class JobCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute model # The name of the model to fine-tune. You can select one of the - # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). # # @return [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model] required :model, union: -> { OpenAI::Models::FineTuning::JobCreateParams::Model } @@ -19,28 +19,28 @@ class JobCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute training_file # The ID of an uploaded file that contains training data. # - # See [upload file](https://platform.openai.com/docs/api-reference/files/create) - # for how to upload a file. + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. # - # Your dataset must be formatted as a JSONL file. Additionally, you must upload - # your file with the purpose `fine-tune`. + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. # - # The contents of the file should differ depending on if the model uses the - # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), - # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - # format, or if the fine-tuning method uses the - # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) - # format. + # The contents of the file should differ depending on if the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or if the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. # - # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - # for more details. + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. # # @return [String] required :training_file, String # @!attribute [r] hyperparameters # The hyperparameters used for the fine-tuning job. This value is now deprecated - # in favor of `method`, and should be passed in under the `method` parameter. + # in favor of `method`, and should be passed in under the `method` parameter. # # @return [OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters, nil] optional :hyperparameters, -> { OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters } @@ -59,11 +59,11 @@ class JobCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. 
+ # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -80,18 +80,18 @@ class JobCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute seed # The seed controls the reproducibility of the job. Passing in the same seed and - # job parameters should produce the same results, but may differ in rare cases. If - # a seed is not specified, one will be generated for you. + # job parameters should produce the same results, but may differ in rare cases. If + # a seed is not specified, one will be generated for you. # # @return [Integer, nil] optional :seed, Integer, nil?: true # @!attribute suffix # A string of up to 64 characters that will be added to your fine-tuned model - # name. + # name. # - # For example, a `suffix` of "custom-model-name" would produce a model name like - # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. # # @return [String, nil] optional :suffix, String, nil?: true @@ -99,16 +99,16 @@ class JobCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute validation_file # The ID of an uploaded file that contains validation data. # - # If you provide this file, the data is used to generate validation metrics - # periodically during fine-tuning. These metrics can be viewed in the fine-tuning - # results file. The same data should not be present in both train and validation - # files. + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. # - # Your dataset must be formatted as a JSONL file. You must upload your file with - # the purpose `fine-tune`. + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. # - # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - # for more details. + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. # # @return [String, nil] optional :validation_file, String, nil?: true @@ -144,7 +144,7 @@ class JobCreateParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The name of the model to fine-tune. You can select one of the - # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). module Model extend OpenAI::Internal::Type::Union @@ -176,7 +176,7 @@ module Model class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] batch_size # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. 
+ # parameters are updated less frequently, but with lower variance.
# # @return [Symbol, :auto, Integer, nil] optional :batch_size, @@ -188,7 +188,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] learning_rate_multiplier # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @return [Symbol, :auto, Float, nil] optional :learning_rate_multiplier, @@ -200,7 +200,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] n_epochs # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @return [Symbol, :auto, Integer, nil] optional :n_epochs, union: -> { OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::NEpochs } @@ -211,7 +211,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!parse # # The hyperparameters used for the fine-tuning job. This value is now deprecated - # # in favor of `method`, and should be passed in under the `method` parameter. + # # in favor of `method`, and should be passed in under the `method` parameter. # # # # @param batch_size [Symbol, :auto, Integer] # # @param learning_rate_multiplier [Symbol, :auto, Float] @@ -222,7 +222,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @see OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters#batch_size module BatchSize @@ -238,7 +238,7 @@ module BatchSize end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @see OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters#learning_rate_multiplier module LearningRateMultiplier @@ -254,7 +254,7 @@ module LearningRateMultiplier end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @see OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters#n_epochs module NEpochs @@ -273,16 +273,16 @@ module NEpochs class Integration < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of integration to enable. Currently, only "wandb" (Weights and Biases) - # is supported. + # is supported. # # @return [Symbol, :wandb] required :type, const: :wandb # @!attribute wandb # The settings for your integration with Weights and Biases. This payload - # specifies the project that metrics will be sent to. Optionally, you can set an - # explicit display name for your run, add tags to your run, and set a default - # entity (team, username, etc) to be associated with your run. + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. # # @return [OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb] required :wandb, -> { OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb } @@ -305,23 +305,23 @@ class Wandb < OpenAI::Internal::Type::BaseModel # @!attribute entity # The entity to use for the run. This allows you to set the team or username of - # the WandB user that you would like associated with the run. 
If not set, the - # default entity for the registered WandB API key is used. + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. # # @return [String, nil] optional :entity, String, nil?: true # @!attribute name # A display name to set for the run. If not set, we will use the Job ID as the - # name. + # name. # # @return [String, nil] optional :name, String, nil?: true # @!attribute [r] tags # A list of tags to be attached to the newly created run. These tags are passed - # through directly to WandB. Some default tags are generated by OpenAI: - # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". # # @return [Array, nil] optional :tags, OpenAI::Internal::Type::ArrayOf[String] @@ -332,9 +332,9 @@ class Wandb < OpenAI::Internal::Type::BaseModel # @!parse # # The settings for your integration with Weights and Biases. This payload - # # specifies the project that metrics will be sent to. Optionally, you can set an - # # explicit display name for your run, add tags to your run, and set a default - # # entity (team, username, etc) to be associated with your run. + # # specifies the project that metrics will be sent to. Optionally, you can set an + # # explicit display name for your run, add tags to your run, and set a default + # # entity (team, username, etc) to be associated with your run. # # # # @param project [String] # # @param entity [String, nil] @@ -415,7 +415,7 @@ class Dpo < OpenAI::Internal::Type::BaseModel class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] batch_size # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @return [Symbol, :auto, Integer, nil] optional :batch_size, @@ -427,7 +427,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] beta # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. + # of the penalty between the policy and reference model. # # @return [Symbol, :auto, Float, nil] optional :beta, @@ -439,7 +439,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] learning_rate_multiplier # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @return [Symbol, :auto, Float, nil] optional :learning_rate_multiplier, @@ -451,7 +451,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] n_epochs # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @return [Symbol, :auto, Integer, nil] optional :n_epochs, @@ -474,7 +474,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. 
+ # parameters are updated less frequently, but with lower variance.
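A sketch of the `integrations` payload the Wandb fields above describe; the project, entity, and tag values are illustrative, and passing it via an `integrations:` keyword on job creation is an assumption consistent with the `Integration` class in these params:

    integrations = [
      {
        type: :wandb, # only "wandb" is currently supported, per the docs above
        wandb: {
          project: "my-finetune-project", # hypothetical W&B project
          entity: "my-team",              # team/username to associate with the run
          name: nil,                      # nil => the job ID is used as the display name
          tags: ["custom-tag"]            # merged with the default OpenAI-generated tags
        }
      }
    ]
    # e.g. client.fine_tuning.jobs.create(model: ..., training_file: ..., integrations: integrations)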
# # @see OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters#batch_size module BatchSize @@ -490,7 +490,7 @@ module BatchSize end # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. + # of the penalty between the policy and reference model. # # @see OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters#beta module Beta @@ -506,7 +506,7 @@ module Beta end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @see OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters#learning_rate_multiplier module LearningRateMultiplier @@ -522,7 +522,7 @@ module LearningRateMultiplier end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @see OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters#n_epochs module NEpochs @@ -565,7 +565,7 @@ class Supervised < OpenAI::Internal::Type::BaseModel class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] batch_size # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @return [Symbol, :auto, Integer, nil] optional :batch_size, @@ -577,7 +577,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] learning_rate_multiplier # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @return [Symbol, :auto, Float, nil] optional :learning_rate_multiplier, @@ -589,7 +589,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # @!attribute [r] n_epochs # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @return [Symbol, :auto, Integer, nil] optional :n_epochs, @@ -611,7 +611,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @see OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters#batch_size module BatchSize @@ -627,7 +627,7 @@ module BatchSize end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @see OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters#learning_rate_multiplier module LearningRateMultiplier @@ -643,7 +643,7 @@ module LearningRateMultiplier end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. 
+ # through the training dataset.
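And the corresponding `method` payload for a DPO job, built from the `:auto`-or-number unions documented above; the concrete values are illustrative, not recommendations:

    dpo_method = {
      type: :dpo,
      dpo: {
        hyperparameters: {
          beta: :auto,                   # penalty weight between policy and reference model
          batch_size: :auto,             # larger => less frequent updates, lower variance
          learning_rate_multiplier: 0.1, # smaller values can help avoid overfitting
          n_epochs: 3                    # full cycles through the training dataset
        }
      }
    }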
# # @see OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters#n_epochs module NEpochs diff --git a/lib/openai/models/fine_tuning/job_list_params.rb b/lib/openai/models/fine_tuning/job_list_params.rb index e0ffb31f..1ca2c3a3 100644 --- a/lib/openai/models/fine_tuning/job_list_params.rb +++ b/lib/openai/models/fine_tuning/job_list_params.rb @@ -31,7 +31,7 @@ class JobListParams < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. - # Alternatively, set `metadata=null` to indicate no metadata. + # Alternatively, set `metadata=null` to indicate no metadata. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true diff --git a/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rb b/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rb index 0af58cbe..e32a2926 100644 --- a/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rb +++ b/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rb @@ -50,7 +50,7 @@ class FineTuningJobCheckpoint < OpenAI::Internal::Type::BaseModel # @!parse # # The `fine_tuning.job.checkpoint` object represents a model checkpoint for a - # # fine-tuning job that is ready to use. + # # fine-tuning job that is ready to use. # # # # @param id [String] # # @param created_at [Integer] diff --git a/lib/openai/models/function_definition.rb b/lib/openai/models/function_definition.rb index e024d931..7bd7f7d5 100644 --- a/lib/openai/models/function_definition.rb +++ b/lib/openai/models/function_definition.rb @@ -5,14 +5,14 @@ module Models class FunctionDefinition < OpenAI::Internal::Type::BaseModel # @!attribute name # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain - # underscores and dashes, with a maximum length of 64. + # underscores and dashes, with a maximum length of 64. # # @return [String] required :name, String # @!attribute [r] description # A description of what the function does, used by the model to choose when and - # how to call the function. + # how to call the function. # # @return [String, nil] optional :description, String @@ -23,12 +23,12 @@ class FunctionDefinition < OpenAI::Internal::Type::BaseModel # @!attribute [r] parameters # The parameters the functions accepts, described as a JSON Schema object. See the - # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, - # and the - # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - # documentation about the format. + # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + # and the + # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + # documentation about the format. # - # Omitting `parameters` defines a function with an empty parameter list. + # Omitting `parameters` defines a function with an empty parameter list. # # @return [Hash{Symbol=>Object}, nil] optional :parameters, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] @@ -39,10 +39,10 @@ class FunctionDefinition < OpenAI::Internal::Type::BaseModel # @!attribute strict # Whether to enable strict schema adherence when generating the function call. If - # set to true, the model will follow the exact schema defined in the `parameters` - # field. Only a subset of JSON Schema is supported when `strict` is `true`. 
Learn - # more about Structured Outputs in the - # [function calling guide](docs/guides/function-calling). + # set to true, the model will follow the exact schema defined in the `parameters` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn + # more about Structured Outputs in the + # [function calling guide](docs/guides/function-calling). # # @return [Boolean, nil] optional :strict, OpenAI::Internal::Type::Boolean, nil?: true diff --git a/lib/openai/models/image.rb b/lib/openai/models/image.rb index e3842efd..dddf163d 100644 --- a/lib/openai/models/image.rb +++ b/lib/openai/models/image.rb @@ -5,7 +5,7 @@ module Models class Image < OpenAI::Internal::Type::BaseModel # @!attribute [r] b64_json # The base64-encoded JSON of the generated image, if `response_format` is - # `b64_json`. + # `b64_json`. # # @return [String, nil] optional :b64_json, String @@ -16,7 +16,7 @@ class Image < OpenAI::Internal::Type::BaseModel # @!attribute [r] revised_prompt # The prompt that was used to generate the image, if there was any revision to the - # prompt. + # prompt. # # @return [String, nil] optional :revised_prompt, String diff --git a/lib/openai/models/image_create_variation_params.rb b/lib/openai/models/image_create_variation_params.rb index 06bd032f..a24a9d51 100644 --- a/lib/openai/models/image_create_variation_params.rb +++ b/lib/openai/models/image_create_variation_params.rb @@ -10,29 +10,29 @@ class ImageCreateVariationParams < OpenAI::Internal::Type::BaseModel # @!attribute image # The image to use as the basis for the variation(s). Must be a valid PNG file, - # less than 4MB, and square. + # less than 4MB, and square. # # @return [IO, StringIO] required :image, IO # @!attribute model # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. # # @return [String, Symbol, OpenAI::Models::ImageModel, nil] optional :model, union: -> { OpenAI::Models::ImageCreateVariationParams::Model }, nil?: true # @!attribute n # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - # `n=1` is supported. + # `n=1` is supported. # # @return [Integer, nil] optional :n, Integer, nil?: true # @!attribute response_format # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. # # @return [Symbol, OpenAI::Models::ImageCreateVariationParams::ResponseFormat, nil] optional :response_format, @@ -41,15 +41,15 @@ class ImageCreateVariationParams < OpenAI::Internal::Type::BaseModel # @!attribute size # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. # # @return [Symbol, OpenAI::Models::ImageCreateVariationParams::Size, nil] optional :size, enum: -> { OpenAI::Models::ImageCreateVariationParams::Size }, nil?: true # @!attribute [r] user # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
# # @return [String, nil] optional :user, String @@ -72,7 +72,7 @@ class ImageCreateVariationParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. module Model extend OpenAI::Internal::Type::Union @@ -87,8 +87,8 @@ module Model end # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. module ResponseFormat extend OpenAI::Internal::Type::Enum @@ -103,7 +103,7 @@ module ResponseFormat end # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. module Size extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/image_edit_params.rb b/lib/openai/models/image_edit_params.rb index 43a5120c..0a0c78c4 100644 --- a/lib/openai/models/image_edit_params.rb +++ b/lib/openai/models/image_edit_params.rb @@ -10,22 +10,22 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # @!attribute image # The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - # is not provided, image must have transparency, which will be used as the mask. + # is not provided, image must have transparency, which will be used as the mask. # # @return [IO, StringIO] required :image, IO # @!attribute prompt # A text description of the desired image(s). The maximum length is 1000 - # characters. + # characters. # # @return [String] required :prompt, String # @!attribute [r] mask # An additional image whose fully transparent areas (e.g. where alpha is zero) - # indicate where `image` should be edited. Must be a valid PNG file, less than - # 4MB, and have the same dimensions as `image`. + # indicate where `image` should be edited. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. # # @return [IO, StringIO, nil] optional :mask, IO @@ -36,7 +36,7 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # @!attribute model # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. # # @return [String, Symbol, OpenAI::Models::ImageModel, nil] optional :model, union: -> { OpenAI::Models::ImageEditParams::Model }, nil?: true @@ -49,23 +49,23 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # @!attribute response_format # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. # # @return [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] optional :response_format, enum: -> { OpenAI::Models::ImageEditParams::ResponseFormat }, nil?: true # @!attribute size # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. # # @return [Symbol, OpenAI::Models::ImageEditParams::Size, nil] optional :size, enum: -> { OpenAI::Models::ImageEditParams::Size }, nil?: true # @!attribute [r] user # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. 
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). # # @return [String, nil] optional :user, String @@ -103,7 +103,7 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. module Model extend OpenAI::Internal::Type::Union @@ -118,8 +118,8 @@ module Model end # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. module ResponseFormat extend OpenAI::Internal::Type::Enum @@ -134,7 +134,7 @@ module ResponseFormat end # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. module Size extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/image_generate_params.rb b/lib/openai/models/image_generate_params.rb index 30a2eaa3..bce729c4 100644 --- a/lib/openai/models/image_generate_params.rb +++ b/lib/openai/models/image_generate_params.rb @@ -10,7 +10,7 @@ class ImageGenerateParams < OpenAI::Internal::Type::BaseModel # @!attribute prompt # A text description of the desired image(s). The maximum length is 1000 - # characters for `dall-e-2` and 4000 characters for `dall-e-3`. + # characters for `dall-e-2` and 4000 characters for `dall-e-3`. # # @return [String] required :prompt, String @@ -23,15 +23,15 @@ class ImageGenerateParams < OpenAI::Internal::Type::BaseModel # @!attribute n # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - # `n=1` is supported. + # `n=1` is supported. # # @return [Integer, nil] optional :n, Integer, nil?: true # @!attribute [r] quality # The quality of the image that will be generated. `hd` creates images with finer - # details and greater consistency across the image. This param is only supported - # for `dall-e-3`. + # details and greater consistency across the image. This param is only supported + # for `dall-e-3`. # # @return [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] optional :quality, enum: -> { OpenAI::Models::ImageGenerateParams::Quality } @@ -42,33 +42,33 @@ class ImageGenerateParams < OpenAI::Internal::Type::BaseModel # @!attribute response_format # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. # # @return [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] optional :response_format, enum: -> { OpenAI::Models::ImageGenerateParams::ResponseFormat }, nil?: true # @!attribute size # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - # `1024x1792` for `dall-e-3` models. + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. # # @return [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] optional :size, enum: -> { OpenAI::Models::ImageGenerateParams::Size }, nil?: true # @!attribute style # The style of the generated images. Must be one of `vivid` or `natural`. Vivid - # causes the model to lean towards generating hyper-real and dramatic images. 
- # Natural causes the model to produce more natural, less hyper-real looking - # images. This param is only supported for `dall-e-3`. + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. # # @return [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil] optional :style, enum: -> { OpenAI::Models::ImageGenerateParams::Style }, nil?: true # @!attribute [r] user # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). # # @return [String, nil] optional :user, String @@ -120,8 +120,8 @@ module Model end # The quality of the image that will be generated. `hd` creates images with finer - # details and greater consistency across the image. This param is only supported - # for `dall-e-3`. + # details and greater consistency across the image. This param is only supported + # for `dall-e-3`. module Quality extend OpenAI::Internal::Type::Enum @@ -136,8 +136,8 @@ module Quality end # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. module ResponseFormat extend OpenAI::Internal::Type::Enum @@ -152,8 +152,8 @@ module ResponseFormat end # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - # `1024x1792` for `dall-e-3` models. + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. module Size extend OpenAI::Internal::Type::Enum @@ -171,9 +171,9 @@ module Size end # The style of the generated images. Must be one of `vivid` or `natural`. Vivid - # causes the model to lean towards generating hyper-real and dramatic images. - # Natural causes the model to produce more natural, less hyper-real looking - # images. This param is only supported for `dall-e-3`. + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. module Style extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/moderation.rb b/lib/openai/models/moderation.rb index 4fd434ab..6abb069c 100644 --- a/lib/openai/models/moderation.rb +++ b/lib/openai/models/moderation.rb @@ -41,77 +41,77 @@ class Moderation < OpenAI::Internal::Type::BaseModel class Categories < OpenAI::Internal::Type::BaseModel # @!attribute harassment # Content that expresses, incites, or promotes harassing language towards any - # target. + # target. # # @return [Boolean] required :harassment, OpenAI::Internal::Type::Boolean # @!attribute harassment_threatening # Harassment content that also includes violence or serious harm towards any - # target. + # target. 
+ # target.
# # @return [Boolean] required :harassment_threatening, OpenAI::Internal::Type::Boolean, api_name: :"harassment/threatening" # @!attribute hate # Content that expresses, incites, or promotes hate based on race, gender, - # ethnicity, religion, nationality, sexual orientation, disability status, or - # caste. Hateful content aimed at non-protected groups (e.g., chess players) is - # harassment. + # ethnicity, religion, nationality, sexual orientation, disability status, or + # caste. Hateful content aimed at non-protected groups (e.g., chess players) is + # harassment. # # @return [Boolean] required :hate, OpenAI::Internal::Type::Boolean # @!attribute hate_threatening # Hateful content that also includes violence or serious harm towards the targeted - # group based on race, gender, ethnicity, religion, nationality, sexual - # orientation, disability status, or caste. + # group based on race, gender, ethnicity, religion, nationality, sexual + # orientation, disability status, or caste. # # @return [Boolean] required :hate_threatening, OpenAI::Internal::Type::Boolean, api_name: :"hate/threatening" # @!attribute illicit # Content that includes instructions or advice that facilitate the planning or - # execution of wrongdoing, or that gives advice or instruction on how to commit - # illicit acts. For example, "how to shoplift" would fit this category. + # execution of wrongdoing, or that gives advice or instruction on how to commit + # illicit acts. For example, "how to shoplift" would fit this category. # # @return [Boolean, nil] required :illicit, OpenAI::Internal::Type::Boolean, nil?: true # @!attribute illicit_violent # Content that includes instructions or advice that facilitate the planning or - # execution of wrongdoing that also includes violence, or that gives advice or - # instruction on the procurement of any weapon. + # execution of wrongdoing that also includes violence, or that gives advice or + # instruction on the procurement of any weapon. # # @return [Boolean, nil] required :illicit_violent, OpenAI::Internal::Type::Boolean, api_name: :"illicit/violent", nil?: true # @!attribute self_harm # Content that promotes, encourages, or depicts acts of self-harm, such as - # suicide, cutting, and eating disorders. + # suicide, cutting, and eating disorders. # # @return [Boolean] required :self_harm, OpenAI::Internal::Type::Boolean, api_name: :"self-harm" # @!attribute self_harm_instructions # Content that encourages performing acts of self-harm, such as suicide, cutting, - # and eating disorders, or that gives instructions or advice on how to commit such - # acts. + # and eating disorders, or that gives instructions or advice on how to commit such + # acts. # # @return [Boolean] required :self_harm_instructions, OpenAI::Internal::Type::Boolean, api_name: :"self-harm/instructions" # @!attribute self_harm_intent # Content where the speaker expresses that they are engaging or intend to engage - # in acts of self-harm, such as suicide, cutting, and eating disorders. + # in acts of self-harm, such as suicide, cutting, and eating disorders. # # @return [Boolean] required :self_harm_intent, OpenAI::Internal::Type::Boolean, api_name: :"self-harm/intent" # @!attribute sexual # Content meant to arouse sexual excitement, such as the description of sexual - # activity, or that promotes sexual services (excluding sex education and - # wellness). + # activity, or that promotes sexual services (excluding sex education and + # wellness). 
+ # activity, or that promotes sexual services (excluding sex education and
+ # wellness).
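A sketch of exercising the category flags above via the moderations endpoint; the `client.moderations.create` method and the model name are assumptions, while the boolean accessors (`harassment_threatening`, `self_harm_intent`, mapped from the slash-separated `api_name`s) come from the attributes in this diff:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    moderation = client.moderations.create(
      input: "I want to hurt them.",  # a single string, array of strings, or multi-modal parts
      model: "omni-moderation-latest" # hypothetical moderation model name
    )

    categories = moderation.results.first.categories
    puts "harassment?             #{categories.harassment}"
    puts "harassment/threatening? #{categories.harassment_threatening}"
    puts "self-harm/intent?       #{categories.self_harm_intent}"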
# # @return [Boolean] required :sexual, OpenAI::Internal::Type::Boolean diff --git a/lib/openai/models/moderation_create_params.rb b/lib/openai/models/moderation_create_params.rb index c5e3ff4a..41230634 100644 --- a/lib/openai/models/moderation_create_params.rb +++ b/lib/openai/models/moderation_create_params.rb @@ -10,16 +10,16 @@ class ModerationCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute input # Input (or inputs) to classify. Can be a single string, an array of strings, or - # an array of multi-modal input objects similar to other models. + # an array of multi-modal input objects similar to other models. # # @return [String, Array, Array] required :input, union: -> { OpenAI::Models::ModerationCreateParams::Input } # @!attribute [r] model # The content moderation model you would like to use. Learn more in - # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and - # learn about available models - # [here](https://platform.openai.com/docs/models#moderation). + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). # # @return [String, Symbol, OpenAI::Models::ModerationModel, nil] optional :model, union: -> { OpenAI::Models::ModerationCreateParams::Model } @@ -38,7 +38,7 @@ class ModerationCreateParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Input (or inputs) to classify. Can be a single string, an array of strings, or - # an array of multi-modal input objects similar to other models. + # an array of multi-modal input objects similar to other models. module Input extend OpenAI::Internal::Type::Union @@ -62,9 +62,9 @@ module Input end # The content moderation model you would like to use. Learn more in - # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and - # learn about available models - # [here](https://platform.openai.com/docs/models#moderation). + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). module Model extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/other_file_chunking_strategy_object.rb b/lib/openai/models/other_file_chunking_strategy_object.rb index 9b28e285..acb54ef8 100644 --- a/lib/openai/models/other_file_chunking_strategy_object.rb +++ b/lib/openai/models/other_file_chunking_strategy_object.rb @@ -11,8 +11,8 @@ class OtherFileChunkingStrategyObject < OpenAI::Internal::Type::BaseModel # @!parse # # This is returned when the chunking strategy is unknown. Typically, this is - # # because the file was indexed before the `chunking_strategy` concept was - # # introduced in the API. + # # because the file was indexed before the `chunking_strategy` concept was + # # introduced in the API. # # # # @param type [Symbol, :other] # # diff --git a/lib/openai/models/reasoning.rb b/lib/openai/models/reasoning.rb index 0727d5c1..25128ce1 100644 --- a/lib/openai/models/reasoning.rb +++ b/lib/openai/models/reasoning.rb @@ -6,10 +6,10 @@ class Reasoning < OpenAI::Internal::Type::BaseModel # @!attribute effort # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. 
Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :effort, enum: -> { OpenAI::Models::ReasoningEffort }, nil?: true @@ -17,9 +17,9 @@ class Reasoning < OpenAI::Internal::Type::BaseModel # @!attribute generate_summary # **computer_use_preview only** # - # A summary of the reasoning performed by the model. This can be useful for - # debugging and understanding the model's reasoning process. One of `concise` or - # `detailed`. + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `concise` or + # `detailed`. # # @return [Symbol, OpenAI::Models::Reasoning::GenerateSummary, nil] optional :generate_summary, enum: -> { OpenAI::Models::Reasoning::GenerateSummary }, nil?: true @@ -27,8 +27,8 @@ class Reasoning < OpenAI::Internal::Type::BaseModel # @!parse # # **o-series models only** # # - # # Configuration options for - # # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # # Configuration options for + # # [reasoning models](https://platform.openai.com/docs/guides/reasoning). # # # # @param effort [Symbol, OpenAI::Models::ReasoningEffort, nil] # # @param generate_summary [Symbol, OpenAI::Models::Reasoning::GenerateSummary, nil] @@ -39,9 +39,9 @@ class Reasoning < OpenAI::Internal::Type::BaseModel # **computer_use_preview only** # - # A summary of the reasoning performed by the model. This can be useful for - # debugging and understanding the model's reasoning process. One of `concise` or - # `detailed`. + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `concise` or + # `detailed`. # # @see OpenAI::Models::Reasoning#generate_summary module GenerateSummary diff --git a/lib/openai/models/reasoning_effort.rb b/lib/openai/models/reasoning_effort.rb index 737f9d5f..a7bb035e 100644 --- a/lib/openai/models/reasoning_effort.rb +++ b/lib/openai/models/reasoning_effort.rb @@ -4,10 +4,10 @@ module OpenAI module Models # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. module ReasoningEffort extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/response_format_json_object.rb b/lib/openai/models/response_format_json_object.rb index bcd20439..611237a3 100644 --- a/lib/openai/models/response_format_json_object.rb +++ b/lib/openai/models/response_format_json_object.rb @@ -11,8 +11,8 @@ class ResponseFormatJSONObject < OpenAI::Internal::Type::BaseModel # @!parse # # JSON object response format. 
An older method of generating JSON responses. Using - # # `json_schema` is recommended for models that support it. Note that the model - # # will not generate JSON without a system or user message instructing it to do so. + # # `json_schema` is recommended for models that support it. Note that the model + # # will not generate JSON without a system or user message instructing it to do so. # # # # @param type [Symbol, :json_object] # # diff --git a/lib/openai/models/response_format_json_schema.rb b/lib/openai/models/response_format_json_schema.rb index 0bb1f00c..56f2e3b1 100644 --- a/lib/openai/models/response_format_json_schema.rb +++ b/lib/openai/models/response_format_json_schema.rb @@ -17,8 +17,8 @@ class ResponseFormatJSONSchema < OpenAI::Internal::Type::BaseModel # @!parse # # JSON Schema response format. Used to generate structured JSON responses. Learn - # # more about - # # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + # # more about + # # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). # # # # @param json_schema [OpenAI::Models::ResponseFormatJSONSchema::JSONSchema] # # @param type [Symbol, :json_schema] @@ -31,14 +31,14 @@ class ResponseFormatJSONSchema < OpenAI::Internal::Type::BaseModel class JSONSchema < OpenAI::Internal::Type::BaseModel # @!attribute name # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores - # and dashes, with a maximum length of 64. + # and dashes, with a maximum length of 64. # # @return [String] required :name, String # @!attribute [r] description # A description of what the response format is for, used by the model to determine - # how to respond in the format. + # how to respond in the format. # # @return [String, nil] optional :description, String @@ -49,7 +49,7 @@ class JSONSchema < OpenAI::Internal::Type::BaseModel # @!attribute [r] schema # The schema for the response format, described as a JSON Schema object. Learn how - # to build JSON schemas [here](https://json-schema.org/). + # to build JSON schemas [here](https://json-schema.org/). # # @return [Hash{Symbol=>Object}, nil] optional :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] @@ -60,10 +60,10 @@ class JSONSchema < OpenAI::Internal::Type::BaseModel # @!attribute strict # Whether to enable strict schema adherence when generating the output. If set to - # true, the model will always follow the exact schema defined in the `schema` - # field. Only a subset of JSON Schema is supported when `strict` is `true`. To - # learn more, read the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # # @return [Boolean, nil] optional :strict, OpenAI::Internal::Type::Boolean, nil?: true diff --git a/lib/openai/models/responses/computer_tool.rb b/lib/openai/models/responses/computer_tool.rb index dcbbba28..3af1eabe 100644 --- a/lib/openai/models/responses/computer_tool.rb +++ b/lib/openai/models/responses/computer_tool.rb @@ -30,7 +30,7 @@ class ComputerTool < OpenAI::Internal::Type::BaseModel # @!parse # # A tool that controls a virtual computer. Learn more about the - # # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). 
+ # # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
# #
# # @param display_height [Float]
# # @param display_width [Float]
diff --git a/lib/openai/models/responses/easy_input_message.rb b/lib/openai/models/responses/easy_input_message.rb
index d7c78f66..68cbd033 100644
--- a/lib/openai/models/responses/easy_input_message.rb
+++ b/lib/openai/models/responses/easy_input_message.rb
@@ -6,14 +6,14 @@ module Responses
class EasyInputMessage < OpenAI::Internal::Type::BaseModel
# @!attribute content
# Text, image, or audio input to the model, used to generate a response. Can also
- # contain previous assistant responses.
+ # contain previous assistant responses.
#
# @return [String, Array]
required :content, union: -> { OpenAI::Models::Responses::EasyInputMessage::Content }
# @!attribute role
# The role of the message input. One of `user`, `assistant`, `system`, or
- # `developer`.
+ # `developer`.
#
# @return [Symbol, OpenAI::Models::Responses::EasyInputMessage::Role]
required :role, enum: -> { OpenAI::Models::Responses::EasyInputMessage::Role }
@@ -30,10 +30,10 @@ class EasyInputMessage < OpenAI::Internal::Type::BaseModel
# @!parse
# # A message input to the model with a role indicating instruction following
- # # hierarchy. Instructions given with the `developer` or `system` role take
- # # precedence over instructions given with the `user` role. Messages with the
- # # `assistant` role are presumed to have been generated by the model in previous
- # # interactions.
+ # # hierarchy. Instructions given with the `developer` or `system` role take
+ # # precedence over instructions given with the `user` role. Messages with the
+ # # `assistant` role are presumed to have been generated by the model in previous
+ # # interactions.
# #
# # @param content [String, Array]
# # @param role [Symbol, OpenAI::Models::Responses::EasyInputMessage::Role]
@@ -44,7 +44,7 @@
# def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
# Text, image, or audio input to the model, used to generate a response. Can also
- # contain previous assistant responses.
+ # contain previous assistant responses.
#
# @see OpenAI::Models::Responses::EasyInputMessage#content
module Content
@@ -63,7 +63,7 @@ module Content
end
# The role of the message input. One of `user`, `assistant`, `system`, or
- # `developer`.
+ # `developer`.
#
# @see OpenAI::Models::Responses::EasyInputMessage#role
module Role
diff --git a/lib/openai/models/responses/file_search_tool.rb b/lib/openai/models/responses/file_search_tool.rb
index 62d497ad..8fd1295e 100644
--- a/lib/openai/models/responses/file_search_tool.rb
+++ b/lib/openai/models/responses/file_search_tool.rb
@@ -28,7 +28,7 @@ class FileSearchTool < OpenAI::Internal::Type::BaseModel
# @!attribute [r] max_num_results
# The maximum number of results to return. This number should be between 1 and 50
- # inclusive.
+ # inclusive.
#
# @return [Integer, nil]
optional :max_num_results, Integer
@@ -49,8 +49,8 @@ class FileSearchTool < OpenAI::Internal::Type::BaseModel
# @!parse
# # A tool that searches for relevant content from uploaded files. Learn more about
- # # the
- # # [file search tool](https://platform.openai.com/docs/guides/tools-file-search).
+ # # the
+ # # [file search tool](https://platform.openai.com/docs/guides/tools-file-search).
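Taken together, the `FileSearchTool` fields documented here map onto a request like the following hedged sketch (not part of this patch; the client setup, vector store ID, and exact hash shape are assumptions based on the documented attributes):

```ruby
require "openai"

# Hedged sketch: a configured client and a hypothetical vector store ID.
client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

response = client.responses.create(
  model: "gpt-4o",
  input: "What does our refund policy say about late returns?",
  tools: [
    {
      type: :file_search,
      vector_store_ids: ["vs_123"],            # hypothetical vector store ID
      max_num_results: 8,                      # must be between 1 and 50 inclusive
      ranking_options: {score_threshold: 0.5}  # 0..1; closer to 1 returns fewer, more relevant results
    }
  ]
)
```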
# #
# # @param vector_store_ids [Array]
# # @param filters [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter]
@@ -93,8 +93,8 @@ class RankingOptions < OpenAI::Internal::Type::BaseModel
# @!attribute [r] score_threshold
# The score threshold for the file search, a number between 0 and 1. Numbers
- # closer to 1 will attempt to return only the most relevant results, but may
- # return fewer results.
+ # closer to 1 will attempt to return only the most relevant results, but may
+ # return fewer results.
#
# @return [Float, nil]
optional :score_threshold, Float
diff --git a/lib/openai/models/responses/function_tool.rb b/lib/openai/models/responses/function_tool.rb
index 596b365a..8b2433dc 100644
--- a/lib/openai/models/responses/function_tool.rb
+++ b/lib/openai/models/responses/function_tool.rb
@@ -30,15 +30,15 @@ class FunctionTool < OpenAI::Internal::Type::BaseModel
# @!attribute description
# A description of the function. Used by the model to determine whether or not to
- # call the function.
+ # call the function.
#
# @return [String, nil]
optional :description, String, nil?: true
# @!parse
# # Defines a function in your own code the model can choose to call. Learn more
- # # about
- # # [function calling](https://platform.openai.com/docs/guides/function-calling).
+ # # about
+ # # [function calling](https://platform.openai.com/docs/guides/function-calling).
# #
# # @param name [String]
# # @param parameters [Hash{Symbol=>Object}]
diff --git a/lib/openai/models/responses/input_item_list_params.rb b/lib/openai/models/responses/input_item_list_params.rb
index a6a7a77f..7a4d136a 100644
--- a/lib/openai/models/responses/input_item_list_params.rb
+++ b/lib/openai/models/responses/input_item_list_params.rb
@@ -31,7 +31,7 @@ class InputItemListParams < OpenAI::Internal::Type::BaseModel
# @!attribute [r] include
# Additional fields to include in the response. See the `include` parameter for
- # Response creation above for more information.
+ # Response creation above for more information.
#
# @return [Array, nil]
optional :include,
@@ -43,7 +43,7 @@
# @!attribute [r] limit
# A limit on the number of objects to be returned. Limit can range between 1 and
- # 100, and the default is 20.
+ # 100, and the default is 20.
#
# @return [Integer, nil]
optional :limit, Integer
@@ -55,8 +55,8 @@
# @!attribute [r] order
# The order to return the input items in. Default is `asc`.
#
- # - `asc`: Return the input items in ascending order.
- # - `desc`: Return the input items in descending order.
+ # - `asc`: Return the input items in ascending order.
+ # - `desc`: Return the input items in descending order.
#
# @return [Symbol, OpenAI::Models::Responses::InputItemListParams::Order, nil]
optional :order, enum: -> { OpenAI::Models::Responses::InputItemListParams::Order }
@@ -79,8 +79,8 @@
# The order to return the input items in. Default is `asc`.
#
- # - `asc`: Return the input items in ascending order.
- # - `desc`: Return the input items in descending order.
+ # - `asc`: Return the input items in ascending order.
+ # - `desc`: Return the input items in descending order.
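The `include`, `limit`, and `order` parameters documented above are query parameters on the input-item list endpoint; a hedged sketch of a typical call (assuming a configured `client`, with a hypothetical response ID):

```ruby
# Hedged sketch: page through a response's input items, newest first.
page = client.responses.input_items.list(
  "resp_123",   # hypothetical response ID
  limit: 50,    # 1..100, default 20
  order: :desc  # :asc (the default) or :desc
)
page.data.each { |item| puts item.type } # assumes the returned page exposes `data`
```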
module Order extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/responses/response.rb b/lib/openai/models/responses/response.rb index 844833f7..052c32f9 100644 --- a/lib/openai/models/responses/response.rb +++ b/lib/openai/models/responses/response.rb @@ -33,32 +33,32 @@ class Response < OpenAI::Internal::Type::BaseModel # @!attribute instructions # Inserts a system (or developer) message as the first item in the model's - # context. + # context. # - # When using along with `previous_response_id`, the instructions from a previous - # response will not be carried over to the next response. This makes it simple to - # swap out system (or developer) messages in new responses. + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. # # @return [String, nil] required :instructions, String, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute model # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. # # @return [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel] required :model, union: -> { OpenAI::Models::ResponsesModel } @@ -72,11 +72,11 @@ class Response < OpenAI::Internal::Type::BaseModel # @!attribute output # An array of content items generated by the model. # - # - The length and order of items in the `output` array is dependent on the - # model's response. - # - Rather than accessing the first item in the `output` array and assuming it's - # an `assistant` message with the content generated by the model, you might - # consider using the `output_text` property where supported in SDKs. + # - The length and order of items in the `output` array is dependent on the + # model's response. + # - Rather than accessing the first item in the `output` array and assuming it's + # an `assistant` message with the content generated by the model, you might + # consider using the `output_text` property where supported in SDKs. # # @return [Array] required :output, @@ -90,62 +90,62 @@ class Response < OpenAI::Internal::Type::BaseModel # @!attribute temperature # What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. # # @return [Float, nil] required :temperature, Float, nil?: true # @!attribute tool_choice # How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. # # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] required :tool_choice, union: -> { OpenAI::Models::Responses::Response::ToolChoice } # @!attribute tools # An array of tools the model may call while generating a response. You can - # specify which tool to use by setting the `tool_choice` parameter. + # specify which tool to use by setting the `tool_choice` parameter. # - # The two categories of tools you can provide the model are: + # The two categories of tools you can provide the model are: # - # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - # capabilities, like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search). - # Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). - # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). # # @return [Array] required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Responses::Tool] } # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. # # @return [Float, nil] required :top_p, Float, nil?: true # @!attribute max_output_tokens # An upper bound for the number of tokens that can be generated for a response, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). 
+ # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). # # @return [Integer, nil] optional :max_output_tokens, Integer, nil?: true # @!attribute previous_response_id # The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. Learn more about - # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). # # @return [String, nil] optional :previous_response_id, String, nil?: true @@ -153,15 +153,15 @@ class Response < OpenAI::Internal::Type::BaseModel # @!attribute reasoning # **o-series models only** # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). # # @return [OpenAI::Models::Reasoning, nil] optional :reasoning, -> { OpenAI::Models::Reasoning }, nil?: true # @!attribute [r] status # The status of the response generation. One of `completed`, `failed`, - # `in_progress`, or `incomplete`. + # `in_progress`, or `incomplete`. # # @return [Symbol, OpenAI::Models::Responses::ResponseStatus, nil] optional :status, enum: -> { OpenAI::Models::Responses::ResponseStatus } @@ -172,10 +172,10 @@ class Response < OpenAI::Internal::Type::BaseModel # @!attribute [r] text # Configuration options for a text response from the model. Can be plain text or - # structured JSON data. Learn more: + # structured JSON data. Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) # # @return [OpenAI::Models::Responses::ResponseTextConfig, nil] optional :text, -> { OpenAI::Models::Responses::ResponseTextConfig } @@ -187,18 +187,18 @@ class Response < OpenAI::Internal::Type::BaseModel # @!attribute truncation # The truncation strategy to use for the model response. # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. # # @return [Symbol, OpenAI::Models::Responses::Response::Truncation, nil] optional :truncation, enum: -> { OpenAI::Models::Responses::Response::Truncation }, nil?: true # @!attribute [r] usage # Represents token usage details including input tokens, output tokens, a - # breakdown of output tokens, and the total tokens used. + # breakdown of output tokens, and the total tokens used. 
# # @return [OpenAI::Models::Responses::ResponseUsage, nil] optional :usage, -> { OpenAI::Models::Responses::ResponseUsage } @@ -209,8 +209,8 @@ class Response < OpenAI::Internal::Type::BaseModel # @!attribute [r] user # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). # # @return [String, nil] optional :user, String @@ -312,8 +312,8 @@ module Reason end # How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. # # @see OpenAI::Models::Responses::Response#tool_choice module ToolChoice @@ -343,11 +343,11 @@ module ToolChoice # The truncation strategy to use for the model response. # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. # # @see OpenAI::Models::Responses::Response#truncation module Truncation diff --git a/lib/openai/models/responses/response_computer_tool_call.rb b/lib/openai/models/responses/response_computer_tool_call.rb index 7eafa499..c8d6e68a 100644 --- a/lib/openai/models/responses/response_computer_tool_call.rb +++ b/lib/openai/models/responses/response_computer_tool_call.rb @@ -31,7 +31,7 @@ class ResponseComputerToolCall < OpenAI::Internal::Type::BaseModel # @!attribute status # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Status] required :status, enum: -> { OpenAI::Models::Responses::ResponseComputerToolCall::Status } @@ -44,8 +44,8 @@ class ResponseComputerToolCall < OpenAI::Internal::Type::BaseModel # @!parse # # A tool call to a computer use tool. See the - # # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) - # # for more information. + # # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) + # # for more information. 
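Because `action` is a union over the per-event classes that follow, callers usually branch on `action.type`; a hedged sketch (the handlers are hypothetical and not part of this patch):

```ruby
# Hedged sketch: `call` is a ResponseComputerToolCall taken from `response.output`.
case call.action.type
when :click      then handle_click(call.action)  # hypothetical handler
when :screenshot then capture_screen             # hypothetical handler
when :wait       then sleep(1)
else warn("unhandled computer action: #{call.action.type}")
end
```

Every action class carries a `type` constant, so a dispatch like this stays safe as new actions appear, provided the `else` arm is kept.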
# # # # @param id [String] # # @param action [OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait] @@ -96,14 +96,14 @@ module Action class Click < OpenAI::Internal::Type::BaseModel # @!attribute button # Indicates which mouse button was pressed during the click. One of `left`, - # `right`, `wheel`, `back`, or `forward`. + # `right`, `wheel`, `back`, or `forward`. # # @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::Button] required :button, enum: -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::Button } # @!attribute type # Specifies the event type. For a click action, this property is always set to - # `click`. + # `click`. # # @return [Symbol, :click] required :type, const: :click @@ -133,7 +133,7 @@ class Click < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Indicates which mouse button was pressed during the click. One of `left`, - # `right`, `wheel`, `back`, or `forward`. + # `right`, `wheel`, `back`, or `forward`. # # @see OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click#button module Button @@ -156,7 +156,7 @@ module Button class DoubleClick < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a double click action, this property is always set - # to `double_click`. + # to `double_click`. # # @return [Symbol, :double_click] required :type, const: :double_click @@ -188,14 +188,14 @@ class DoubleClick < OpenAI::Internal::Type::BaseModel class Drag < OpenAI::Internal::Type::BaseModel # @!attribute path # An array of coordinates representing the path of the drag action. Coordinates - # will appear as an array of objects, eg + # will appear as an array of objects, eg # - # ``` - # [ - # { x: 100, y: 200 }, - # { x: 200, y: 300 } - # ] - # ``` + # ``` + # [ + # { x: 100, y: 200 }, + # { x: 200, y: 300 } + # ] + # ``` # # @return [Array] required :path, @@ -203,7 +203,7 @@ class Drag < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a drag action, this property is always set to - # `drag`. + # `drag`. # # @return [Symbol, :drag] required :type, const: :drag @@ -246,14 +246,14 @@ class Path < OpenAI::Internal::Type::BaseModel class Keypress < OpenAI::Internal::Type::BaseModel # @!attribute keys # The combination of keys the model is requesting to be pressed. This is an array - # of strings, each representing a key. + # of strings, each representing a key. # # @return [Array] required :keys, OpenAI::Internal::Type::ArrayOf[String] # @!attribute type # Specifies the event type. For a keypress action, this property is always set to - # `keypress`. + # `keypress`. # # @return [Symbol, :keypress] required :type, const: :keypress @@ -272,7 +272,7 @@ class Keypress < OpenAI::Internal::Type::BaseModel class Move < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. 
For a move action, this property is always set to - # `move`. + # `move`. # # @return [Symbol, :move] required :type, const: :move @@ -304,7 +304,7 @@ class Move < OpenAI::Internal::Type::BaseModel class Screenshot < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a screenshot action, this property is always set - # to `screenshot`. + # to `screenshot`. # # @return [Symbol, :screenshot] required :type, const: :screenshot @@ -334,7 +334,7 @@ class Scroll < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a scroll action, this property is always set to - # `scroll`. + # `scroll`. # # @return [Symbol, :scroll] required :type, const: :scroll @@ -374,7 +374,7 @@ class Type < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a type action, this property is always set to - # `type`. + # `type`. # # @return [Symbol, :type] required :type, const: :type @@ -393,7 +393,7 @@ class Type < OpenAI::Internal::Type::BaseModel class Wait < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a wait action, this property is always set to - # `wait`. + # `wait`. # # @return [Symbol, :wait] required :type, const: :wait @@ -445,7 +445,7 @@ class PendingSafetyCheck < OpenAI::Internal::Type::BaseModel end # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @see OpenAI::Models::Responses::ResponseComputerToolCall#status module Status diff --git a/lib/openai/models/responses/response_computer_tool_call_output_item.rb b/lib/openai/models/responses/response_computer_tool_call_output_item.rb index 0db1e5cc..2cc50149 100644 --- a/lib/openai/models/responses/response_computer_tool_call_output_item.rb +++ b/lib/openai/models/responses/response_computer_tool_call_output_item.rb @@ -30,7 +30,7 @@ class ResponseComputerToolCallOutputItem < OpenAI::Internal::Type::BaseModel # @!attribute [r] acknowledged_safety_checks # The safety checks reported by the API that have been acknowledged by the - # developer. + # developer. # # @return [Array, nil] optional :acknowledged_safety_checks, @@ -42,7 +42,7 @@ class ResponseComputerToolCallOutputItem < OpenAI::Internal::Type::BaseModel # @!attribute [r] status # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::Status, nil] optional :status, enum: -> { OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::Status } @@ -95,7 +95,7 @@ class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel end # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. 
# # @see OpenAI::Models::Responses::ResponseComputerToolCallOutputItem#status module Status diff --git a/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rb b/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rb index 1ac341fb..9cfc543a 100644 --- a/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rb +++ b/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rb @@ -6,7 +6,7 @@ module Responses class ResponseComputerToolCallOutputScreenshot < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a computer screenshot, this property is always set - # to `computer_screenshot`. + # to `computer_screenshot`. # # @return [Symbol, :computer_screenshot] required :type, const: :computer_screenshot diff --git a/lib/openai/models/responses/response_create_params.rb b/lib/openai/models/responses/response_create_params.rb index 38550327..0e96b549 100644 --- a/lib/openai/models/responses/response_create_params.rb +++ b/lib/openai/models/responses/response_create_params.rb @@ -14,36 +14,36 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute input # Text, image, or file inputs to the model, used to generate a response. # - # Learn more: + # Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Image inputs](https://platform.openai.com/docs/guides/images) - # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) # # @return [String, Array] required :input, union: -> { OpenAI::Models::Responses::ResponseCreateParams::Input } # @!attribute model # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. # # @return [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel] required :model, union: -> { OpenAI::Models::ResponsesModel } # @!attribute include # Specify additional output data to include in the model response. Currently - # supported values are: + # supported values are: # - # - `file_search_call.results`: Include the search results of the file search tool - # call. - # - `message.input_image.image_url`: Include image urls from the input message. - # - `computer_call_output.output.image_url`: Include image urls from the computer - # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. 
+ # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. # # @return [Array, nil] optional :include, @@ -52,30 +52,30 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute instructions # Inserts a system (or developer) message as the first item in the model's - # context. + # context. # - # When using along with `previous_response_id`, the instructions from a previous - # response will not be carried over to the next response. This makes it simple to - # swap out system (or developer) messages in new responses. + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. # # @return [String, nil] optional :instructions, String, nil?: true # @!attribute max_output_tokens # An upper bound for the number of tokens that can be generated for a response, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). # # @return [Integer, nil] optional :max_output_tokens, Integer, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -88,8 +88,8 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute previous_response_id # The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. Learn more about - # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). # # @return [String, nil] optional :previous_response_id, String, nil?: true @@ -97,8 +97,8 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute reasoning # **o-series models only** # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). # # @return [OpenAI::Models::Reasoning, nil] optional :reasoning, -> { OpenAI::Models::Reasoning }, nil?: true @@ -111,19 +111,19 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. 
We generally recommend altering this or `top_p` but + # not both. # # @return [Float, nil] optional :temperature, Float, nil?: true # @!attribute [r] text # Configuration options for a text response from the model. Can be plain text or - # structured JSON data. Learn more: + # structured JSON data. Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) # # @return [OpenAI::Models::Responses::ResponseTextConfig, nil] optional :text, -> { OpenAI::Models::Responses::ResponseTextConfig } @@ -134,8 +134,8 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] tool_choice # How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. # # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, nil] optional :tool_choice, union: -> { OpenAI::Models::Responses::ResponseCreateParams::ToolChoice } @@ -146,19 +146,19 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] tools # An array of tools the model may call while generating a response. You can - # specify which tool to use by setting the `tool_choice` parameter. + # specify which tool to use by setting the `tool_choice` parameter. # - # The two categories of tools you can provide the model are: + # The two categories of tools you can provide the model are: # - # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - # capabilities, like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search). - # Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). - # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). # # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Responses::Tool] } @@ -169,10 +169,10 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. 
+ # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. # # @return [Float, nil] optional :top_p, Float, nil?: true @@ -180,19 +180,19 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute truncation # The truncation strategy to use for the model response. # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. # # @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] optional :truncation, enum: -> { OpenAI::Models::Responses::ResponseCreateParams::Truncation }, nil?: true # @!attribute [r] user # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). # # @return [String, nil] optional :user, String @@ -249,13 +249,13 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # Text, image, or file inputs to the model, used to generate a response. # - # Learn more: + # Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Image inputs](https://platform.openai.com/docs/guides/images) - # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) module Input extend OpenAI::Internal::Type::Union @@ -273,8 +273,8 @@ module Input end # How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. module ToolChoice extend OpenAI::Internal::Type::Union @@ -302,11 +302,11 @@ module ToolChoice # The truncation strategy to use for the model response. 
# - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. module Truncation extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/responses/response_file_search_tool_call.rb b/lib/openai/models/responses/response_file_search_tool_call.rb index c8afd7e5..7048c7a4 100644 --- a/lib/openai/models/responses/response_file_search_tool_call.rb +++ b/lib/openai/models/responses/response_file_search_tool_call.rb @@ -18,7 +18,7 @@ class ResponseFileSearchToolCall < OpenAI::Internal::Type::BaseModel # @!attribute status # The status of the file search tool call. One of `in_progress`, `searching`, - # `incomplete` or `failed`, + # `incomplete` or `failed`, # # @return [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall::Status] required :status, enum: -> { OpenAI::Models::Responses::ResponseFileSearchToolCall::Status } @@ -39,8 +39,8 @@ class ResponseFileSearchToolCall < OpenAI::Internal::Type::BaseModel # @!parse # # The results of a file search tool call. See the - # # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) - # # for more information. + # # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) + # # for more information. # # # # @param id [String] # # @param queries [Array] @@ -53,7 +53,7 @@ class ResponseFileSearchToolCall < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The status of the file search tool call. One of `in_progress`, `searching`, - # `incomplete` or `failed`, + # `incomplete` or `failed`, # # @see OpenAI::Models::Responses::ResponseFileSearchToolCall#status module Status @@ -75,10 +75,10 @@ module Status class Result < OpenAI::Internal::Type::BaseModel # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. 
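A hedged sketch of reading these file-search results and their user-defined `attributes` off a finished response (assumes the call object exposes `results`, and that `file_search_call.results` was requested via `include`):

```ruby
# Hedged sketch: surface file search results and their attributes.
response.output.each do |item|
  next unless item.is_a?(OpenAI::Models::Responses::ResponseFileSearchToolCall)

  puts item.status # :in_progress, :searching, :incomplete, or :failed
  Array(item.results).each do |result|
    puts result.attributes # up to 16 user-defined key/value pairs, if set
  end
end
```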
# # @return [Hash{Symbol=>String, Float, Boolean}, nil] optional :attributes, diff --git a/lib/openai/models/responses/response_format_text_config.rb b/lib/openai/models/responses/response_format_text_config.rb index bdacd77c..bdb5e64f 100644 --- a/lib/openai/models/responses/response_format_text_config.rb +++ b/lib/openai/models/responses/response_format_text_config.rb @@ -5,17 +5,17 @@ module Models module Responses # An object specifying the format that the model must output. # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # The default format is `{ "type": "text" }` with no additional options. + # The default format is `{ "type": "text" }` with no additional options. # - # **Not recommended for gpt-4o and newer models:** + # **Not recommended for gpt-4o and newer models:** # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. module ResponseFormatTextConfig extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/responses/response_format_text_json_schema_config.rb b/lib/openai/models/responses/response_format_text_json_schema_config.rb index 0fac4e6d..3cbc68bd 100644 --- a/lib/openai/models/responses/response_format_text_json_schema_config.rb +++ b/lib/openai/models/responses/response_format_text_json_schema_config.rb @@ -6,14 +6,14 @@ module Responses class ResponseFormatTextJSONSchemaConfig < OpenAI::Internal::Type::BaseModel # @!attribute name # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores - # and dashes, with a maximum length of 64. + # and dashes, with a maximum length of 64. # # @return [String] required :name, String # @!attribute schema # The schema for the response format, described as a JSON Schema object. Learn how - # to build JSON schemas [here](https://json-schema.org/). + # to build JSON schemas [here](https://json-schema.org/). # # @return [Hash{Symbol=>Object}] required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] @@ -26,7 +26,7 @@ class ResponseFormatTextJSONSchemaConfig < OpenAI::Internal::Type::BaseModel # @!attribute [r] description # A description of what the response format is for, used by the model to determine - # how to respond in the format. + # how to respond in the format. # # @return [String, nil] optional :description, String @@ -37,18 +37,18 @@ class ResponseFormatTextJSONSchemaConfig < OpenAI::Internal::Type::BaseModel # @!attribute strict # Whether to enable strict schema adherence when generating the output. If set to - # true, the model will always follow the exact schema defined in the `schema` - # field. Only a subset of JSON Schema is supported when `strict` is `true`. 
To - # learn more, read the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # # @return [Boolean, nil] optional :strict, OpenAI::Internal::Type::Boolean, nil?: true # @!parse # # JSON Schema response format. Used to generate structured JSON responses. Learn - # # more about - # # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + # # more about + # # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). # # # # @param name [String] # # @param schema [Hash{Symbol=>Object}] diff --git a/lib/openai/models/responses/response_function_tool_call.rb b/lib/openai/models/responses/response_function_tool_call.rb index 61ce02d2..5bf53133 100644 --- a/lib/openai/models/responses/response_function_tool_call.rb +++ b/lib/openai/models/responses/response_function_tool_call.rb @@ -40,7 +40,7 @@ class ResponseFunctionToolCall < OpenAI::Internal::Type::BaseModel # @!attribute [r] status # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall::Status, nil] optional :status, enum: -> { OpenAI::Models::Responses::ResponseFunctionToolCall::Status } @@ -51,8 +51,8 @@ class ResponseFunctionToolCall < OpenAI::Internal::Type::BaseModel # @!parse # # A tool call to run a function. See the - # # [function calling guide](https://platform.openai.com/docs/guides/function-calling) - # # for more information. + # # [function calling guide](https://platform.openai.com/docs/guides/function-calling) + # # for more information. # # # # @param arguments [String] # # @param call_id [String] @@ -66,7 +66,7 @@ class ResponseFunctionToolCall < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @see OpenAI::Models::Responses::ResponseFunctionToolCall#status module Status diff --git a/lib/openai/models/responses/response_function_tool_call_item.rb b/lib/openai/models/responses/response_function_tool_call_item.rb index 8315cb64..17e2ceff 100644 --- a/lib/openai/models/responses/response_function_tool_call_item.rb +++ b/lib/openai/models/responses/response_function_tool_call_item.rb @@ -12,8 +12,8 @@ class ResponseFunctionToolCallItem < OpenAI::Models::Responses::ResponseFunction # @!parse # # A tool call to run a function. See the - # # [function calling guide](https://platform.openai.com/docs/guides/function-calling) - # # for more information. + # # [function calling guide](https://platform.openai.com/docs/guides/function-calling) + # # for more information. 
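These comments describe the function-calling round trip: parse the JSON-encoded `arguments`, run your own code, then hand the result back as a `function_call_output` input item. A hedged sketch, with hypothetical application code:

```ruby
require "json"

# Hedged sketch: find the function call the model emitted.
call = response.output.find do |item|
  item.is_a?(OpenAI::Models::Responses::ResponseFunctionToolCall)
end

args = JSON.parse(call.arguments) # arguments arrive as a JSON-encoded string

client.responses.create(
  model: "gpt-4o",
  previous_response_id: response.id,
  input: [
    {
      type: :function_call_output,
      call_id: call.call_id,               # echoes the model's call_id back
      output: lookup_weather(args).to_json # hypothetical application code
    }
  ]
)
```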
# # # # @param id [String] # # diff --git a/lib/openai/models/responses/response_function_tool_call_output_item.rb b/lib/openai/models/responses/response_function_tool_call_output_item.rb index 081ef864..2123bd97 100644 --- a/lib/openai/models/responses/response_function_tool_call_output_item.rb +++ b/lib/openai/models/responses/response_function_tool_call_output_item.rb @@ -30,7 +30,7 @@ class ResponseFunctionToolCallOutputItem < OpenAI::Internal::Type::BaseModel # @!attribute [r] status # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::Status, nil] optional :status, enum: -> { OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::Status } @@ -51,7 +51,7 @@ class ResponseFunctionToolCallOutputItem < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @see OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem#status module Status diff --git a/lib/openai/models/responses/response_function_web_search.rb b/lib/openai/models/responses/response_function_web_search.rb index c2cdfa12..e6d9a2e8 100644 --- a/lib/openai/models/responses/response_function_web_search.rb +++ b/lib/openai/models/responses/response_function_web_search.rb @@ -24,8 +24,8 @@ class ResponseFunctionWebSearch < OpenAI::Internal::Type::BaseModel # @!parse # # The results of a web search tool call. See the - # # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for - # # more information. + # # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + # # more information. # # # # @param id [String] # # @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch::Status] diff --git a/lib/openai/models/responses/response_includable.rb b/lib/openai/models/responses/response_includable.rb index 26bd124c..71a49423 100644 --- a/lib/openai/models/responses/response_includable.rb +++ b/lib/openai/models/responses/response_includable.rb @@ -4,13 +4,13 @@ module OpenAI module Models module Responses # Specify additional output data to include in the model response. Currently - # supported values are: + # supported values are: # - # - `file_search_call.results`: Include the search results of the file search tool - # call. - # - `message.input_image.image_url`: Include image urls from the input message. - # - `computer_call_output.output.image_url`: Include image urls from the computer - # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. 
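The includable values listed above are passed as the `include` array when creating (or retrieving) a response; a hedged sketch:

```ruby
# Hedged sketch: request extra output data alongside the response itself.
response = client.responses.create(
  model: "gpt-4o",
  input: "Summarise the attached policy document.",
  include: ["file_search_call.results", "message.input_image.image_url"]
)
```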
module ResponseIncludable extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/responses/response_input_image.rb b/lib/openai/models/responses/response_input_image.rb index 883f8491..c508cf8a 100644 --- a/lib/openai/models/responses/response_input_image.rb +++ b/lib/openai/models/responses/response_input_image.rb @@ -6,7 +6,7 @@ module Responses class ResponseInputImage < OpenAI::Internal::Type::BaseModel # @!attribute detail # The detail level of the image to be sent to the model. One of `high`, `low`, or - # `auto`. Defaults to `auto`. + # `auto`. Defaults to `auto`. # # @return [Symbol, OpenAI::Models::Responses::ResponseInputImage::Detail] required :detail, enum: -> { OpenAI::Models::Responses::ResponseInputImage::Detail } @@ -25,14 +25,14 @@ class ResponseInputImage < OpenAI::Internal::Type::BaseModel # @!attribute image_url # The URL of the image to be sent to the model. A fully qualified URL or base64 - # encoded image in a data URL. + # encoded image in a data URL. # # @return [String, nil] optional :image_url, String, nil?: true # @!parse # # An image input to the model. Learn about - # # [image inputs](https://platform.openai.com/docs/guides/vision). + # # [image inputs](https://platform.openai.com/docs/guides/vision). # # # # @param detail [Symbol, OpenAI::Models::Responses::ResponseInputImage::Detail] # # @param file_id [String, nil] @@ -44,7 +44,7 @@ class ResponseInputImage < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The detail level of the image to be sent to the model. One of `high`, `low`, or - # `auto`. Defaults to `auto`. + # `auto`. Defaults to `auto`. # # @see OpenAI::Models::Responses::ResponseInputImage#detail module Detail diff --git a/lib/openai/models/responses/response_input_item.rb b/lib/openai/models/responses/response_input_item.rb index 62862e0e..db0e26d2 100644 --- a/lib/openai/models/responses/response_input_item.rb +++ b/lib/openai/models/responses/response_input_item.rb @@ -4,10 +4,10 @@ module OpenAI module Models module Responses # A message input to the model with a role indicating instruction following - # hierarchy. Instructions given with the `developer` or `system` role take - # precedence over instructions given with the `user` role. Messages with the - # `assistant` role are presumed to have been generated by the model in previous - # interactions. + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. module ResponseInputItem extend OpenAI::Internal::Type::Union @@ -60,7 +60,7 @@ module ResponseInputItem class Message < OpenAI::Internal::Type::BaseModel # @!attribute content # A list of one or many input items to the model, containing different content - # types. + # types. # # @return [Array] required :content, @@ -74,7 +74,7 @@ class Message < OpenAI::Internal::Type::BaseModel # @!attribute [r] status # The status of item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. 
# # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Status, nil] optional :status, enum: -> { OpenAI::Models::Responses::ResponseInputItem::Message::Status } @@ -95,8 +95,8 @@ class Message < OpenAI::Internal::Type::BaseModel # @!parse # # A message input to the model with a role indicating instruction following - # # hierarchy. Instructions given with the `developer` or `system` role take - # # precedence over instructions given with the `user` role. + # # hierarchy. Instructions given with the `developer` or `system` role take + # # precedence over instructions given with the `user` role. # # # # @param content [Array] # # @param role [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Role] @@ -125,7 +125,7 @@ module Role end # The status of item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @see OpenAI::Models::Responses::ResponseInputItem::Message#status module Status @@ -189,7 +189,7 @@ class ComputerCallOutput < OpenAI::Internal::Type::BaseModel # @!attribute [r] acknowledged_safety_checks # The safety checks reported by the API that have been acknowledged by the - # developer. + # developer. # # @return [Array, nil] optional :acknowledged_safety_checks, @@ -201,7 +201,7 @@ class ComputerCallOutput < OpenAI::Internal::Type::BaseModel # @!attribute [r] status # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Status, nil] optional :status, enum: -> { OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Status } @@ -256,7 +256,7 @@ class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel end # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. # # @see OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput#status module Status @@ -295,7 +295,7 @@ class FunctionCallOutput < OpenAI::Internal::Type::BaseModel # @!attribute [r] id # The unique ID of the function tool call output. Populated when this item is - # returned via API. + # returned via API. # # @return [String, nil] optional :id, String @@ -306,7 +306,7 @@ class FunctionCallOutput < OpenAI::Internal::Type::BaseModel # @!attribute [r] status # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::Status, nil] optional :status, enum: -> { OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::Status } @@ -329,7 +329,7 @@ class FunctionCallOutput < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. 
# # @see OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput#status module Status diff --git a/lib/openai/models/responses/response_input_message_item.rb b/lib/openai/models/responses/response_input_message_item.rb index 3bb170a9..b270e74d 100644 --- a/lib/openai/models/responses/response_input_message_item.rb +++ b/lib/openai/models/responses/response_input_message_item.rb @@ -12,7 +12,7 @@ class ResponseInputMessageItem < OpenAI::Internal::Type::BaseModel # @!attribute content # A list of one or many input items to the model, containing different content - # types. + # types. # # @return [Array] required :content, @@ -26,7 +26,7 @@ class ResponseInputMessageItem < OpenAI::Internal::Type::BaseModel # @!attribute [r] status # The status of item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Status, nil] optional :status, enum: -> { OpenAI::Models::Responses::ResponseInputMessageItem::Status } @@ -74,7 +74,7 @@ module Role end # The status of item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @see OpenAI::Models::Responses::ResponseInputMessageItem#status module Status diff --git a/lib/openai/models/responses/response_output_message.rb b/lib/openai/models/responses/response_output_message.rb index 272c8ab1..b46f43d3 100644 --- a/lib/openai/models/responses/response_output_message.rb +++ b/lib/openai/models/responses/response_output_message.rb @@ -25,7 +25,7 @@ class ResponseOutputMessage < OpenAI::Internal::Type::BaseModel # @!attribute status # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseOutputMessage::Status] required :status, enum: -> { OpenAI::Models::Responses::ResponseOutputMessage::Status } @@ -67,7 +67,7 @@ module Content end # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. # # @see OpenAI::Models::Responses::ResponseOutputMessage#status module Status diff --git a/lib/openai/models/responses/response_reasoning_item.rb b/lib/openai/models/responses/response_reasoning_item.rb index fa4d428c..3ae31538 100644 --- a/lib/openai/models/responses/response_reasoning_item.rb +++ b/lib/openai/models/responses/response_reasoning_item.rb @@ -25,7 +25,7 @@ class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel # @!attribute [r] status # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseReasoningItem::Status, nil] optional :status, enum: -> { OpenAI::Models::Responses::ResponseReasoningItem::Status } @@ -36,7 +36,7 @@ class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel # @!parse # # A description of the chain of thought used by a reasoning model while generating - # # a response. + # # a response. # # # # @param id [String] # # @param summary [Array] @@ -70,7 +70,7 @@ class Summary < OpenAI::Internal::Type::BaseModel end # The status of the item. 
One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @see OpenAI::Models::Responses::ResponseReasoningItem#status module Status diff --git a/lib/openai/models/responses/response_retrieve_params.rb b/lib/openai/models/responses/response_retrieve_params.rb index eaef643f..1253ccfa 100644 --- a/lib/openai/models/responses/response_retrieve_params.rb +++ b/lib/openai/models/responses/response_retrieve_params.rb @@ -11,7 +11,7 @@ class ResponseRetrieveParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] include # Additional fields to include in the response. See the `include` parameter for - # Response creation above for more information. + # Response creation above for more information. # # @return [Array, nil] optional :include, diff --git a/lib/openai/models/responses/response_status.rb b/lib/openai/models/responses/response_status.rb index 15876663..da96c2e8 100644 --- a/lib/openai/models/responses/response_status.rb +++ b/lib/openai/models/responses/response_status.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Responses # The status of the response generation. One of `completed`, `failed`, - # `in_progress`, or `incomplete`. + # `in_progress`, or `incomplete`. module ResponseStatus extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/responses/response_text_config.rb b/lib/openai/models/responses/response_text_config.rb index 65f31b36..7901b8fb 100644 --- a/lib/openai/models/responses/response_text_config.rb +++ b/lib/openai/models/responses/response_text_config.rb @@ -7,17 +7,17 @@ class ResponseTextConfig < OpenAI::Internal::Type::BaseModel # @!attribute [r] format_ # An object specifying the format that the model must output. # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # The default format is `{ "type": "text" }` with no additional options. + # The default format is `{ "type": "text" }` with no additional options. # - # **Not recommended for gpt-4o and newer models:** + # **Not recommended for gpt-4o and newer models:** # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. # # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil] optional :format_, union: -> { OpenAI::Models::Responses::ResponseFormatTextConfig }, api_name: :format @@ -28,10 +28,10 @@ class ResponseTextConfig < OpenAI::Internal::Type::BaseModel # @!parse # # Configuration options for a text response from the model. Can be plain text or - # # structured JSON data. Learn more: + # # structured JSON data. 
Learn more: # # - # # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) # # # # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] # # # diff --git a/lib/openai/models/responses/response_usage.rb b/lib/openai/models/responses/response_usage.rb index 8d6bee6d..2a2ecd8f 100644 --- a/lib/openai/models/responses/response_usage.rb +++ b/lib/openai/models/responses/response_usage.rb @@ -36,7 +36,7 @@ class ResponseUsage < OpenAI::Internal::Type::BaseModel # @!parse # # Represents token usage details including input tokens, output tokens, a - # # breakdown of output tokens, and the total tokens used. + # # breakdown of output tokens, and the total tokens used. # # # # @param input_tokens [Integer] # # @param input_tokens_details [OpenAI::Models::Responses::ResponseUsage::InputTokensDetails] @@ -52,7 +52,7 @@ class ResponseUsage < OpenAI::Internal::Type::BaseModel class InputTokensDetails < OpenAI::Internal::Type::BaseModel # @!attribute cached_tokens # The number of tokens that were retrieved from the cache. - # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). # # @return [Integer] required :cached_tokens, Integer diff --git a/lib/openai/models/responses/tool.rb b/lib/openai/models/responses/tool.rb index 9c696f1f..e9be0652 100644 --- a/lib/openai/models/responses/tool.rb +++ b/lib/openai/models/responses/tool.rb @@ -4,8 +4,8 @@ module OpenAI module Models module Responses # A tool that searches for relevant content from uploaded files. Learn more about - # the - # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + # the + # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). module Tool extend OpenAI::Internal::Type::Union diff --git a/lib/openai/models/responses/tool_choice_options.rb b/lib/openai/models/responses/tool_choice_options.rb index 7b08e8c7..789817e8 100644 --- a/lib/openai/models/responses/tool_choice_options.rb +++ b/lib/openai/models/responses/tool_choice_options.rb @@ -5,12 +5,12 @@ module Models module Responses # Controls which (if any) tool is called by the model. # - # `none` means the model will not call any tool and instead generates a message. + # `none` means the model will not call any tool and instead generates a message. # - # `auto` means the model can pick between generating a message or calling one or - # more tools. + # `auto` means the model can pick between generating a message or calling one or + # more tools. # - # `required` means the model must call one or more tools. + # `required` means the model must call one or more tools. module ToolChoiceOptions extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/responses/tool_choice_types.rb b/lib/openai/models/responses/tool_choice_types.rb index fe1d606d..d26c027f 100644 --- a/lib/openai/models/responses/tool_choice_types.rb +++ b/lib/openai/models/responses/tool_choice_types.rb @@ -6,20 +6,20 @@ module Responses class ToolChoiceTypes < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of hosted tool the model should use.
Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). + # [built-in tools](https://platform.openai.com/docs/guides/tools). # - # Allowed values are: + # Allowed values are: # - # - `file_search` - # - `web_search_preview` - # - `computer_use_preview` + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` # # @return [Symbol, OpenAI::Models::Responses::ToolChoiceTypes::Type] required :type, enum: -> { OpenAI::Models::Responses::ToolChoiceTypes::Type } # @!parse # # Indicates that the model should use a built-in tool to generate a response. - # # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). + # # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). # # # # @param type [Symbol, OpenAI::Models::Responses::ToolChoiceTypes::Type] # # @@ -28,13 +28,13 @@ class ToolChoiceTypes < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # The type of hosted tool the model should to use. Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). + # [built-in tools](https://platform.openai.com/docs/guides/tools). # - # Allowed values are: + # Allowed values are: # - # - `file_search` - # - `web_search_preview` - # - `computer_use_preview` + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` # # @see OpenAI::Models::Responses::ToolChoiceTypes#type module Type diff --git a/lib/openai/models/responses/web_search_tool.rb b/lib/openai/models/responses/web_search_tool.rb index 504bcab1..90f84d39 100644 --- a/lib/openai/models/responses/web_search_tool.rb +++ b/lib/openai/models/responses/web_search_tool.rb @@ -7,15 +7,15 @@ class WebSearchTool < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of the web search tool. One of: # - # - `web_search_preview` - # - `web_search_preview_2025_03_11` + # - `web_search_preview` + # - `web_search_preview_2025_03_11` # # @return [Symbol, OpenAI::Models::Responses::WebSearchTool::Type] required :type, enum: -> { OpenAI::Models::Responses::WebSearchTool::Type } # @!attribute [r] search_context_size # High level guidance for the amount of context window space to use for the - # search. One of `low`, `medium`, or `high`. `medium` is the default. + # search. One of `low`, `medium`, or `high`. `medium` is the default. # # @return [Symbol, OpenAI::Models::Responses::WebSearchTool::SearchContextSize, nil] optional :search_context_size, enum: -> { OpenAI::Models::Responses::WebSearchTool::SearchContextSize } @@ -31,8 +31,8 @@ class WebSearchTool < OpenAI::Internal::Type::BaseModel # @!parse # # This tool searches the web for relevant results to use in a response. Learn more - # # about the - # # [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + # # about the + # # [web search tool](https://platform.openai.com/docs/guides/tools-web-search). # # # # @param type [Symbol, OpenAI::Models::Responses::WebSearchTool::Type] # # @param search_context_size [Symbol, OpenAI::Models::Responses::WebSearchTool::SearchContextSize] @@ -44,8 +44,8 @@ class WebSearchTool < OpenAI::Internal::Type::BaseModel # The type of the web search tool. 
One of: # - # - `web_search_preview` - # - `web_search_preview_2025_03_11` + # - `web_search_preview` + # - `web_search_preview_2025_03_11` # # @see OpenAI::Models::Responses::WebSearchTool#type module Type @@ -62,7 +62,7 @@ module Type end # High level guidance for the amount of context window space to use for the - # search. One of `low`, `medium`, or `high`. `medium` is the default. + # search. One of `low`, `medium`, or `high`. `medium` is the default. # # @see OpenAI::Models::Responses::WebSearchTool#search_context_size module SearchContextSize @@ -99,7 +99,7 @@ class UserLocation < OpenAI::Internal::Type::BaseModel # @!attribute [r] country # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of - # the user, e.g. `US`. + # the user, e.g. `US`. # # @return [String, nil] optional :country, String @@ -120,7 +120,7 @@ class UserLocation < OpenAI::Internal::Type::BaseModel # @!attribute [r] timezone # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the - # user, e.g. `America/Los_Angeles`. + # user, e.g. `America/Los_Angeles`. # # @return [String, nil] optional :timezone, String diff --git a/lib/openai/models/static_file_chunking_strategy.rb b/lib/openai/models/static_file_chunking_strategy.rb index 9f466cc0..9bff61c3 100644 --- a/lib/openai/models/static_file_chunking_strategy.rb +++ b/lib/openai/models/static_file_chunking_strategy.rb @@ -6,14 +6,14 @@ class StaticFileChunkingStrategy < OpenAI::Internal::Type::BaseModel # @!attribute chunk_overlap_tokens # The number of tokens that overlap between chunks. The default value is `400`. # - # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. # # @return [Integer] required :chunk_overlap_tokens, Integer # @!attribute max_chunk_size_tokens # The maximum number of tokens in each chunk. The default value is `800`. The - # minimum value is `100` and the maximum value is `4096`. + # minimum value is `100` and the maximum value is `4096`. # # @return [Integer] required :max_chunk_size_tokens, Integer diff --git a/lib/openai/models/upload.rb b/lib/openai/models/upload.rb index d0da7a8f..9151584f 100644 --- a/lib/openai/models/upload.rb +++ b/lib/openai/models/upload.rb @@ -42,8 +42,8 @@ class Upload < OpenAI::Internal::Type::BaseModel # @!attribute purpose # The intended purpose of the file. - # [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) - # for acceptable values. + # [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) + # for acceptable values. # # @return [String] required :purpose, String diff --git a/lib/openai/models/upload_complete_params.rb b/lib/openai/models/upload_complete_params.rb index 378f2269..3b8eb1b5 100644 --- a/lib/openai/models/upload_complete_params.rb +++ b/lib/openai/models/upload_complete_params.rb @@ -16,7 +16,7 @@ class UploadCompleteParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] md5 # The optional md5 checksum for the file contents to verify if the bytes uploaded - # matches what you expect. + # match what you expect.
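The optional `md5` documented just above can be produced with Ruby's standard library; the completion call itself is a guess inferred from the params class name, not something this diff shows:

    require "digest"

    # Checksum over the full file contents, so the server can verify that the
    # uploaded bytes are what you expect.
    checksum = Digest::MD5.hexdigest(File.binread("training_data.jsonl"))

    # Hypothetical call shape, inferred from UploadCompleteParams:
    # client.uploads.complete("upload_abc123", part_ids: part_ids, md5: checksum)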
# # @return [String, nil] optional :md5, String diff --git a/lib/openai/models/upload_create_params.rb b/lib/openai/models/upload_create_params.rb index 9b199b9d..aa01ef38 100644 --- a/lib/openai/models/upload_create_params.rb +++ b/lib/openai/models/upload_create_params.rb @@ -23,8 +23,8 @@ class UploadCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute mime_type # The MIME type of the file. # - # This must fall within the supported MIME types for your file purpose. See the - # supported MIME types for assistants and vision. + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. # # @return [String] required :mime_type, String @@ -32,8 +32,8 @@ class UploadCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute purpose # The intended purpose of the uploaded file. # - # See the - # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). # # @return [Symbol, OpenAI::Models::FilePurpose] required :purpose, enum: -> { OpenAI::Models::FilePurpose } diff --git a/lib/openai/models/vector_store.rb b/lib/openai/models/vector_store.rb index 4b214ef5..9abda127 100644 --- a/lib/openai/models/vector_store.rb +++ b/lib/openai/models/vector_store.rb @@ -29,11 +29,11 @@ class VectorStore < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -52,8 +52,8 @@ class VectorStore < OpenAI::Internal::Type::BaseModel # @!attribute status # The status of the vector store, which can be either `expired`, `in_progress`, or - # `completed`. A status of `completed` indicates that the vector store is ready - # for use. + # `completed`. A status of `completed` indicates that the vector store is ready + # for use. # # @return [Symbol, OpenAI::Models::VectorStore::Status] required :status, enum: -> { OpenAI::Models::VectorStore::Status } @@ -82,7 +82,7 @@ class VectorStore < OpenAI::Internal::Type::BaseModel # @!parse # # A vector store is a collection of processed files that can be used by the - # # `file_search` tool. + # # `file_search` tool. # # # # @param id [String] # # @param created_at [Integer] @@ -160,8 +160,8 @@ class FileCounts < OpenAI::Internal::Type::BaseModel end # The status of the vector store, which can be either `expired`, `in_progress`, or - # `completed`. A status of `completed` indicates that the vector store is ready - # for use. + # `completed`. A status of `completed` indicates that the vector store is ready + # for use.
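The metadata limits restated throughout these hunks (at most 16 pairs, keys up to 64 characters, values up to 512) are easy to enforce client-side before a request. A self-contained sketch, not SDK behavior:

    # Guard for the documented metadata limits on vector stores.
    def validate_metadata!(metadata)
      raise ArgumentError, "metadata allows at most 16 pairs" if metadata.size > 16
      metadata.each do |key, value|
        raise ArgumentError, "key #{key} exceeds 64 chars" if key.to_s.length > 64
        raise ArgumentError, "value for #{key} exceeds 512 chars" if value.to_s.length > 512
      end
      metadata
    end

    validate_metadata!(project: "docs-qa", team: "platform")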
# # @see OpenAI::Models::VectorStore#status module Status @@ -182,7 +182,7 @@ module Status class ExpiresAfter < OpenAI::Internal::Type::BaseModel # @!attribute anchor # Anchor timestamp after which the expiration policy applies. Supported anchors: - # `last_active_at`. + # `last_active_at`. # # @return [Symbol, :last_active_at] required :anchor, const: :last_active_at diff --git a/lib/openai/models/vector_store_create_params.rb b/lib/openai/models/vector_store_create_params.rb index 42739728..8380c18b 100644 --- a/lib/openai/models/vector_store_create_params.rb +++ b/lib/openai/models/vector_store_create_params.rb @@ -10,7 +10,7 @@ class VectorStoreCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] chunking_strategy # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. # # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil] optional :chunking_strategy, union: -> { OpenAI::Models::FileChunkingStrategyParam } @@ -31,8 +31,8 @@ class VectorStoreCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] file_ids # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. + # the vector store should use. Useful for tools like `file_search` that can access + # files. # # @return [Array, nil] optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] @@ -43,11 +43,11 @@ class VectorStoreCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -87,7 +87,7 @@ class VectorStoreCreateParams < OpenAI::Internal::Type::BaseModel class ExpiresAfter < OpenAI::Internal::Type::BaseModel # @!attribute anchor # Anchor timestamp after which the expiration policy applies. Supported anchors: - # `last_active_at`. + # `last_active_at`. # # @return [Symbol, :last_active_at] required :anchor, const: :last_active_at diff --git a/lib/openai/models/vector_store_list_params.rb b/lib/openai/models/vector_store_list_params.rb index eaea91f5..345b2830 100644 --- a/lib/openai/models/vector_store_list_params.rb +++ b/lib/openai/models/vector_store_list_params.rb @@ -10,9 +10,9 @@ class VectorStoreListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. 
For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String @@ -23,9 +23,9 @@ class VectorStoreListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String @@ -36,7 +36,7 @@ class VectorStoreListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer @@ -47,7 +47,7 @@ class VectorStoreListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::VectorStoreListParams::Order, nil] optional :order, enum: -> { OpenAI::Models::VectorStoreListParams::Order } @@ -68,7 +68,7 @@ class VectorStoreListParams < OpenAI::Internal::Type::BaseModel # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/vector_store_search_params.rb b/lib/openai/models/vector_store_search_params.rb index 6325952c..653db38a 100644 --- a/lib/openai/models/vector_store_search_params.rb +++ b/lib/openai/models/vector_store_search_params.rb @@ -26,7 +26,7 @@ class VectorStoreSearchParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] max_num_results # The maximum number of results to return. This number should be between 1 and 50 - # inclusive. + # inclusive. # # @return [Integer, nil] optional :max_num_results, Integer diff --git a/lib/openai/models/vector_store_search_response.rb b/lib/openai/models/vector_store_search_response.rb index 61475c54..6b076e29 100644 --- a/lib/openai/models/vector_store_search_response.rb +++ b/lib/openai/models/vector_store_search_response.rb @@ -6,10 +6,10 @@ module Models class VectorStoreSearchResponse < OpenAI::Internal::Type::BaseModel # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. 
Values are strings with a maximum length of 512 + # characters, booleans, or numbers. # # @return [Hash{Symbol=>String, Float, Boolean}, nil] required :attributes, diff --git a/lib/openai/models/vector_store_update_params.rb b/lib/openai/models/vector_store_update_params.rb index 46a09e25..145e2808 100644 --- a/lib/openai/models/vector_store_update_params.rb +++ b/lib/openai/models/vector_store_update_params.rb @@ -16,11 +16,11 @@ class VectorStoreUpdateParams < OpenAI::Internal::Type::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -44,7 +44,7 @@ class VectorStoreUpdateParams < OpenAI::Internal::Type::BaseModel class ExpiresAfter < OpenAI::Internal::Type::BaseModel # @!attribute anchor # Anchor timestamp after which the expiration policy applies. Supported anchors: - # `last_active_at`. + # `last_active_at`. # # @return [Symbol, :last_active_at] required :anchor, const: :last_active_at diff --git a/lib/openai/models/vector_stores/file_batch_create_params.rb b/lib/openai/models/vector_stores/file_batch_create_params.rb index e6dd47c8..31d32b92 100644 --- a/lib/openai/models/vector_stores/file_batch_create_params.rb +++ b/lib/openai/models/vector_stores/file_batch_create_params.rb @@ -11,18 +11,18 @@ class FileBatchCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute file_ids # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. + # the vector store should use. Useful for tools like `file_search` that can access + # files. # # @return [Array] required :file_ids, OpenAI::Internal::Type::ArrayOf[String] # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. # # @return [Hash{Symbol=>String, Float, Boolean}, nil] optional :attributes, @@ -31,7 +31,7 @@ class FileBatchCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] chunking_strategy # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. 
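The static chunking constraints documented earlier (defaults of 800/400, a 100..4096 range for `max_chunk_size_tokens`, and overlap capped at half the chunk size) can be checked before building the `chunking_strategy` param. The `{ type: :static, static: {...} }` wrapper is an assumption based on the `StaticFileChunkingStrategyObjectParam` class name:

    max_chunk = 800
    overlap   = 400
    raise ArgumentError, "chunk size out of range" unless (100..4096).cover?(max_chunk)
    raise ArgumentError, "overlap exceeds half the chunk size" if overlap > max_chunk / 2

    chunking_strategy = {
      type: :static, # wrapper shape assumed, not shown in this diff
      static: { max_chunk_size_tokens: max_chunk, chunk_overlap_tokens: overlap }
    }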
# # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil] optional :chunking_strategy, union: -> { OpenAI::Models::FileChunkingStrategyParam } diff --git a/lib/openai/models/vector_stores/file_batch_list_files_params.rb b/lib/openai/models/vector_stores/file_batch_list_files_params.rb index 991ac9cc..0f0bacb3 100644 --- a/lib/openai/models/vector_stores/file_batch_list_files_params.rb +++ b/lib/openai/models/vector_stores/file_batch_list_files_params.rb @@ -16,9 +16,9 @@ class FileBatchListFilesParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String @@ -29,9 +29,9 @@ class FileBatchListFilesParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String @@ -52,7 +52,7 @@ class FileBatchListFilesParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer @@ -63,7 +63,7 @@ class FileBatchListFilesParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Order, nil] optional :order, enum: -> { OpenAI::Models::VectorStores::FileBatchListFilesParams::Order } @@ -102,7 +102,7 @@ module Filter end # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/vector_stores/file_create_params.rb b/lib/openai/models/vector_stores/file_create_params.rb index 37899d8f..6767c7dc 100644 --- a/lib/openai/models/vector_stores/file_create_params.rb +++ b/lib/openai/models/vector_stores/file_create_params.rb @@ -11,18 +11,18 @@ class FileCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute file_id # A [File](https://platform.openai.com/docs/api-reference/files) ID that the - # vector store should use. Useful for tools like `file_search` that can access - # files. + # vector store should use. Useful for tools like `file_search` that can access + # files. 
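The `after` cursor semantics spelled out in the list-params hunks above support a manual pagination loop. A sketch only: the resource call and the page accessors (`data`, `has_more`) are assumptions, since this diff shows just the `after`/`before`/`limit`/`order` params:

    after = nil
    loop do
      page = client.vector_stores.files.list("vs_abc123", after: after, limit: 100, order: :asc)
      page.data.each { |file| puts file.id }
      break unless page.has_more # accessor names assumed
      after = page.data.last.id  # the last object's ID becomes the next cursor
    end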
# # @return [String] required :file_id, String # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. # # @return [Hash{Symbol=>String, Float, Boolean}, nil] optional :attributes, @@ -31,7 +31,7 @@ class FileCreateParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] chunking_strategy # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. # # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil] optional :chunking_strategy, union: -> { OpenAI::Models::FileChunkingStrategyParam } diff --git a/lib/openai/models/vector_stores/file_list_params.rb b/lib/openai/models/vector_stores/file_list_params.rb index 0bcfb9d3..88d3a55c 100644 --- a/lib/openai/models/vector_stores/file_list_params.rb +++ b/lib/openai/models/vector_stores/file_list_params.rb @@ -11,9 +11,9 @@ class FileListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String @@ -24,9 +24,9 @@ class FileListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String @@ -47,7 +47,7 @@ class FileListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer @@ -58,7 +58,7 @@ class FileListParams < OpenAI::Internal::Type::BaseModel # @!attribute [r] order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. 
# # @return [Symbol, OpenAI::Models::VectorStores::FileListParams::Order, nil] optional :order, enum: -> { OpenAI::Models::VectorStores::FileListParams::Order } @@ -96,7 +96,7 @@ module Filter end # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/lib/openai/models/vector_stores/file_update_params.rb b/lib/openai/models/vector_stores/file_update_params.rb index 7f72b454..2e5f4d52 100644 --- a/lib/openai/models/vector_stores/file_update_params.rb +++ b/lib/openai/models/vector_stores/file_update_params.rb @@ -16,10 +16,10 @@ class FileUpdateParams < OpenAI::Internal::Type::BaseModel # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. # # @return [Hash{Symbol=>String, Float, Boolean}, nil] required :attributes, diff --git a/lib/openai/models/vector_stores/vector_store_file.rb b/lib/openai/models/vector_stores/vector_store_file.rb index 7845ce47..71ba4e7d 100644 --- a/lib/openai/models/vector_stores/vector_store_file.rb +++ b/lib/openai/models/vector_stores/vector_store_file.rb @@ -19,7 +19,7 @@ class VectorStoreFile < OpenAI::Internal::Type::BaseModel # @!attribute last_error # The last error associated with this vector store file. Will be `null` if there - # are no errors. + # are no errors. # # @return [OpenAI::Models::VectorStores::VectorStoreFile::LastError, nil] required :last_error, -> { OpenAI::Models::VectorStores::VectorStoreFile::LastError }, nil?: true @@ -32,34 +32,34 @@ class VectorStoreFile < OpenAI::Internal::Type::BaseModel # @!attribute status # The status of the vector store file, which can be either `in_progress`, - # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the - # vector store file is ready for use. + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. # # @return [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::Status] required :status, enum: -> { OpenAI::Models::VectorStores::VectorStoreFile::Status } # @!attribute usage_bytes # The total vector store usage in bytes. Note that this may be different from the - # original file size. + # original file size. # # @return [Integer] required :usage_bytes, Integer # @!attribute vector_store_id # The ID of the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # that the [File](https://platform.openai.com/docs/api-reference/files) is - # attached to. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. # # @return [String] required :vector_store_id, String # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. 
This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. # # @return [Hash{Symbol=>String, Float, Boolean}, nil] optional :attributes, @@ -122,7 +122,7 @@ class LastError < OpenAI::Internal::Type::BaseModel # @!parse # # The last error associated with this vector store file. Will be `null` if there - # # are no errors. + # # are no errors. # # # # @param code [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code] # # @param message [String] @@ -150,8 +150,8 @@ module Code end # The status of the vector store file, which can be either `in_progress`, - # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the - # vector store file is ready for use. + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. # # @see OpenAI::Models::VectorStores::VectorStoreFile#status module Status diff --git a/lib/openai/models/vector_stores/vector_store_file_batch.rb b/lib/openai/models/vector_stores/vector_store_file_batch.rb index ebcd7390..55e8644b 100644 --- a/lib/openai/models/vector_stores/vector_store_file_batch.rb +++ b/lib/openai/models/vector_stores/vector_store_file_batch.rb @@ -13,7 +13,7 @@ class VectorStoreFileBatch < OpenAI::Internal::Type::BaseModel # @!attribute created_at # The Unix timestamp (in seconds) for when the vector store files batch was - # created. + # created. # # @return [Integer] required :created_at, Integer @@ -31,16 +31,16 @@ class VectorStoreFileBatch < OpenAI::Internal::Type::BaseModel # @!attribute status # The status of the vector store files batch, which can be either `in_progress`, - # `completed`, `cancelled` or `failed`. + # `completed`, `cancelled` or `failed`. # # @return [Symbol, OpenAI::Models::VectorStores::VectorStoreFileBatch::Status] required :status, enum: -> { OpenAI::Models::VectorStores::VectorStoreFileBatch::Status } # @!attribute vector_store_id # The ID of the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # that the [File](https://platform.openai.com/docs/api-reference/files) is - # attached to. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. # # @return [String] required :vector_store_id, String @@ -104,7 +104,7 @@ class FileCounts < OpenAI::Internal::Type::BaseModel end # The status of the vector store files batch, which can be either `in_progress`, - # `completed`, `cancelled` or `failed`. + # `completed`, `cancelled` or `failed`. # # @see OpenAI::Models::VectorStores::VectorStoreFileBatch#status module Status diff --git a/lib/openai/request_options.rb b/lib/openai/request_options.rb index 5f374fb2..f4eaf933 100644 --- a/lib/openai/request_options.rb +++ b/lib/openai/request_options.rb @@ -2,10 +2,10 @@ module OpenAI # Specify HTTP behaviour to use for a specific request. These options supplement - # or override those provided at the client level. 
+ # or override those provided at the client level. # - # When making a request, you can pass an actual {RequestOptions} instance, or - # simply pass a Hash with symbol keys matching the attributes on this class. + # When making a request, you can pass an actual {RequestOptions} instance, or + # simply pass a Hash with symbol keys matching the attributes on this class. class RequestOptions < OpenAI::Internal::Type::BaseModel # @api private # @@ -27,28 +27,28 @@ def self.validate!(opts) # @!attribute idempotency_key # Idempotency key to send with request and all associated retries. Will only be - # sent for write requests. + # sent for write requests. # # @return [String, nil] optional :idempotency_key, String # @!attribute extra_query # Extra query params to send with the request. These are `.merge`’d into any - # `query` given at the client level. + # `query` given at the client level. # # @return [Hash{String=>Array, String, nil}, nil] optional :extra_query, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::ArrayOf[String]] # @!attribute extra_headers # Extra headers to send with the request. These are `.merge`’d into any - # `extra_headers` given at the client level. + # `extra_headers` given at the client level. # # @return [Hash{String=>String, nil}, nil] optional :extra_headers, OpenAI::Internal::Type::HashOf[String, nil?: true] # @!attribute extra_body # Extra data to send with the request. These are deep merged into any data - # generated as part of the normal request. + # generated as part of the normal request. # # @return [Object, nil] optional :extra_body, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] diff --git a/lib/openai/resources/audio/transcriptions.rb b/lib/openai/resources/audio/transcriptions.rb index 2a669ca4..647f80c8 100644 --- a/lib/openai/resources/audio/transcriptions.rb +++ b/lib/openai/resources/audio/transcriptions.rb @@ -5,9 +5,9 @@ module Resources class Audio class Transcriptions # See {OpenAI::Resources::Audio::Transcriptions#create_streaming} for streaming - # counterpart. + # counterpart. # - # Transcribes audio into the input language. + # Transcribes audio into the input language. # # @overload create(file:, model:, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {}) # @@ -41,9 +41,9 @@ def create(params) end # See {OpenAI::Resources::Audio::Transcriptions#create} for non-streaming - # counterpart. + # counterpart. # - # Transcribes audio into the input language. + # Transcribes audio into the input language. # # @overload create_streaming(file:, model:, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {}) # diff --git a/lib/openai/resources/batches.rb b/lib/openai/resources/batches.rb index fdd3806a..84f5ad93 100644 --- a/lib/openai/resources/batches.rb +++ b/lib/openai/resources/batches.rb @@ -70,8 +70,8 @@ def list(params = {}) end # Cancels an in-progress batch. The batch will be in status `cancelling` for up to - # 10 minutes, before changing to `cancelled`, where it will have partial results - # (if any) available in the output file. + # 10 minutes, before changing to `cancelled`, where it will have partial results + # (if any) available in the output file.
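The `RequestOptions` attributes reflowed above can be passed per call as a plain Hash with symbol keys, as the class docs note. A sketch against the `batches.cancel` overload that follows (the client construction is an assumption):

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"]) # constructor assumed

    client.batches.cancel(
      "batch_abc123",
      request_options: {
        idempotency_key: "cancel-batch_abc123-v1",           # reused across retries
        extra_headers: { "x-request-source" => "backfill" }, # merged into client headers
        extra_query: { "debug" => ["1"] }                    # merged into client query
      }
    )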
# # @overload cancel(batch_id, request_options: {}) # diff --git a/lib/openai/resources/beta/threads.rb b/lib/openai/resources/beta/threads.rb index 3eea6fe8..b72ad867 100644 --- a/lib/openai/resources/beta/threads.rb +++ b/lib/openai/resources/beta/threads.rb @@ -96,7 +96,7 @@ def delete(thread_id, params = {}) # See {OpenAI::Resources::Beta::Threads#stream_raw} for streaming counterpart. # - # Create a thread and run it in one request. + # Create a thread and run it in one request. # # @overload create_and_run(assistant_id:, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, response_format: nil, temperature: nil, thread: nil, tool_choice: nil, tool_resources: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {}) # @@ -136,9 +136,9 @@ def create_and_run(params) end # See {OpenAI::Resources::Beta::Threads#create_and_run} for non-streaming - # counterpart. + # counterpart. # - # Create a thread and run it in one request. + # Create a thread and run it in one request. # # @overload stream_raw(assistant_id:, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, response_format: nil, temperature: nil, thread: nil, tool_choice: nil, tool_resources: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {}) # diff --git a/lib/openai/resources/beta/threads/runs.rb b/lib/openai/resources/beta/threads/runs.rb index cd12bbcc..8ebefd9c 100644 --- a/lib/openai/resources/beta/threads/runs.rb +++ b/lib/openai/resources/beta/threads/runs.rb @@ -9,9 +9,9 @@ class Runs attr_reader :steps # See {OpenAI::Resources::Beta::Threads::Runs#create_stream_raw} for streaming - # counterpart. + # counterpart. # - # Create a run. + # Create a run. # # @overload create(thread_id, assistant_id:, include: nil, additional_instructions: nil, additional_messages: nil, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_choice: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {}) # @@ -56,9 +56,9 @@ def create(thread_id, params) end # See {OpenAI::Resources::Beta::Threads::Runs#create} for non-streaming - # counterpart. + # counterpart. # - # Create a run. + # Create a run. # # @overload create_stream_raw(thread_id, assistant_id:, include: nil, additional_instructions: nil, additional_messages: nil, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_choice: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {}) # @@ -209,12 +209,12 @@ def cancel(run_id, params) end # See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs_stream_raw} for - # streaming counterpart. + # streaming counterpart. # - # When a run has the `status: "requires_action"` and `required_action.type` is - # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the - # tool calls once they're all completed. All outputs must be submitted in a single - # request. + # When a run has the `status: "requires_action"` and `required_action.type` is + # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + # tool calls once they're all completed. All outputs must be submitted in a single + # request. 
# # @overload submit_tool_outputs(run_id, thread_id:, tool_outputs:, request_options: {}) # @@ -246,12 +246,12 @@ def submit_tool_outputs(run_id, params) end # See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs} for - # non-streaming counterpart. + # non-streaming counterpart. # - # When a run has the `status: "requires_action"` and `required_action.type` is - # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the - # tool calls once they're all completed. All outputs must be submitted in a single - # request. + # When a run has the `status: "requires_action"` and `required_action.type` is + # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + # tool calls once they're all completed. All outputs must be submitted in a single + # request. # # @overload submit_tool_outputs_stream_raw(run_id, thread_id:, tool_outputs:, request_options: {}) # diff --git a/lib/openai/resources/chat/completions.rb b/lib/openai/resources/chat/completions.rb index 8d816acc..499c9f52 100644 --- a/lib/openai/resources/chat/completions.rb +++ b/lib/openai/resources/chat/completions.rb @@ -9,23 +9,23 @@ class Completions # See {OpenAI::Resources::Chat::Completions#stream_raw} for streaming counterpart. # - # **Starting a new project?** We recommend trying - # [Responses](https://platform.openai.com/docs/api-reference/responses) to take - # advantage of the latest OpenAI platform features. Compare - # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + # **Starting a new project?** We recommend trying + # [Responses](https://platform.openai.com/docs/api-reference/responses) to take + # advantage of the latest OpenAI platform features. Compare + # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). # - # --- + # --- # - # Creates a model response for the given chat conversation. Learn more in the - # [text generation](https://platform.openai.com/docs/guides/text-generation), - # [vision](https://platform.openai.com/docs/guides/vision), and - # [audio](https://platform.openai.com/docs/guides/audio) guides. + # Creates a model response for the given chat conversation. Learn more in the + # [text generation](https://platform.openai.com/docs/guides/text-generation), + # [vision](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio) guides. # - # Parameter support can differ depending on the model used to generate the - # response, particularly for newer reasoning models. Parameters that are only - # supported for reasoning models are noted below. For the current state of - # unsupported parameters in reasoning models, - # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). + # Parameter support can differ depending on the model used to generate the + # response, particularly for newer reasoning models. Parameters that are only + # supported for reasoning models are noted below. For the current state of + # unsupported parameters in reasoning models, + # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). 
# # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, reasoning_effort: nil, response_format: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {}) # @@ -81,23 +81,23 @@ def create(params) # See {OpenAI::Resources::Chat::Completions#create} for non-streaming counterpart. # - # **Starting a new project?** We recommend trying - # [Responses](https://platform.openai.com/docs/api-reference/responses) to take - # advantage of the latest OpenAI platform features. Compare - # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + # **Starting a new project?** We recommend trying + # [Responses](https://platform.openai.com/docs/api-reference/responses) to take + # advantage of the latest OpenAI platform features. Compare + # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). # - # --- + # --- # - # Creates a model response for the given chat conversation. Learn more in the - # [text generation](https://platform.openai.com/docs/guides/text-generation), - # [vision](https://platform.openai.com/docs/guides/vision), and - # [audio](https://platform.openai.com/docs/guides/audio) guides. + # Creates a model response for the given chat conversation. Learn more in the + # [text generation](https://platform.openai.com/docs/guides/text-generation), + # [vision](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio) guides. # - # Parameter support can differ depending on the model used to generate the - # response, particularly for newer reasoning models. Parameters that are only - # supported for reasoning models are noted below. For the current state of - # unsupported parameters in reasoning models, - # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). + # Parameter support can differ depending on the model used to generate the + # response, particularly for newer reasoning models. Parameters that are only + # supported for reasoning models are noted below. For the current state of + # unsupported parameters in reasoning models, + # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). # # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, reasoning_effort: nil, response_format: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {}) # @@ -155,7 +155,7 @@ def stream_raw(params) end # Get a stored chat completion. Only Chat Completions that have been created with - # the `store` parameter set to `true` will be returned. + # the `store` parameter set to `true` will be returned. 
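The `store: true` requirement repeated across `retrieve`, `update`, `list`, and `delete` suggests a simple round trip. A sketch with a placeholder model and prompt, reusing the `create` and `retrieve` overloads shown in these hunks:

    completion = client.chat.completions.create(
      model: "gpt-4o",                                    # placeholder model
      messages: [{ role: :user, content: "Say hello." }],
      store: true                                         # required for later retrieval
    )

    stored = client.chat.completions.retrieve(completion.id)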
# # @overload retrieve(completion_id, request_options: {}) # @@ -175,8 +175,8 @@ def retrieve(completion_id, params = {}) end # Modify a stored chat completion. Only Chat Completions that have been created - # with the `store` parameter set to `true` can be modified. Currently, the only - # supported modification is to update the `metadata` field. + # with the `store` parameter set to `true` can be modified. Currently, the only + # supported modification is to update the `metadata` field. # # @overload update(completion_id, metadata:, request_options: {}) # @@ -199,7 +199,7 @@ def update(completion_id, params) end # List stored Chat Completions. Only Chat Completions that have been stored with - # the `store` parameter set to `true` will be returned. + # the `store` parameter set to `true` will be returned. # # @overload list(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {}) # @@ -226,7 +226,7 @@ def list(params = {}) end # Delete a stored chat completion. Only Chat Completions that have been created - # with the `store` parameter set to `true` can be deleted. + # with the `store` parameter set to `true` can be deleted. # # @overload delete(completion_id, request_options: {}) # diff --git a/lib/openai/resources/chat/completions/messages.rb b/lib/openai/resources/chat/completions/messages.rb index 489df23b..ab3a3e19 100644 --- a/lib/openai/resources/chat/completions/messages.rb +++ b/lib/openai/resources/chat/completions/messages.rb @@ -6,7 +6,7 @@ class Chat class Completions class Messages # Get the messages in a stored chat completion. Only Chat Completions that have - # been created with the `store` parameter set to `true` will be returned. + # been created with the `store` parameter set to `true` will be returned. # # @overload list(completion_id, after: nil, limit: nil, order: nil, request_options: {}) # diff --git a/lib/openai/resources/completions.rb b/lib/openai/resources/completions.rb index 3440bd65..859c6b7f 100644 --- a/lib/openai/resources/completions.rb +++ b/lib/openai/resources/completions.rb @@ -5,7 +5,7 @@ module Resources class Completions # See {OpenAI::Resources::Completions#create_streaming} for streaming counterpart. # - # Creates a completion for the provided prompt and parameters. + # Creates a completion for the provided prompt and parameters. # # @overload create(model:, prompt:, best_of: nil, echo: nil, frequency_penalty: nil, logit_bias: nil, logprobs: nil, max_tokens: nil, n: nil, presence_penalty: nil, seed: nil, stop: nil, stream_options: nil, suffix: nil, temperature: nil, top_p: nil, user: nil, request_options: {}) # @@ -48,7 +48,7 @@ def create(params) # See {OpenAI::Resources::Completions#create} for non-streaming counterpart. # - # Creates a completion for the provided prompt and parameters. + # Creates a completion for the provided prompt and parameters. # # @overload create_streaming(model:, prompt:, best_of: nil, echo: nil, frequency_penalty: nil, logit_bias: nil, logprobs: nil, max_tokens: nil, n: nil, presence_penalty: nil, seed: nil, stop: nil, stream_options: nil, suffix: nil, temperature: nil, top_p: nil, user: nil, request_options: {}) # diff --git a/lib/openai/resources/files.rb b/lib/openai/resources/files.rb index 2c1b995e..3dc1bc0e 100644 --- a/lib/openai/resources/files.rb +++ b/lib/openai/resources/files.rb @@ -4,26 +4,26 @@ module OpenAI module Resources class Files # Upload a file that can be used across various endpoints. 
Individual files can be - # up to 512 MB, and the size of all files uploaded by one organization can be up - # to 100 GB. - # - # The Assistants API supports files up to 2 million tokens and of specific file - # types. See the - # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for - # details. - # - # The Fine-tuning API only supports `.jsonl` files. The input also has certain - # required formats for fine-tuning - # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - # models. - # - # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also - # has a specific required - # [format](https://platform.openai.com/docs/api-reference/batch/request-input). - # - # Please [contact us](https://help.openai.com/) if you need to increase these - # storage limits. + # up to 512 MB, and the size of all files uploaded by one organization can be up + # to 100 GB. + # + # The Assistants API supports files up to 2 million tokens and of specific file + # types. See the + # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for + # details. + # + # The Fine-tuning API only supports `.jsonl` files. The input also has certain + # required formats for fine-tuning + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # models. + # + # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also + # has a specific required + # [format](https://platform.openai.com/docs/api-reference/batch/request-input). + # + # Please [contact us](https://help.openai.com/) if you need to increase these + # storage limits. # # @overload create(file:, purpose:, request_options: {}) # diff --git a/lib/openai/resources/fine_tuning/jobs.rb b/lib/openai/resources/fine_tuning/jobs.rb index 8032932a..da5b8f03 100644 --- a/lib/openai/resources/fine_tuning/jobs.rb +++ b/lib/openai/resources/fine_tuning/jobs.rb @@ -8,12 +8,12 @@ class Jobs attr_reader :checkpoints # Creates a fine-tuning job which begins the process of creating a new model from - # a given dataset. + # a given dataset. # - # Response includes details of the enqueued job including job status and the name - # of the fine-tuned models once complete. + # Response includes details of the enqueued job including job status and the name + # of the fine-tuned models once complete. # - # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) # # @overload create(model:, training_file:, hyperparameters: nil, integrations: nil, metadata: nil, method_: nil, seed: nil, suffix: nil, validation_file: nil, request_options: {}) # @@ -44,7 +44,7 @@ def create(params) # Get info about a fine-tuning job. 
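The `FineTuning::Jobs#create` doc above says the response describes the enqueued job. A hedged sketch, with a placeholder file ID standing in for an already-uploaded `.jsonl` training file:

```ruby
# Sketch: enqueue a fine-tuning job from an uploaded `.jsonl` file.
# "file-abc123" is a placeholder, not a real object ID.
job = client.fine_tuning.jobs.create(
  model: "gpt-4o-mini",
  training_file: "file-abc123"
)
puts job.status # the response carries job status; the fine-tuned model name appears once complete
```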
# - # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) # # @overload retrieve(fine_tuning_job_id, request_options: {}) # diff --git a/lib/openai/resources/models.rb b/lib/openai/resources/models.rb index 3ab368d3..b77422d4 100644 --- a/lib/openai/resources/models.rb +++ b/lib/openai/resources/models.rb @@ -4,7 +4,7 @@ module OpenAI module Resources class Models # Retrieves a model instance, providing basic information about the model such as - # the owner and permissioning. + # the owner and permissioning. # # @overload retrieve(model, request_options: {}) # @@ -24,7 +24,7 @@ def retrieve(model, params = {}) end # Lists the currently available models, and provides basic information about each - # one such as the owner and availability. + # one such as the owner and availability. # # @overload list(request_options: {}) # @@ -44,7 +44,7 @@ def list(params = {}) end # Delete a fine-tuned model. You must have the Owner role in your organization to - # delete a model. + # delete a model. # # @overload delete(model, request_options: {}) # diff --git a/lib/openai/resources/moderations.rb b/lib/openai/resources/moderations.rb index 802ccbd1..6df4561d 100644 --- a/lib/openai/resources/moderations.rb +++ b/lib/openai/resources/moderations.rb @@ -4,7 +4,7 @@ module OpenAI module Resources class Moderations # Classifies if text and/or image inputs are potentially harmful. Learn more in - # the [moderation guide](https://platform.openai.com/docs/guides/moderation). + # the [moderation guide](https://platform.openai.com/docs/guides/moderation). # # @overload create(input:, model: nil, request_options: {}) # diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb index a4a9ff87..e77901b4 100644 --- a/lib/openai/resources/responses.rb +++ b/lib/openai/resources/responses.rb @@ -8,17 +8,17 @@ class Responses # See {OpenAI::Resources::Responses#stream_raw} for streaming counterpart. # - # Creates a model response. Provide - # [text](https://platform.openai.com/docs/guides/text) or - # [image](https://platform.openai.com/docs/guides/images) inputs to generate - # [text](https://platform.openai.com/docs/guides/text) or - # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have - # the model call your own - # [custom code](https://platform.openai.com/docs/guides/function-calling) or use - # built-in [tools](https://platform.openai.com/docs/guides/tools) like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use - # your own data as input for the model's response. + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. 
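The `Responses#create` doc above covers text/image input and tool use; a minimal text-only sketch (tool wiring omitted), again reusing the earlier `client`:

```ruby
# Sketch of Responses#create with plain text input.
response = client.responses.create(
  model: "gpt-4o",
  input: "Write a one-sentence bedtime story about a unicorn."
)
```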
# # @overload create(input:, model:, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) # @@ -61,17 +61,17 @@ def create(params) # See {OpenAI::Resources::Responses#create} for non-streaming counterpart. # - # Creates a model response. Provide - # [text](https://platform.openai.com/docs/guides/text) or - # [image](https://platform.openai.com/docs/guides/images) inputs to generate - # [text](https://platform.openai.com/docs/guides/text) or - # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have - # the model call your own - # [custom code](https://platform.openai.com/docs/guides/function-calling) or use - # built-in [tools](https://platform.openai.com/docs/guides/tools) like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use - # your own data as input for the model's response. + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. # # @overload stream_raw(input:, model:, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) # diff --git a/lib/openai/resources/uploads.rb b/lib/openai/resources/uploads.rb index 1f4fc2bf..1c497d22 100644 --- a/lib/openai/resources/uploads.rb +++ b/lib/openai/resources/uploads.rb @@ -7,24 +7,24 @@ class Uploads attr_reader :parts # Creates an intermediate - # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object - # that you can add - # [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. - # Currently, an Upload can accept at most 8 GB in total and expires after an hour - # after you create it. + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object + # that you can add + # [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. + # Currently, an Upload can accept at most 8 GB in total and expires after an hour + # after you create it. # - # Once you complete the Upload, we will create a - # [File](https://platform.openai.com/docs/api-reference/files/object) object that - # contains all the parts you uploaded. This File is usable in the rest of our - # platform as a regular File object. + # Once you complete the Upload, we will create a + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # contains all the parts you uploaded. This File is usable in the rest of our + # platform as a regular File object. 
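The `Uploads#create` doc above describes the two-phase flow: create the Upload shell, add Parts, then complete it within the hour. A hedged sketch of the first step, with size and names taken as placeholders:

```ruby
# Sketch: create the Upload first; Parts are added separately and the
# Upload is completed afterwards. Values below are placeholders.
upload = client.uploads.create(
  bytes: 2_147_483_648,        # total size of the eventual File
  filename: "training_examples.jsonl",
  mime_type: "text/jsonl",     # must be valid for the declared purpose
  purpose: "fine-tune"
)
```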
# - # For certain `purpose` values, the correct `mime_type` must be specified. Please - # refer to documentation for the - # [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). + # For certain `purpose` values, the correct `mime_type` must be specified. Please + # refer to documentation for the + # [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). # - # For guidance on the proper filename extensions for each purpose, please follow - # the documentation on - # [creating a File](https://platform.openai.com/docs/api-reference/files/create). + # For guidance on the proper filename extensions for each purpose, please follow + # the documentation on + # [creating a File](https://platform.openai.com/docs/api-reference/files/create). # # @overload create(bytes:, filename:, mime_type:, purpose:, request_options: {}) # @@ -68,18 +68,18 @@ def cancel(upload_id, params = {}) end # Completes the - # [Upload](https://platform.openai.com/docs/api-reference/uploads/object). + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object). # - # Within the returned Upload object, there is a nested - # [File](https://platform.openai.com/docs/api-reference/files/object) object that - # is ready to use in the rest of the platform. + # Within the returned Upload object, there is a nested + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # is ready to use in the rest of the platform. # - # You can specify the order of the Parts by passing in an ordered list of the Part - # IDs. + # You can specify the order of the Parts by passing in an ordered list of the Part + # IDs. # - # The number of bytes uploaded upon completion must match the number of bytes - # initially specified when creating the Upload object. No Parts may be added after - # an Upload is completed. + # The number of bytes uploaded upon completion must match the number of bytes + # initially specified when creating the Upload object. No Parts may be added after + # an Upload is completed. # # @overload complete(upload_id, part_ids:, md5: nil, request_options: {}) # diff --git a/lib/openai/resources/uploads/parts.rb b/lib/openai/resources/uploads/parts.rb index 11eaeb11..343a20e3 100644 --- a/lib/openai/resources/uploads/parts.rb +++ b/lib/openai/resources/uploads/parts.rb @@ -5,16 +5,16 @@ module Resources class Uploads class Parts # Adds a - # [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an - # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. - # A Part represents a chunk of bytes from the file you are trying to upload. + # [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. + # A Part represents a chunk of bytes from the file you are trying to upload. # - # Each Part can be at most 64 MB, and you can add Parts until you hit the Upload - # maximum of 8 GB. + # Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + # maximum of 8 GB. # - # It is possible to add multiple Parts in parallel. You can decide the intended - # order of the Parts when you - # [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). + # It is possible to add multiple Parts in parallel. 
You can decide the intended + # order of the Parts when you + # [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). # # @overload create(upload_id, data:, request_options: {}) # diff --git a/lib/openai/resources/vector_stores.rb b/lib/openai/resources/vector_stores.rb index 1fc69777..3547ae53 100644 --- a/lib/openai/resources/vector_stores.rb +++ b/lib/openai/resources/vector_stores.rb @@ -122,7 +122,7 @@ def delete(vector_store_id, params = {}) end # Search a vector store for relevant chunks based on a query and file attributes - # filter. + # filter. # # @overload search(vector_store_id, query:, filters: nil, max_num_results: nil, ranking_options: nil, rewrite_query: nil, request_options: {}) # diff --git a/lib/openai/resources/vector_stores/file_batches.rb b/lib/openai/resources/vector_stores/file_batches.rb index 5772d514..71268646 100644 --- a/lib/openai/resources/vector_stores/file_batches.rb +++ b/lib/openai/resources/vector_stores/file_batches.rb @@ -54,7 +54,7 @@ def retrieve(batch_id, params) end # Cancel a vector store file batch. This attempts to cancel the processing of - # files in this batch as soon as possible. + # files in this batch as soon as possible. # # @overload cancel(batch_id, vector_store_id:, request_options: {}) # diff --git a/lib/openai/resources/vector_stores/files.rb b/lib/openai/resources/vector_stores/files.rb index 0c044e34..9f6900e7 100644 --- a/lib/openai/resources/vector_stores/files.rb +++ b/lib/openai/resources/vector_stores/files.rb @@ -5,8 +5,8 @@ module Resources class VectorStores class Files # Create a vector store file by attaching a - # [File](https://platform.openai.com/docs/api-reference/files) to a - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). + # [File](https://platform.openai.com/docs/api-reference/files) to a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). # # @overload create(vector_store_id, file_id:, attributes: nil, chunking_strategy: nil, request_options: {}) # @@ -110,9 +110,9 @@ def list(vector_store_id, params = {}) end # Delete a vector store file. This will remove the file from the vector store but - # the file itself will not be deleted. To delete the file, use the - # [delete file](https://platform.openai.com/docs/api-reference/files/delete) - # endpoint. + # the file itself will not be deleted. To delete the file, use the + # [delete file](https://platform.openai.com/docs/api-reference/files/delete) + # endpoint. # # @overload delete(file_id, vector_store_id:, request_options: {}) # diff --git a/rbi/lib/openai/internal.rbi b/rbi/lib/openai/internal.rbi index 42138966..76548034 100644 --- a/rbi/lib/openai/internal.rbi +++ b/rbi/lib/openai/internal.rbi @@ -4,7 +4,7 @@ module OpenAI # @api private module Internal # Due to the current WIP status of Shapes support in Sorbet, types referencing - # this alias might be refined in the future. + # this alias might be refined in the future. AnyHash = T.type_alias { T::Hash[Symbol, T.anything] } OMIT = T.let(T.anything, T.anything) diff --git a/rbi/lib/openai/internal/transport/base_client.rbi b/rbi/lib/openai/internal/transport/base_client.rbi index b8b35744..5653189b 100644 --- a/rbi/lib/openai/internal/transport/base_client.rbi +++ b/rbi/lib/openai/internal/transport/base_client.rbi @@ -154,7 +154,7 @@ module OpenAI private def send_request(request, redirect_count:, retry_count:, send_retry_header:); end # Execute the request specified by `req`. 
This is the method that all resource - # methods call into. + # methods call into. # # @overload request(method, path, query: {}, headers: {}, body: nil, unwrap: nil, page: nil, stream: nil, model: OpenAI::Internal::Type::Unknown, options: {}) sig do diff --git a/rbi/lib/openai/internal/transport/pooled_net_requester.rbi b/rbi/lib/openai/internal/transport/pooled_net_requester.rbi index 35b91830..fb9572da 100644 --- a/rbi/lib/openai/internal/transport/pooled_net_requester.rbi +++ b/rbi/lib/openai/internal/transport/pooled_net_requester.rbi @@ -17,7 +17,7 @@ module OpenAI end # from the golang stdlib - # https://github.com/golang/go/blob/c8eced8580028328fde7c03cbfcb720ce15b2358/src/net/http/transport.go#L49 + # https://github.com/golang/go/blob/c8eced8580028328fde7c03cbfcb720ce15b2358/src/net/http/transport.go#L49 KEEP_ALIVE_TIMEOUT = 30 class << self diff --git a/rbi/lib/openai/internal/type/base_model.rbi b/rbi/lib/openai/internal/type/base_model.rbi index 8b8c9d16..0b0d262a 100644 --- a/rbi/lib/openai/internal/type/base_model.rbi +++ b/rbi/lib/openai/internal/type/base_model.rbi @@ -16,7 +16,7 @@ module OpenAI # @api private # # Assumes superclass fields are totally defined before fields are accessed / - # defined on subclasses. + # defined on subclasses. sig do returns( T::Hash[ @@ -99,7 +99,7 @@ module OpenAI # @api private # # `request_only` attributes not excluded from `.#coerce` when receiving responses - # even if well behaved servers should not send them + # even if well behaved servers should not send them sig { params(blk: T.proc.void).void } private def request_only(&blk); end @@ -138,33 +138,33 @@ module OpenAI end # Returns the raw value associated with the given key, if found. Otherwise, nil is - # returned. + # returned. # - # It is valid to lookup keys that are not in the API spec, for example to access - # undocumented features. This method does not parse response data into - # higher-level types. Lookup by anything other than a Symbol is an ArgumentError. + # It is valid to lookup keys that are not in the API spec, for example to access + # undocumented features. This method does not parse response data into + # higher-level types. Lookup by anything other than a Symbol is an ArgumentError. sig { params(key: Symbol).returns(T.nilable(T.anything)) } def [](key); end # Returns a Hash of the data underlying this object. O(1) # - # Keys are Symbols and values are the raw values from the response. The return - # value indicates which values were ever set on the object. i.e. there will be a - # key in this hash if they ever were, even if the set value was nil. + # Keys are Symbols and values are the raw values from the response. The return + # value indicates which values were ever set on the object. i.e. there will be a + # key in this hash if they ever were, even if the set value was nil. # - # This method is not recursive. The returned value is shared by the object, so it - # should not be mutated. + # This method is not recursive. The returned value is shared by the object, so it + # should not be mutated. sig { overridable.returns(OpenAI::Internal::AnyHash) } def to_h; end # Returns a Hash of the data underlying this object. O(1) # - # Keys are Symbols and values are the raw values from the response. The return - # value indicates which values were ever set on the object. i.e. there will be a - # key in this hash if they ever were, even if the set value was nil. + # Keys are Symbols and values are the raw values from the response. 
The return + # value indicates which values were ever set on the object. i.e. there will be a + # key in this hash if they ever were, even if the set value was nil. # - # This method is not recursive. The returned value is shared by the object, so it - # should not be mutated. + # This method is not recursive. The returned value is shared by the object, so it + # should not be mutated. sig { overridable.returns(OpenAI::Internal::AnyHash) } def to_hash; end diff --git a/rbi/lib/openai/internal/type/converter.rbi b/rbi/lib/openai/internal/type/converter.rbi index 99bef8c2..cec2e8a2 100644 --- a/rbi/lib/openai/internal/type/converter.rbi +++ b/rbi/lib/openai/internal/type/converter.rbi @@ -51,13 +51,13 @@ module OpenAI # # Based on `target`, transform `value` into `target`, to the extent possible: # - # 1. if the given `value` conforms to `target` already, return the given `value` - # 2. if it's possible and safe to convert the given `value` to `target`, then the - # converted value - # 3. otherwise, the given `value` unaltered + # 1. if the given `value` conforms to `target` already, return the given `value` + # 2. if it's possible and safe to convert the given `value` to `target`, then the + # converted value + # 3. otherwise, the given `value` unaltered # - # The coercion process is subject to improvement between minor release versions. - # See https://docs.pydantic.dev/latest/concepts/unions/#smart-mode + # The coercion process is subject to improvement between minor release versions. + # See https://docs.pydantic.dev/latest/concepts/unions/#smart-mode sig do params( target: OpenAI::Internal::Type::Converter::Input, @@ -70,24 +70,24 @@ module OpenAI target, value, # The `strictness` is one of `true`, `false`, or `:strong`. This informs the - # coercion strategy when we have to decide between multiple possible conversion - # targets: + # coercion strategy when we have to decide between multiple possible conversion + # targets: # - # - `true`: the conversion must be exact, with minimum coercion. - # - `false`: the conversion can be approximate, with some coercion. - # - `:strong`: the conversion must be exact, with no coercion, and raise an error - # if not possible. + # - `true`: the conversion must be exact, with minimum coercion. + # - `false`: the conversion can be approximate, with some coercion. + # - `:strong`: the conversion must be exact, with no coercion, and raise an error + # if not possible. # - # The `exactness` is `Hash` with keys being one of `yes`, `no`, or `maybe`. For - # any given conversion attempt, the exactness will be updated based on how closely - # the value recursively matches the target type: + # The `exactness` is `Hash` with keys being one of `yes`, `no`, or `maybe`. For + # any given conversion attempt, the exactness will be updated based on how closely + # the value recursively matches the target type: # - # - `yes`: the value can be converted to the target type with minimum coercion. - # - `maybe`: the value can be converted to the target type with some reasonable - # coercion. - # - `no`: the value cannot be converted to the target type. + # - `yes`: the value can be converted to the target type with minimum coercion. + # - `maybe`: the value can be converted to the target type with some reasonable + # coercion. + # - `no`: the value cannot be converted to the target type. # - # See implementation below for more details. + # See implementation below for more details. 
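The `#[]` / `#to_h` docs above spell out the raw-access contract; a sketch of what that means in practice, given any model instance `completion` returned by the SDK:

```ruby
# Sketch of the raw-access helpers documented above.
completion[:undocumented_key] # raw value or nil; never parsed into typed models
raw = completion.to_h         # Hash{Symbol=>Object} shared with the object -- do not mutate
completion["id"]              # raises ArgumentError: lookup must be by Symbol
```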
state: {strictness: true, exactness: {yes: 0, no: 0, maybe: 0}, branched: 0} ); end # @api private diff --git a/rbi/lib/openai/internal/type/enum.rbi b/rbi/lib/openai/internal/type/enum.rbi index 9fcc30ee..f74dc677 100644 --- a/rbi/lib/openai/internal/type/enum.rbi +++ b/rbi/lib/openai/internal/type/enum.rbi @@ -6,15 +6,15 @@ module OpenAI # @api private # # A value from among a specified list of options. OpenAPI enum values map to Ruby - # values in the SDK as follows: + # values in the SDK as follows: # - # 1. boolean => true | false - # 2. integer => Integer - # 3. float => Float - # 4. string => Symbol + # 1. boolean => true | false + # 2. integer => Integer + # 3. float => Float + # 4. string => Symbol # - # We can therefore convert string values to Symbols, but can't convert other - # values safely. + # We can therefore convert string values to Symbols, but can't convert other + # values safely. module Enum include OpenAI::Internal::Type::Converter @@ -37,7 +37,7 @@ module OpenAI # @api private # # Unlike with primitives, `Enum` additionally validates that the value is a member - # of the enum. + # of the enum. sig do override .params(value: T.any(String, Symbol, T.anything), state: OpenAI::Internal::Type::Converter::State) diff --git a/rbi/lib/openai/internal/util.rbi b/rbi/lib/openai/internal/util.rbi index 36689b2b..60be3113 100644 --- a/rbi/lib/openai/internal/util.rbi +++ b/rbi/lib/openai/internal/util.rbi @@ -52,7 +52,7 @@ module OpenAI # @api private # # Recursively merge one hash with another. If the values at a given key are not - # both hashes, just take the new value. + # both hashes, just take the new value. sig do params(values: T::Array[T.anything], sentinel: T.nilable(T.anything), concat: T::Boolean) .returns(T.anything) diff --git a/rbi/lib/openai/models/audio/speech_create_params.rbi b/rbi/lib/openai/models/audio/speech_create_params.rbi index 9a810d27..8dd902b4 100644 --- a/rbi/lib/openai/models/audio/speech_create_params.rbi +++ b/rbi/lib/openai/models/audio/speech_create_params.rbi @@ -12,19 +12,19 @@ module OpenAI attr_accessor :input # One of the available [TTS models](https://platform.openai.com/docs/models#tts): - # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. + # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. sig { returns(T.any(String, OpenAI::Models::Audio::SpeechModel::OrSymbol)) } attr_accessor :model # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and - # `verse`. Previews of the voices are available in the - # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + # `verse`. Previews of the voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). sig { returns(T.any(String, OpenAI::Models::Audio::SpeechCreateParams::Voice::OrSymbol)) } attr_accessor :voice # Control the voice of your generated audio with additional instructions. Does not - # work with `tts-1` or `tts-1-hd`. + # work with `tts-1` or `tts-1-hd`. sig { returns(T.nilable(String)) } attr_reader :instructions @@ -32,7 +32,7 @@ module OpenAI attr_writer :instructions # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, - # `wav`, and `pcm`. + # `wav`, and `pcm`. 
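The `Enum` doc above maps OpenAPI enum types onto Ruby values, with string members becoming Symbols. A hedged sketch of the practical consequence, using the speech params documented below; the `client.audio.speech` path is an assumption about where this resource is mounted:

```ruby
# Sketch: string enum members are written (and compared) as Symbols.
speech = client.audio.speech.create(
  model: "gpt-4o-mini-tts",
  input: "The quick brown fox jumped over the lazy dog.",
  voice: :alloy,          # string enum member, passed as a Symbol
  response_format: :mp3
)
```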
sig { returns(T.nilable(OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat::OrSymbol)) } attr_reader :response_format @@ -40,7 +40,7 @@ module OpenAI attr_writer :response_format # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - # the default. + # the default. sig { returns(T.nilable(Float)) } attr_reader :speed @@ -87,7 +87,7 @@ module OpenAI def to_hash; end # One of the available [TTS models](https://platform.openai.com/docs/models#tts): - # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. + # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. module Model extend OpenAI::Internal::Type::Union @@ -96,9 +96,9 @@ module OpenAI end # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and - # `verse`. Previews of the voices are available in the - # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + # `verse`. Previews of the voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). module Voice extend OpenAI::Internal::Type::Union @@ -123,7 +123,7 @@ module OpenAI end # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, - # `wav`, and `pcm`. + # `wav`, and `pcm`. module ResponseFormat extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/audio/transcription.rbi b/rbi/lib/openai/models/audio/transcription.rbi index 97b95ef4..ccea9d77 100644 --- a/rbi/lib/openai/models/audio/transcription.rbi +++ b/rbi/lib/openai/models/audio/transcription.rbi @@ -9,8 +9,8 @@ module OpenAI attr_accessor :text # The log probabilities of the tokens in the transcription. Only returned with the - # models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added - # to the `include` array. + # models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added + # to the `include` array. sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::Transcription::Logprob])) } attr_reader :logprobs @@ -23,7 +23,7 @@ module OpenAI attr_writer :logprobs # Represents a transcription response returned by model, based on the provided - # input. + # input. sig do params( text: String, diff --git a/rbi/lib/openai/models/audio/transcription_create_params.rbi b/rbi/lib/openai/models/audio/transcription_create_params.rbi index 866e4c50..5511185c 100644 --- a/rbi/lib/openai/models/audio/transcription_create_params.rbi +++ b/rbi/lib/openai/models/audio/transcription_create_params.rbi @@ -8,21 +8,21 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # The audio file object (not file name) to transcribe, in one of these formats: - # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. sig { returns(T.any(IO, StringIO)) } attr_accessor :file # ID of the model to use. The options are `gpt-4o-transcribe`, - # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - # Whisper V2 model). + # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + # Whisper V2 model). sig { returns(T.any(String, OpenAI::Models::AudioModel::OrSymbol)) } attr_accessor :model # Additional information to include in the transcription response. 
`logprobs` will - # return the log probabilities of the tokens in the response to understand the - # model's confidence in the transcription. `logprobs` only works with - # response_format set to `json` and only with the models `gpt-4o-transcribe` and - # `gpt-4o-mini-transcribe`. + # return the log probabilities of the tokens in the response to understand the + # model's confidence in the transcription. `logprobs` only works with + # response_format set to `json` and only with the models `gpt-4o-transcribe` and + # `gpt-4o-mini-transcribe`. sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionInclude::OrSymbol])) } attr_reader :include @@ -30,8 +30,8 @@ module OpenAI attr_writer :include # The language of the input audio. Supplying the input language in - # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - # format will improve accuracy and latency. + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. sig { returns(T.nilable(String)) } attr_reader :language @@ -39,9 +39,9 @@ module OpenAI attr_writer :language # An optional text to guide the model's style or continue a previous audio - # segment. The - # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - # should match the audio language. + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. sig { returns(T.nilable(String)) } attr_reader :prompt @@ -49,8 +49,8 @@ module OpenAI attr_writer :prompt # The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - # the only supported format is `json`. + # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + # the only supported format is `json`. sig { returns(T.nilable(OpenAI::Models::AudioResponseFormat::OrSymbol)) } attr_reader :response_format @@ -58,10 +58,10 @@ module OpenAI attr_writer :response_format # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - # output more random, while lower values like 0.2 will make it more focused and - # deterministic. If set to 0, the model will use - # [log probability](https://en.wikipedia.org/wiki/Log_probability) to - # automatically increase the temperature until certain thresholds are hit. + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. sig { returns(T.nilable(Float)) } attr_reader :temperature @@ -69,10 +69,10 @@ module OpenAI attr_writer :temperature # The timestamp granularities to populate for this transcription. - # `response_format` must be set `verbose_json` to use timestamp granularities. - # Either or both of these options are supported: `word`, or `segment`. Note: There - # is no additional latency for segment timestamps, but generating word timestamps - # incurs additional latency. + # `response_format` must be set `verbose_json` to use timestamp granularities. + # Either or both of these options are supported: `word`, or `segment`. Note: There + # is no additional latency for segment timestamps, but generating word timestamps + # incurs additional latency. 
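Pulling the transcription params above together into one hedged sketch; per those docs, `include: [:logprobs]` is only honored with `response_format: :json` and the gpt-4o transcription models:

```ruby
# Sketch of Audio::Transcriptions#create using the documented params.
transcript = client.audio.transcriptions.create(
  file: File.open("meeting.m4a", "rb"),
  model: "gpt-4o-transcribe",
  language: "en",          # ISO-639-1 hint; improves accuracy and latency
  response_format: :json,
  include: [:logprobs]
)
```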
sig do returns( T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionCreateParams::TimestampGranularity::OrSymbol]) @@ -132,8 +132,8 @@ module OpenAI def to_hash; end # ID of the model to use. The options are `gpt-4o-transcribe`, - # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - # Whisper V2 model). + # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + # Whisper V2 model). module Model extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/audio/transcription_create_response.rbi b/rbi/lib/openai/models/audio/transcription_create_response.rbi index ba3131e3..c6ca8f08 100644 --- a/rbi/lib/openai/models/audio/transcription_create_response.rbi +++ b/rbi/lib/openai/models/audio/transcription_create_response.rbi @@ -4,7 +4,7 @@ module OpenAI module Models module Audio # Represents a transcription response returned by model, based on the provided - # input. + # input. module TranscriptionCreateResponse extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/audio/transcription_segment.rbi b/rbi/lib/openai/models/audio/transcription_segment.rbi index 60e0d8c4..37306ed0 100644 --- a/rbi/lib/openai/models/audio/transcription_segment.rbi +++ b/rbi/lib/openai/models/audio/transcription_segment.rbi @@ -9,12 +9,12 @@ module OpenAI attr_accessor :id # Average logprob of the segment. If the value is lower than -1, consider the - # logprobs failed. + # logprobs failed. sig { returns(Float) } attr_accessor :avg_logprob # Compression ratio of the segment. If the value is greater than 2.4, consider the - # compression failed. + # compression failed. sig { returns(Float) } attr_accessor :compression_ratio @@ -23,7 +23,7 @@ module OpenAI attr_accessor :end_ # Probability of no speech in the segment. If the value is higher than 1.0 and the - # `avg_logprob` is below -1, consider this segment silent. + # `avg_logprob` is below -1, consider this segment silent. sig { returns(Float) } attr_accessor :no_speech_prob diff --git a/rbi/lib/openai/models/audio/transcription_stream_event.rbi b/rbi/lib/openai/models/audio/transcription_stream_event.rbi index d09c413a..4a3dc34e 100644 --- a/rbi/lib/openai/models/audio/transcription_stream_event.rbi +++ b/rbi/lib/openai/models/audio/transcription_stream_event.rbi @@ -4,9 +4,9 @@ module OpenAI module Models module Audio # Emitted when there is an additional text delta. This is also the first event - # emitted when the transcription starts. Only emitted when you - # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) - # with the `Stream` parameter set to `true`. + # emitted when the transcription starts. Only emitted when you + # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `Stream` parameter set to `true`. module TranscriptionStreamEvent extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/audio/transcription_text_delta_event.rbi b/rbi/lib/openai/models/audio/transcription_text_delta_event.rbi index c37fbdd0..c0c5e553 100644 --- a/rbi/lib/openai/models/audio/transcription_text_delta_event.rbi +++ b/rbi/lib/openai/models/audio/transcription_text_delta_event.rbi @@ -13,8 +13,8 @@ module OpenAI attr_accessor :type # The log probabilities of the delta. Only included if you - # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) - # with the `include[]` parameter set to `logprobs`. 
+ # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `include[]` parameter set to `logprobs`. sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionTextDeltaEvent::Logprob])) } attr_reader :logprobs @@ -27,9 +27,9 @@ module OpenAI attr_writer :logprobs # Emitted when there is an additional text delta. This is also the first event - # emitted when the transcription starts. Only emitted when you - # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) - # with the `Stream` parameter set to `true`. + # emitted when the transcription starts. Only emitted when you + # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `Stream` parameter set to `true`. sig do params( delta: String, diff --git a/rbi/lib/openai/models/audio/transcription_text_done_event.rbi b/rbi/lib/openai/models/audio/transcription_text_done_event.rbi index 03d16168..95c68dcc 100644 --- a/rbi/lib/openai/models/audio/transcription_text_done_event.rbi +++ b/rbi/lib/openai/models/audio/transcription_text_done_event.rbi @@ -13,9 +13,9 @@ module OpenAI attr_accessor :type # The log probabilities of the individual tokens in the transcription. Only - # included if you - # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) - # with the `include[]` parameter set to `logprobs`. + # included if you + # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `include[]` parameter set to `logprobs`. sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionTextDoneEvent::Logprob])) } attr_reader :logprobs @@ -28,9 +28,9 @@ module OpenAI attr_writer :logprobs # Emitted when the transcription is complete. Contains the complete transcription - # text. Only emitted when you - # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) - # with the `Stream` parameter set to `true`. + # text. Only emitted when you + # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `Stream` parameter set to `true`. sig do params( text: String, diff --git a/rbi/lib/openai/models/audio/transcription_verbose.rbi b/rbi/lib/openai/models/audio/transcription_verbose.rbi index a8c181ae..76b0838c 100644 --- a/rbi/lib/openai/models/audio/transcription_verbose.rbi +++ b/rbi/lib/openai/models/audio/transcription_verbose.rbi @@ -34,7 +34,7 @@ module OpenAI attr_writer :words # Represents a verbose json transcription response returned by model, based on the - # provided input. + # provided input. sig do params( duration: Float, diff --git a/rbi/lib/openai/models/audio/translation_create_params.rbi b/rbi/lib/openai/models/audio/translation_create_params.rbi index adc943d6..7e113ff2 100644 --- a/rbi/lib/openai/models/audio/translation_create_params.rbi +++ b/rbi/lib/openai/models/audio/translation_create_params.rbi @@ -8,19 +8,19 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # The audio file object (not file name) translate, in one of these formats: flac, - # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. sig { returns(T.any(IO, StringIO)) } attr_accessor :file # ID of the model to use. Only `whisper-1` (which is powered by our open source - # Whisper V2 model) is currently available. 
+ # Whisper V2 model) is currently available. sig { returns(T.any(String, OpenAI::Models::AudioModel::OrSymbol)) } attr_accessor :model # An optional text to guide the model's style or continue a previous audio - # segment. The - # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - # should be in English. + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should be in English. sig { returns(T.nilable(String)) } attr_reader :prompt @@ -28,7 +28,7 @@ module OpenAI attr_writer :prompt # The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. + # `verbose_json`, or `vtt`. sig { returns(T.nilable(OpenAI::Models::Audio::TranslationCreateParams::ResponseFormat::OrSymbol)) } attr_reader :response_format @@ -36,10 +36,10 @@ module OpenAI attr_writer :response_format # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - # output more random, while lower values like 0.2 will make it more focused and - # deterministic. If set to 0, the model will use - # [log probability](https://en.wikipedia.org/wiki/Log_probability) to - # automatically increase the temperature until certain thresholds are hit. + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. sig { returns(T.nilable(Float)) } attr_reader :temperature @@ -76,7 +76,7 @@ module OpenAI def to_hash; end # ID of the model to use. Only `whisper-1` (which is powered by our open source - # Whisper V2 model) is currently available. + # Whisper V2 model) is currently available. module Model extend OpenAI::Internal::Type::Union @@ -85,7 +85,7 @@ module OpenAI end # The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. + # `verbose_json`, or `vtt`. module ResponseFormat extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/audio_response_format.rbi b/rbi/lib/openai/models/audio_response_format.rbi index c5d2582e..23a709ab 100644 --- a/rbi/lib/openai/models/audio_response_format.rbi +++ b/rbi/lib/openai/models/audio_response_format.rbi @@ -3,8 +3,8 @@ module OpenAI module Models # The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - # the only supported format is `json`. + # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + # the only supported format is `json`. module AudioResponseFormat extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi b/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi index d1ba038a..d072297d 100644 --- a/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi +++ b/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi @@ -8,7 +8,7 @@ module OpenAI attr_accessor :type # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - # `800` and `chunk_overlap_tokens` of `400`. + # `800` and `chunk_overlap_tokens` of `400`. 
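A matching hedged sketch for translations; per the params above, only `whisper-1` is currently available for this endpoint:

```ruby
# Sketch of Audio::Translations#create (output prose is English).
translation = client.audio.translations.create(
  file: File.open("interview_fr.mp3", "rb"),
  model: "whisper-1",
  response_format: :json
)
```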
sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto); end diff --git a/rbi/lib/openai/models/batch.rbi b/rbi/lib/openai/models/batch.rbi index a8a659d1..7542c9b1 100644 --- a/rbi/lib/openai/models/batch.rbi +++ b/rbi/lib/openai/models/batch.rbi @@ -100,11 +100,11 @@ module OpenAI attr_writer :in_progress_at # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata diff --git a/rbi/lib/openai/models/batch_create_params.rbi b/rbi/lib/openai/models/batch_create_params.rbi index f33919b3..6f907cae 100644 --- a/rbi/lib/openai/models/batch_create_params.rbi +++ b/rbi/lib/openai/models/batch_create_params.rbi @@ -7,35 +7,35 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # The time frame within which the batch should be processed. Currently only `24h` - # is supported. + # is supported. sig { returns(OpenAI::Models::BatchCreateParams::CompletionWindow::OrSymbol) } attr_accessor :completion_window # The endpoint to be used for all requests in the batch. Currently - # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - # are supported. Note that `/v1/embeddings` batches are also restricted to a - # maximum of 50,000 embedding inputs across all requests in the batch. + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + # are supported. Note that `/v1/embeddings` batches are also restricted to a + # maximum of 50,000 embedding inputs across all requests in the batch. sig { returns(OpenAI::Models::BatchCreateParams::Endpoint::OrSymbol) } attr_accessor :endpoint # The ID of an uploaded file that contains requests for the new batch. # - # See [upload file](https://platform.openai.com/docs/api-reference/files/create) - # for how to upload a file. + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. # - # Your input file must be formatted as a - # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), - # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - # requests, and can be up to 200 MB in size. + # Your input file must be formatted as a + # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), + # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 + # requests, and can be up to 200 MB in size. sig { returns(String) } attr_accessor :input_file_id # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. 
Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -66,7 +66,7 @@ module OpenAI def to_hash; end # The time frame within which the batch should be processed. Currently only `24h` - # is supported. + # is supported. module CompletionWindow extend OpenAI::Internal::Type::Enum @@ -81,9 +81,9 @@ module OpenAI end # The endpoint to be used for all requests in the batch. Currently - # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - # are supported. Note that `/v1/embeddings` batches are also restricted to a - # maximum of 50,000 embedding inputs across all requests in the batch. + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + # are supported. Note that `/v1/embeddings` batches are also restricted to a + # maximum of 50,000 embedding inputs across all requests in the batch. module Endpoint extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/batch_list_params.rbi b/rbi/lib/openai/models/batch_list_params.rbi index a78f54c3..9ae31328 100644 --- a/rbi/lib/openai/models/batch_list_params.rbi +++ b/rbi/lib/openai/models/batch_list_params.rbi @@ -7,9 +7,9 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } attr_reader :after @@ -17,7 +17,7 @@ module OpenAI attr_writer :after # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } attr_reader :limit diff --git a/rbi/lib/openai/models/beta/assistant.rbi b/rbi/lib/openai/models/beta/assistant.rbi index 4dbc147a..f8ef7368 100644 --- a/rbi/lib/openai/models/beta/assistant.rbi +++ b/rbi/lib/openai/models/beta/assistant.rbi @@ -17,24 +17,24 @@ module OpenAI attr_accessor :description # The system instructions that the assistant uses. The maximum length is 256,000 - # characters. + # characters. sig { returns(T.nilable(String)) } attr_accessor :instructions # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata # ID of the model to use. 
You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(String) } attr_accessor :model @@ -47,8 +47,8 @@ module OpenAI attr_accessor :object # A list of tool enabled on the assistant. There can be a maximum of 128 tools per - # assistant. Tools can be of types `code_interpreter`, `file_search`, or - # `function`. + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. sig do returns( T::Array[ @@ -63,25 +63,25 @@ module OpenAI attr_accessor :tools # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -97,15 +97,15 @@ module OpenAI attr_accessor :response_format # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. 
+ # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } attr_accessor :temperature # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig { returns(T.nilable(OpenAI::Models::Beta::Assistant::ToolResources)) } attr_reader :tool_resources @@ -118,10 +118,10 @@ module OpenAI attr_writer :tool_resources # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } attr_accessor :top_p @@ -233,9 +233,9 @@ module OpenAI attr_writer :file_search # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig do params( code_interpreter: T.any(OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter, OpenAI::Internal::AnyHash), @@ -258,8 +258,8 @@ module OpenAI class CodeInterpreter < OpenAI::Internal::Type::BaseModel # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter`` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter`` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -275,9 +275,9 @@ module OpenAI class FileSearch < OpenAI::Internal::Type::BaseModel # The ID of the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this assistant. There can be a maximum of 1 vector store attached to - # the assistant. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } attr_reader :vector_store_ids diff --git a/rbi/lib/openai/models/beta/assistant_create_params.rbi b/rbi/lib/openai/models/beta/assistant_create_params.rbi index b225b423..b0c7ab98 100644 --- a/rbi/lib/openai/models/beta/assistant_create_params.rbi +++ b/rbi/lib/openai/models/beta/assistant_create_params.rbi @@ -8,10 +8,10 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # ID of the model to use. 
You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.any(String, OpenAI::Models::ChatModel::OrSymbol)) } attr_accessor :model @@ -20,16 +20,16 @@ module OpenAI attr_accessor :description # The system instructions that the assistant uses. The maximum length is 256,000 - # characters. + # characters. sig { returns(T.nilable(String)) } attr_accessor :instructions # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -39,33 +39,33 @@ module OpenAI # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(OpenAI::Models::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. 
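A hedged sketch of the Structured Outputs shape this `response_format` doc describes, when creating an assistant. `OpenAI::Client` and `client.beta.assistants.create` are assumed entry points, and the schema name and fields are invented for illustration; the JSON-mode caveat continues in the hunk below.

```
require "openai"

# Assumed client construction; not part of this diff.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

assistant = client.beta.assistants.create(
  model: "gpt-4o",
  instructions: "Extract contact details and reply only in JSON.",
  metadata: {team: "docs-demo"}, # <= 16 pairs; keys <= 64 chars, values <= 512
  response_format: {
    type: "json_schema",
    json_schema: {
      name: "contact", # hypothetical schema name
      schema: {
        type: "object",
        properties: {name: {type: "string"}, email: {type: "string"}},
        required: %w[name email]
      }
    }
  }
)
puts assistant.id
```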
# - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -81,15 +81,15 @@ module OpenAI attr_accessor :response_format # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } attr_accessor :temperature # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig { returns(T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources)) } attr_reader :tool_resources @@ -102,8 +102,8 @@ module OpenAI attr_writer :tool_resources # A list of tool enabled on the assistant. There can be a maximum of 128 tools per - # assistant. Tools can be of types `code_interpreter`, `file_search`, or - # `function`. + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. sig do returns( T.nilable( @@ -135,10 +135,10 @@ module OpenAI attr_writer :tools # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } attr_accessor :top_p @@ -223,10 +223,10 @@ module OpenAI def to_hash; end # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. 
+ # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. module Model extend OpenAI::Internal::Type::Union @@ -261,9 +261,9 @@ module OpenAI attr_writer :file_search # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig do params( code_interpreter: T.any( @@ -289,8 +289,8 @@ module OpenAI class CodeInterpreter < OpenAI::Internal::Type::BaseModel # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -306,9 +306,9 @@ module OpenAI class FileSearch < OpenAI::Internal::Type::BaseModel # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this assistant. There can be a maximum of 1 vector store attached to - # the assistant. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } attr_reader :vector_store_ids @@ -316,9 +316,9 @@ module OpenAI attr_writer :vector_store_ids # A helper to create a - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # with file_ids and attach it to this assistant. There can be a maximum of 1 - # vector store attached to the assistant. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this assistant. There can be a maximum of 1 + # vector store attached to the assistant. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore]) @@ -366,7 +366,7 @@ module OpenAI class VectorStore < OpenAI::Internal::Type::BaseModel # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. sig do returns( T.nilable( @@ -392,8 +392,8 @@ module OpenAI attr_writer :chunking_strategy # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to - # add to the vector store. There can be a maximum of 10000 files in a vector - # store. + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -401,11 +401,11 @@ module OpenAI attr_writer :file_ids # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. 
Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -439,7 +439,7 @@ module OpenAI def to_hash; end # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. module ChunkingStrategy extend OpenAI::Internal::Type::Union @@ -449,7 +449,7 @@ module OpenAI attr_accessor :type # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - # `800` and `chunk_overlap_tokens` of `400`. + # `800` and `chunk_overlap_tokens` of `400`. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto); end @@ -506,12 +506,12 @@ module OpenAI class Static < OpenAI::Internal::Type::BaseModel # The number of tokens that overlap between chunks. The default value is `400`. # - # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } attr_accessor :chunk_overlap_tokens # The maximum number of tokens in each chunk. The default value is `800`. The - # minimum value is `100` and the maximum value is `4096`. + # minimum value is `100` and the maximum value is `4096`. sig { returns(Integer) } attr_accessor :max_chunk_size_tokens diff --git a/rbi/lib/openai/models/beta/assistant_list_params.rbi b/rbi/lib/openai/models/beta/assistant_list_params.rbi index 9bf1cab2..52cd7565 100644 --- a/rbi/lib/openai/models/beta/assistant_list_params.rbi +++ b/rbi/lib/openai/models/beta/assistant_list_params.rbi @@ -8,9 +8,9 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } attr_reader :after @@ -18,9 +18,9 @@ module OpenAI attr_writer :after # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } attr_reader :before @@ -28,7 +28,7 @@ module OpenAI attr_writer :before # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } attr_reader :limit @@ -36,7 +36,7 @@ module OpenAI attr_writer :limit # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. 
+ # order and `desc` for descending order.
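Taken together, `after`, `limit`, and `order` give cursor pagination as described in these comments. A minimal sketch, assuming the SDK's list pages expose `auto_paging_each` (which would re-request with `after` set to the last object ID of each page, exactly as the comment above describes):

```
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# First page of up to 20 assistants, newest first; iteration follows the cursor.
page = client.beta.assistants.list(limit: 20, order: :desc)
page.auto_paging_each do |assistant|
  puts assistant.id
end
```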
sig { returns(T.nilable(OpenAI::Models::Beta::AssistantListParams::Order::OrSymbol)) } attr_reader :order @@ -70,7 +70,7 @@ module OpenAI def to_hash; end # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/assistant_response_format_option.rbi b/rbi/lib/openai/models/beta/assistant_response_format_option.rbi index 43f47c46..af1951eb 100644 --- a/rbi/lib/openai/models/beta/assistant_response_format_option.rbi +++ b/rbi/lib/openai/models/beta/assistant_response_format_option.rbi @@ -4,25 +4,25 @@ module OpenAI module Models module Beta # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. module AssistantResponseFormatOption extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/beta/assistant_stream_event.rbi b/rbi/lib/openai/models/beta/assistant_stream_event.rbi index 407895b1..5cb77eb2 100644 --- a/rbi/lib/openai/models/beta/assistant_stream_event.rbi +++ b/rbi/lib/openai/models/beta/assistant_stream_event.rbi @@ -5,30 +5,30 @@ module OpenAI module Beta # Represents an event emitted when streaming a Run. 
# - # Each event in a server-sent events stream has an `event` and `data` property: + # Each event in a server-sent events stream has an `event` and `data` property: # - # ``` - # event: thread.created - # data: {"id": "thread_123", "object": "thread", ...} - # ``` + # ``` + # event: thread.created + # data: {"id": "thread_123", "object": "thread", ...} + # ``` # - # We emit events whenever a new object is created, transitions to a new state, or - # is being streamed in parts (deltas). For example, we emit `thread.run.created` - # when a new run is created, `thread.run.completed` when a run completes, and so - # on. When an Assistant chooses to create a message during a run, we emit a - # `thread.message.created event`, a `thread.message.in_progress` event, many - # `thread.message.delta` events, and finally a `thread.message.completed` event. + # We emit events whenever a new object is created, transitions to a new state, or + # is being streamed in parts (deltas). For example, we emit `thread.run.created` + # when a new run is created, `thread.run.completed` when a run completes, and so + # on. When an Assistant chooses to create a message during a run, we emit a + # `thread.message.created event`, a `thread.message.in_progress` event, many + # `thread.message.delta` events, and finally a `thread.message.completed` event. # - # We may add additional events over time, so we recommend handling unknown events - # gracefully in your code. See the - # [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview) - # to learn how to integrate the Assistants API with streaming. + # We may add additional events over time, so we recommend handling unknown events + # gracefully in your code. See the + # [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview) + # to learn how to integrate the Assistants API with streaming. module AssistantStreamEvent extend OpenAI::Internal::Type::Union class ThreadCreated < OpenAI::Internal::Type::BaseModel # Represents a thread that contains - # [messages](https://platform.openai.com/docs/api-reference/messages). + # [messages](https://platform.openai.com/docs/api-reference/messages). sig { returns(OpenAI::Models::Beta::Thread) } attr_reader :data @@ -46,8 +46,8 @@ module OpenAI attr_writer :enabled # Occurs when a new - # [thread](https://platform.openai.com/docs/api-reference/threads/object) is - # created. + # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # created. sig do params( data: T.any(OpenAI::Models::Beta::Thread, OpenAI::Internal::AnyHash), @@ -64,7 +64,7 @@ module OpenAI class ThreadRunCreated < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -75,7 +75,7 @@ module OpenAI attr_accessor :event # Occurs when a new - # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -88,7 +88,7 @@ module OpenAI class ThreadRunQueued < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -99,7 +99,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # moves to a `queued` status. + # moves to a `queued` status. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -112,7 +112,7 @@ module OpenAI class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -123,7 +123,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # moves to an `in_progress` status. + # moves to an `in_progress` status. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -136,7 +136,7 @@ module OpenAI class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -147,7 +147,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # moves to a `requires_action` status. + # moves to a `requires_action` status. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -160,7 +160,7 @@ module OpenAI class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -171,7 +171,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # is completed. + # is completed. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -184,7 +184,7 @@ module OpenAI class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -195,7 +195,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # ends with status `incomplete`. + # ends with status `incomplete`. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -208,7 +208,7 @@ module OpenAI class ThreadRunFailed < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). 
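A speculative sketch of consuming these run lifecycle events. The `stream` call below is a placeholder for whatever method yields this union, and the event symbols are inferred from the class names here; only the `event` / `data` shape is taken from the signatures in this file. Per the docs above, unknown event types are skipped rather than raising.

```
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Hypothetical helper; stands in for whatever call yields AssistantStreamEvent values.
stream = client.beta.threads.runs.stream("thread_123", assistant_id: "asst_123")

stream.each do |evt|
  case evt.event
  when :"thread.run.completed"
    puts "run #{evt.data.id} completed"
  when :"thread.run.failed", :"thread.run.expired"
    warn "run ended early: #{evt.event}"
  else
    # New event types may be added over time; ignore ones we don't know.
  end
end
```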
sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -219,7 +219,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # fails. + # fails. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -232,7 +232,7 @@ module OpenAI class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -243,7 +243,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # moves to a `cancelling` status. + # moves to a `cancelling` status. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -256,7 +256,7 @@ module OpenAI class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -267,7 +267,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # is cancelled. + # is cancelled. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -280,7 +280,7 @@ module OpenAI class ThreadRunExpired < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -291,7 +291,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # expires. + # expires. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -314,8 +314,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # is created. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -341,8 +341,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # moves to an `in_progress` state. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # moves to an `in_progress` state. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -358,7 +358,7 @@ module OpenAI class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel # Represents a run step delta i.e. any changed fields on a run step during - # streaming. + # streaming. 
sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) } attr_reader :data @@ -371,8 +371,8 @@ module OpenAI attr_accessor :event # Occurs when parts of a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # are being streamed. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # are being streamed. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, OpenAI::Internal::AnyHash), @@ -398,8 +398,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # is completed. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is completed. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -425,8 +425,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # fails. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # fails. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -452,8 +452,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # is cancelled. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is cancelled. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -479,8 +479,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # expires. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # expires. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -496,7 +496,7 @@ module OpenAI class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } attr_reader :data @@ -507,8 +507,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [message](https://platform.openai.com/docs/api-reference/messages/object) is - # created. + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Message, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -521,7 +521,7 @@ module OpenAI class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } attr_reader :data @@ -532,8 +532,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [message](https://platform.openai.com/docs/api-reference/messages/object) moves - # to an `in_progress` state. + # [message](https://platform.openai.com/docs/api-reference/messages/object) moves + # to an `in_progress` state. 
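For the `thread.message.delta` events defined just below, streamed text arrives in fragments that the caller assembles. A sketch under stated assumptions: the field path `data.delta.content` and the text-part shape are inferred from `MessageDeltaEvent`, not confirmed API, and `stream` is the placeholder from the previous sketch.

```
# Accumulate streamed assistant text from message delta events.
buffer = +""
stream.each do |evt|
  next unless evt.event == :"thread.message.delta"

  (evt.data.delta.content || []).each do |part|
    # Text parts carry the new characters under `text.value` (assumed shape).
    buffer << part.text.value if part.respond_to?(:text) && part.text
  end
end
puts buffer
```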
sig do params(data: T.any(OpenAI::Models::Beta::Threads::Message, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -546,7 +546,7 @@ module OpenAI class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel # Represents a message delta i.e. any changed fields on a message during - # streaming. + # streaming. sig { returns(OpenAI::Models::Beta::Threads::MessageDeltaEvent) } attr_reader :data @@ -557,8 +557,8 @@ module OpenAI attr_accessor :event # Occurs when parts of a - # [Message](https://platform.openai.com/docs/api-reference/messages/object) are - # being streamed. + # [Message](https://platform.openai.com/docs/api-reference/messages/object) are + # being streamed. sig do params( data: T.any(OpenAI::Models::Beta::Threads::MessageDeltaEvent, OpenAI::Internal::AnyHash), @@ -574,7 +574,7 @@ module OpenAI class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } attr_reader :data @@ -585,8 +585,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [message](https://platform.openai.com/docs/api-reference/messages/object) is - # completed. + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # completed. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Message, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -599,7 +599,7 @@ module OpenAI class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } attr_reader :data @@ -610,8 +610,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [message](https://platform.openai.com/docs/api-reference/messages/object) ends - # before it is completed. + # [message](https://platform.openai.com/docs/api-reference/messages/object) ends + # before it is completed. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Message, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -633,8 +633,8 @@ module OpenAI attr_accessor :event # Occurs when an - # [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. - # This can happen due to an internal server error or a timeout. + # [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. + # This can happen due to an internal server error or a timeout. sig do params(data: T.any(OpenAI::Models::ErrorObject, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice.rbi index 71efdfec..4b2436cd 100644 --- a/rbi/lib/openai/models/beta/assistant_tool_choice.rbi +++ b/rbi/lib/openai/models/beta/assistant_tool_choice.rbi @@ -15,7 +15,7 @@ module OpenAI attr_writer :function # Specifies a tool the model should use. Use to force the model to call a specific - # tool. + # tool. 
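The `tool_choice` shapes this class and the option union below describe might be passed like so; the positional thread ID argument to `runs.create` is an assumption about the resource method, but the hash shape comes straight from these docs.

```
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Force the model to call one specific function on this run.
client.beta.threads.runs.create(
  "thread_123",
  assistant_id: "asst_123",
  tool_choice: {type: "function", function: {name: "my_function"}}
)
# Alternatively pass :none / :auto / :required, per AssistantToolChoiceOption::Auto.
```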
sig do params( type: OpenAI::Models::Beta::AssistantToolChoice::Type::OrSymbol, diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi index 88d3b4e5..ce3df8b3 100644 --- a/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi +++ b/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi @@ -4,19 +4,19 @@ module OpenAI module Models module Beta # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. module AssistantToolChoiceOption extend OpenAI::Internal::Type::Union # `none` means the model will not call any tools and instead generates a message. - # `auto` means the model can pick between generating a message or calling one or - # more tools. `required` means the model must call one or more tools before - # responding to the user. + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools before + # responding to the user. module Auto extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/assistant_update_params.rbi b/rbi/lib/openai/models/beta/assistant_update_params.rbi index 10c48190..eb334355 100644 --- a/rbi/lib/openai/models/beta/assistant_update_params.rbi +++ b/rbi/lib/openai/models/beta/assistant_update_params.rbi @@ -12,24 +12,24 @@ module OpenAI attr_accessor :description # The system instructions that the assistant uses. The maximum length is 256,000 - # characters. + # characters. sig { returns(T.nilable(String)) } attr_accessor :instructions # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. 
+ # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.nilable(T.any(String, OpenAI::Models::Beta::AssistantUpdateParams::Model::OrSymbol))) } attr_reader :model @@ -42,33 +42,33 @@ module OpenAI # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(OpenAI::Models::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. 
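A short sketch of acting on the warning above: when enabling JSON mode on an update, pair it with instructions that explicitly ask for JSON, since otherwise the model may stream whitespace until the token limit. The `assistants.update` call shape is assumed.

```
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

client.beta.assistants.update(
  "asst_123",
  response_format: {type: "json_object"},
  # JSON mode requires telling the model to produce JSON, per the docs above.
  instructions: "You summarize support tickets. Always respond with a single JSON object."
)
```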
sig do returns( T.nilable( @@ -84,15 +84,15 @@ module OpenAI attr_accessor :response_format # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } attr_accessor :temperature # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig { returns(T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources)) } attr_reader :tool_resources @@ -105,8 +105,8 @@ module OpenAI attr_writer :tool_resources # A list of tool enabled on the assistant. There can be a maximum of 128 tools per - # assistant. Tools can be of types `code_interpreter`, `file_search`, or - # `function`. + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. sig do returns( T.nilable( @@ -138,10 +138,10 @@ module OpenAI attr_writer :tools # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } attr_accessor :top_p @@ -226,10 +226,10 @@ module OpenAI def to_hash; end # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. module Model extend OpenAI::Internal::Type::Union @@ -318,9 +318,9 @@ module OpenAI attr_writer :file_search # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig do params( code_interpreter: T.any( @@ -346,9 +346,9 @@ module OpenAI class CodeInterpreter < OpenAI::Internal::Type::BaseModel # Overrides the list of - # [file](https://platform.openai.com/docs/api-reference/files) IDs made available - # to the `code_interpreter` tool. There can be a maximum of 20 files associated - # with the tool. 
+ # [file](https://platform.openai.com/docs/api-reference/files) IDs made available + # to the `code_interpreter` tool. There can be a maximum of 20 files associated + # with the tool. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -364,9 +364,9 @@ module OpenAI class FileSearch < OpenAI::Internal::Type::BaseModel # Overrides the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this assistant. There can be a maximum of 1 vector store attached to - # the assistant. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } attr_reader :vector_store_ids diff --git a/rbi/lib/openai/models/beta/file_search_tool.rbi b/rbi/lib/openai/models/beta/file_search_tool.rbi index 6914e6bb..c3df4c59 100644 --- a/rbi/lib/openai/models/beta/file_search_tool.rbi +++ b/rbi/lib/openai/models/beta/file_search_tool.rbi @@ -32,13 +32,13 @@ module OpenAI class FileSearch < OpenAI::Internal::Type::BaseModel # The maximum number of results the file search tool should output. The default is - # 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between - # 1 and 50 inclusive. + # 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between + # 1 and 50 inclusive. # - # Note that the file search tool may output fewer than `max_num_results` results. - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # Note that the file search tool may output fewer than `max_num_results` results. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(Integer)) } attr_reader :max_num_results @@ -46,11 +46,11 @@ module OpenAI attr_writer :max_num_results # The ranking options for the file search. If not specified, the file search tool - # will use the `auto` ranker and a score_threshold of 0. + # will use the `auto` ranker and a score_threshold of 0. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions)) } attr_reader :ranking_options @@ -82,12 +82,12 @@ module OpenAI class RankingOptions < OpenAI::Internal::Type::BaseModel # The score threshold for the file search. All values must be a floating point - # number between 0 and 1. + # number between 0 and 1. sig { returns(Float) } attr_accessor :score_threshold # The ranker to use for the file search. If not specified will use the `auto` - # ranker. + # ranker. sig { returns(T.nilable(OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker::OrSymbol)) } attr_reader :ranker @@ -95,11 +95,11 @@ module OpenAI attr_writer :ranker # The ranking options for the file search. If not specified, the file search tool - # will use the `auto` ranker and a score_threshold of 0. + # will use the `auto` ranker and a score_threshold of 0. 
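A sketch of overriding the `file_search` defaults documented here (the `auto` ranker and a `score_threshold` of 0), capping results within the 1..50 range. The update call shape is assumed; the option keys mirror `FileSearchTool::FileSearch` above.

```
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

client.beta.assistants.update(
  "asst_123",
  tools: [{
    type: "file_search",
    file_search: {
      max_num_results: 10, # must be between 1 and 50 inclusive
      ranking_options: {ranker: "auto", score_threshold: 0.5}
    }
  }]
)
```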
# - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig do params( score_threshold: Float, @@ -121,7 +121,7 @@ module OpenAI def to_hash; end # The ranker to use for the file search. If not specified will use the `auto` - # ranker. + # ranker. module Ranker extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/message_stream_event.rbi b/rbi/lib/openai/models/beta/message_stream_event.rbi index c312e9f2..3f070ad2 100644 --- a/rbi/lib/openai/models/beta/message_stream_event.rbi +++ b/rbi/lib/openai/models/beta/message_stream_event.rbi @@ -4,14 +4,14 @@ module OpenAI module Models module Beta # Occurs when a - # [message](https://platform.openai.com/docs/api-reference/messages/object) is - # created. + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. module MessageStreamEvent extend OpenAI::Internal::Type::Union class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } attr_reader :data @@ -22,8 +22,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [message](https://platform.openai.com/docs/api-reference/messages/object) is - # created. + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Message, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -36,7 +36,7 @@ module OpenAI class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } attr_reader :data @@ -47,8 +47,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [message](https://platform.openai.com/docs/api-reference/messages/object) moves - # to an `in_progress` state. + # [message](https://platform.openai.com/docs/api-reference/messages/object) moves + # to an `in_progress` state. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Message, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -61,7 +61,7 @@ module OpenAI class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel # Represents a message delta i.e. any changed fields on a message during - # streaming. + # streaming. sig { returns(OpenAI::Models::Beta::Threads::MessageDeltaEvent) } attr_reader :data @@ -72,8 +72,8 @@ module OpenAI attr_accessor :event # Occurs when parts of a - # [Message](https://platform.openai.com/docs/api-reference/messages/object) are - # being streamed. + # [Message](https://platform.openai.com/docs/api-reference/messages/object) are + # being streamed. sig do params( data: T.any(OpenAI::Models::Beta::Threads::MessageDeltaEvent, OpenAI::Internal::AnyHash), @@ -89,7 +89,7 @@ module OpenAI class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } attr_reader :data @@ -100,8 +100,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [message](https://platform.openai.com/docs/api-reference/messages/object) is - # completed. + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # completed. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Message, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -114,7 +114,7 @@ module OpenAI class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } attr_reader :data @@ -125,8 +125,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [message](https://platform.openai.com/docs/api-reference/messages/object) ends - # before it is completed. + # [message](https://platform.openai.com/docs/api-reference/messages/object) ends + # before it is completed. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Message, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/run_step_stream_event.rbi b/rbi/lib/openai/models/beta/run_step_stream_event.rbi index 7aff0d4d..c004aab8 100644 --- a/rbi/lib/openai/models/beta/run_step_stream_event.rbi +++ b/rbi/lib/openai/models/beta/run_step_stream_event.rbi @@ -4,8 +4,8 @@ module OpenAI module Models module Beta # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # is created. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. module RunStepStreamEvent extend OpenAI::Internal::Type::Union @@ -21,8 +21,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # is created. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -48,8 +48,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # moves to an `in_progress` state. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # moves to an `in_progress` state. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -65,7 +65,7 @@ module OpenAI class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel # Represents a run step delta i.e. any changed fields on a run step during - # streaming. + # streaming. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) } attr_reader :data @@ -78,8 +78,8 @@ module OpenAI attr_accessor :event # Occurs when parts of a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # are being streamed. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # are being streamed. 
sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, OpenAI::Internal::AnyHash), @@ -105,8 +105,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # is completed. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is completed. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -132,8 +132,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # fails. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # fails. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -159,8 +159,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # is cancelled. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is cancelled. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), @@ -186,8 +186,8 @@ module OpenAI attr_accessor :event # Occurs when a - # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # expires. + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # expires. sig do params( data: T.any(OpenAI::Models::Beta::Threads::Runs::RunStep, OpenAI::Internal::AnyHash), diff --git a/rbi/lib/openai/models/beta/run_stream_event.rbi b/rbi/lib/openai/models/beta/run_stream_event.rbi index de5bcb0f..29f03175 100644 --- a/rbi/lib/openai/models/beta/run_stream_event.rbi +++ b/rbi/lib/openai/models/beta/run_stream_event.rbi @@ -4,13 +4,13 @@ module OpenAI module Models module Beta # Occurs when a new - # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. module RunStreamEvent extend OpenAI::Internal::Type::Union class ThreadRunCreated < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -21,7 +21,7 @@ module OpenAI attr_accessor :event # Occurs when a new - # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -34,7 +34,7 @@ module OpenAI class ThreadRunQueued < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -45,7 +45,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # moves to a `queued` status. + # moves to a `queued` status.
sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -58,7 +58,7 @@ module OpenAI class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -69,7 +69,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # moves to an `in_progress` status. + # moves to an `in_progress` status. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -82,7 +82,7 @@ module OpenAI class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -93,7 +93,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # moves to a `requires_action` status. + # moves to a `requires_action` status. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -106,7 +106,7 @@ module OpenAI class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -117,7 +117,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # is completed. + # is completed. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -130,7 +130,7 @@ module OpenAI class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -141,7 +141,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # ends with status `incomplete`. + # ends with status `incomplete`. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -154,7 +154,7 @@ module OpenAI class ThreadRunFailed < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -165,7 +165,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # fails. + # fails.
sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -178,7 +178,7 @@ module OpenAI class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -189,7 +189,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # moves to a `cancelling` status. + # moves to a `cancelling` status. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -202,7 +202,7 @@ module OpenAI class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -213,7 +213,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # is cancelled. + # is cancelled. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) @@ -226,7 +226,7 @@ module OpenAI class ThreadRunExpired < OpenAI::Internal::Type::BaseModel # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } attr_reader :data @@ -237,7 +237,7 @@ module OpenAI attr_accessor :event # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # expires. + # expires. sig do params(data: T.any(OpenAI::Models::Beta::Threads::Run, OpenAI::Internal::AnyHash), event: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/thread.rbi b/rbi/lib/openai/models/beta/thread.rbi index 0e294e8a..0a393255 100644 --- a/rbi/lib/openai/models/beta/thread.rbi +++ b/rbi/lib/openai/models/beta/thread.rbi @@ -13,11 +13,11 @@ module OpenAI attr_accessor :created_at # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -26,9 +26,9 @@ module OpenAI attr_accessor :object # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool.
For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::Thread::ToolResources)) } attr_reader :tool_resources @@ -41,7 +41,7 @@ module OpenAI attr_writer :tool_resources # Represents a thread that contains - # [messages](https://platform.openai.com/docs/api-reference/messages). + # [messages](https://platform.openai.com/docs/api-reference/messages). sig do params( id: String, @@ -92,9 +92,9 @@ module OpenAI attr_writer :file_search # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig do params( code_interpreter: T.any(OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter, OpenAI::Internal::AnyHash), @@ -117,8 +117,8 @@ module OpenAI class CodeInterpreter < OpenAI::Internal::Type::BaseModel # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -134,9 +134,9 @@ module OpenAI class FileSearch < OpenAI::Internal::Type::BaseModel # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this thread. There can be a maximum of 1 vector store attached to - # the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } attr_reader :vector_store_ids diff --git a/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi b/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi index f9240bfe..bc0633ec 100644 --- a/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi +++ b/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi @@ -8,51 +8,51 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. sig { returns(String) } attr_accessor :assistant_id # Override the default system message of the assistant. This is useful for - # modifying the behavior on a per-run basis. + # modifying the behavior on a per-run basis. sig { returns(T.nilable(String)) } attr_accessor :instructions # The maximum number of completion tokens that may be used over the course of the - # run. The run will make a best effort to use only the number of completion tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # completion tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # run.
The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } attr_accessor :max_completion_tokens # The maximum number of prompt tokens that may be used over the course of the run. - # The run will make a best effort to use only the number of prompt tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # prompt tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of - # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } attr_accessor :max_prompt_tokens # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. sig { returns(T.nilable(T.any(String, OpenAI::Models::ChatModel::OrSymbol))) } attr_accessor :model # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T.nilable(T::Boolean)) } attr_reader :parallel_tool_calls @@ -60,25 +60,25 @@ module OpenAI attr_writer :parallel_tool_calls # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -94,13 +94,13 @@ module OpenAI attr_accessor :response_format # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } attr_accessor :temperature # Options to create a new thread. If no thread is provided when running a request, - # an empty thread will be created. + # an empty thread will be created. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread)) } attr_reader :thread @@ -111,12 +111,12 @@ module OpenAI attr_writer :thread # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. sig do returns( T.nilable( @@ -130,9 +130,9 @@ module OpenAI attr_accessor :tool_choice # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool.
For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources)) } attr_reader :tool_resources @@ -145,7 +145,7 @@ module OpenAI attr_writer :tool_resources # Override the tools the assistant can use for this run. This is useful for - # modifying the behavior on a per-run basis. + # modifying the behavior on a per-run basis. sig do returns( T.nilable( @@ -162,15 +162,15 @@ module OpenAI attr_accessor :tools # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } attr_accessor :top_p # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the initial context window of the run. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy)) } attr_reader :truncation_strategy @@ -294,9 +294,9 @@ module OpenAI def to_hash; end # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. module Model extend OpenAI::Internal::Type::Union @@ -306,7 +306,7 @@ module OpenAI class Thread < OpenAI::Internal::Type::BaseModel # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to - # start the thread with. + # start the thread with. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message])) } attr_reader :messages @@ -319,18 +319,18 @@ module OpenAI attr_writer :messages # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool.
For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources)) } attr_reader :tool_resources @@ -345,7 +345,7 @@ module OpenAI attr_writer :tool_resources # Options to create a new thread. If no thread is provided when running a request, - # an empty thread will be created. + # an empty thread will be created. sig do params( messages: T::Array[T.any(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message, OpenAI::Internal::AnyHash)], @@ -390,10 +390,10 @@ module OpenAI # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Role::OrSymbol) } attr_accessor :role @@ -402,11 +402,11 @@ module OpenAI attr_accessor :attachments # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -490,10 +490,10 @@ module OpenAI # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. module Role extend OpenAI::Internal::Type::Enum @@ -637,9 +637,9 @@ module OpenAI attr_writer :file_search # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool.
For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig do params( code_interpreter: T.any( @@ -668,8 +668,8 @@ module OpenAI class CodeInterpreter < OpenAI::Internal::Type::BaseModel # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -685,9 +685,9 @@ module OpenAI class FileSearch < OpenAI::Internal::Type::BaseModel # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this thread. There can be a maximum of 1 vector store attached to - # the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } attr_reader :vector_store_ids @@ -695,9 +695,9 @@ module OpenAI attr_writer :vector_store_ids # A helper to create a - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # with file_ids and attach it to this thread. There can be a maximum of 1 vector - # store attached to the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. sig do returns( T.nilable( @@ -747,7 +747,7 @@ module OpenAI class VectorStore < OpenAI::Internal::Type::BaseModel # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. sig do returns( T.nilable( @@ -773,8 +773,8 @@ module OpenAI attr_writer :chunking_strategy # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to - # add to the vector store. There can be a maximum of 10000 files in a vector - # store. + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -782,11 +782,11 @@ module OpenAI attr_writer :file_ids # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -820,7 +820,7 @@ module OpenAI def to_hash; end # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. module ChunkingStrategy extend OpenAI::Internal::Type::Union @@ -830,7 +830,7 @@ module OpenAI attr_accessor :type # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - # `800` and `chunk_overlap_tokens` of `400`. + # `800` and `chunk_overlap_tokens` of `400`.
sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto); end @@ -887,12 +887,12 @@ module OpenAI class Static < OpenAI::Internal::Type::BaseModel # The number of tokens that overlap between chunks. The default value is `400`. # - # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } attr_accessor :chunk_overlap_tokens # The maximum number of tokens in each chunk. The default value is `800`. The - # minimum value is `100` and the maximum value is `4096`. + # minimum value is `100` and the maximum value is `4096`. sig { returns(Integer) } attr_accessor :max_chunk_size_tokens @@ -954,9 +954,9 @@ module OpenAI attr_writer :file_search # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig do params( code_interpreter: T.any( @@ -985,8 +985,8 @@ module OpenAI class CodeInterpreter < OpenAI::Internal::Type::BaseModel # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -1002,9 +1002,9 @@ module OpenAI class FileSearch < OpenAI::Internal::Type::BaseModel # The ID of the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this assistant. There can be a maximum of 1 vector store attached to - # the assistant. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } attr_reader :vector_store_ids @@ -1033,19 +1033,19 @@ module OpenAI class TruncationStrategy < OpenAI::Internal::Type::BaseModel # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. sig { returns(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type::OrSymbol) } attr_accessor :type # The number of most recent messages from the thread when constructing the context - # for the run. + # for the run. sig { returns(T.nilable(Integer)) } attr_accessor :last_messages # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the initial context window of the run.
sig do params( type: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type::OrSymbol, @@ -1067,9 +1067,9 @@ module OpenAI def to_hash; end # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. module Type extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/thread_create_params.rbi b/rbi/lib/openai/models/beta/thread_create_params.rbi index 285edbe9..9d73e499 100644 --- a/rbi/lib/openai/models/beta/thread_create_params.rbi +++ b/rbi/lib/openai/models/beta/thread_create_params.rbi @@ -8,7 +8,7 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to - # start the thread with. + # start the thread with. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message])) } attr_reader :messages @@ -21,18 +21,18 @@ module OpenAI attr_writer :messages # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources)) } attr_reader :tool_resources @@ -88,10 +88,10 @@ module OpenAI # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation.
sig { returns(OpenAI::Models::Beta::ThreadCreateParams::Message::Role::OrSymbol) } attr_accessor :role @@ -100,11 +100,11 @@ module OpenAI attr_accessor :attachments # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -183,10 +183,10 @@ module OpenAI # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. module Role extend OpenAI::Internal::Type::Enum @@ -319,9 +319,9 @@ module OpenAI attr_writer :file_search # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig do params( code_interpreter: T.any(OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, OpenAI::Internal::AnyHash), @@ -344,8 +344,8 @@ module OpenAI class CodeInterpreter < OpenAI::Internal::Type::BaseModel # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -361,9 +361,9 @@ module OpenAI class FileSearch < OpenAI::Internal::Type::BaseModel # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this thread. There can be a maximum of 1 vector store attached to - # the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } attr_reader :vector_store_ids @@ -371,9 +371,9 @@ module OpenAI attr_writer :vector_store_ids # A helper to create a - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # with file_ids and attach it to this thread.
There can be a maximum of 1 vector - # store attached to the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore]) @@ -421,7 +421,7 @@ module OpenAI class VectorStore < OpenAI::Internal::Type::BaseModel # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. sig do returns( T.nilable( @@ -447,8 +447,8 @@ module OpenAI attr_writer :chunking_strategy # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to - # add to the vector store. There can be a maximum of 10000 files in a vector - # store. + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -456,11 +456,11 @@ module OpenAI attr_writer :file_ids # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -494,7 +494,7 @@ module OpenAI def to_hash; end # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. module ChunkingStrategy extend OpenAI::Internal::Type::Union @@ -504,7 +504,7 @@ module OpenAI attr_accessor :type # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - # `800` and `chunk_overlap_tokens` of `400`. + # `800` and `chunk_overlap_tokens` of `400`. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto); end @@ -561,12 +561,12 @@ module OpenAI class Static < OpenAI::Internal::Type::BaseModel # The number of tokens that overlap between chunks. The default value is `400`. # - # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } attr_accessor :chunk_overlap_tokens # The maximum number of tokens in each chunk. The default value is `800`. The - # minimum value is `100` and the maximum value is `4096`. + # minimum value is `100` and the maximum value is `4096`. sig { returns(Integer) } attr_accessor :max_chunk_size_tokens diff --git a/rbi/lib/openai/models/beta/thread_stream_event.rbi b/rbi/lib/openai/models/beta/thread_stream_event.rbi index e2268f47..f169b98c 100644 --- a/rbi/lib/openai/models/beta/thread_stream_event.rbi +++ b/rbi/lib/openai/models/beta/thread_stream_event.rbi @@ -5,7 +5,7 @@ module OpenAI module Beta class ThreadStreamEvent < OpenAI::Internal::Type::BaseModel # Represents a thread that contains - # [messages](https://platform.openai.com/docs/api-reference/messages). + # [messages](https://platform.openai.com/docs/api-reference/messages).
sig { returns(OpenAI::Models::Beta::Thread) } attr_reader :data @@ -23,8 +23,8 @@ module OpenAI attr_writer :enabled # Occurs when a new - # [thread](https://platform.openai.com/docs/api-reference/threads/object) is - # created. + # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # created. sig do params( data: T.any(OpenAI::Models::Beta::Thread, OpenAI::Internal::AnyHash), diff --git a/rbi/lib/openai/models/beta/thread_update_params.rbi b/rbi/lib/openai/models/beta/thread_update_params.rbi index 4ec25c94..1eb3f670 100644 --- a/rbi/lib/openai/models/beta/thread_update_params.rbi +++ b/rbi/lib/openai/models/beta/thread_update_params.rbi @@ -8,18 +8,18 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources)) } attr_reader :tool_resources @@ -77,9 +77,9 @@ module OpenAI attr_writer :file_search # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig do params( code_interpreter: T.any(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, OpenAI::Internal::AnyHash), @@ -102,8 +102,8 @@ module OpenAI class CodeInterpreter < OpenAI::Internal::Type::BaseModel # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids @@ -119,9 +119,9 @@ module OpenAI class FileSearch < OpenAI::Internal::Type::BaseModel # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this thread. There can be a maximum of 1 vector store attached to - # the thread.
+ # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } attr_reader :vector_store_ids diff --git a/rbi/lib/openai/models/beta/threads/annotation.rbi b/rbi/lib/openai/models/beta/threads/annotation.rbi index 0b4b796a..6ed86777 100644 --- a/rbi/lib/openai/models/beta/threads/annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/annotation.rbi @@ -5,8 +5,8 @@ module OpenAI module Beta module Threads # A citation within the message that points to a specific quote from a specific - # File associated with the assistant or the message. Generated when the assistant - # uses the "file_search" tool to search files. + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. module Annotation extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/beta/threads/annotation_delta.rbi b/rbi/lib/openai/models/beta/threads/annotation_delta.rbi index 26a9fe9b..deb39180 100644 --- a/rbi/lib/openai/models/beta/threads/annotation_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/annotation_delta.rbi @@ -5,8 +5,8 @@ module OpenAI module Beta module Threads # A citation within the message that points to a specific quote from a specific - # File associated with the assistant or the message. Generated when the assistant - # uses the "file_search" tool to search files. + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. module AnnotationDelta extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi index e29ed266..a9b10bb9 100644 --- a/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi @@ -31,8 +31,8 @@ module OpenAI attr_accessor :type # A citation within the message that points to a specific quote from a specific - # File associated with the assistant or the message. Generated when the assistant - # uses the "file_search" tool to search files. + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. sig do params( end_index: Integer, diff --git a/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi index 3a09db06..1d28545a 100644 --- a/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi @@ -44,8 +44,8 @@ module OpenAI attr_writer :text # A citation within the message that points to a specific quote from a specific - # File associated with the assistant or the message. Generated when the assistant - # uses the "file_search" tool to search files. + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files.
sig do params( index: Integer, diff --git a/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi index 93c25e4e..87953109 100644 --- a/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi @@ -31,7 +31,7 @@ module OpenAI attr_accessor :type # A URL for the file that's generated when the assistant used the - # `code_interpreter` tool to generate a file. + # `code_interpreter` tool to generate a file. sig do params( end_index: Integer, diff --git a/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi index e8f4b3d2..4c2c8c36 100644 --- a/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi @@ -44,7 +44,7 @@ module OpenAI attr_writer :text # A URL for the file that's generated when the assistant used the - # `code_interpreter` tool to generate a file. + # `code_interpreter` tool to generate a file. sig do params( index: Integer, diff --git a/rbi/lib/openai/models/beta/threads/image_file.rbi b/rbi/lib/openai/models/beta/threads/image_file.rbi index 25343dd0..788ca5de 100644 --- a/rbi/lib/openai/models/beta/threads/image_file.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file.rbi @@ -6,13 +6,13 @@ module OpenAI module Threads class ImageFile < OpenAI::Internal::Type::BaseModel # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image - # in the message content. Set `purpose="vision"` when uploading the File if you - # need to later display the file content. + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. sig { returns(String) } attr_accessor :file_id # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. + # fewer tokens, you can opt in to high resolution using `high`. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::ImageFile::Detail::OrSymbol)) } attr_reader :detail @@ -29,7 +29,7 @@ module OpenAI def to_hash; end # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. + # fewer tokens, you can opt in to high resolution using `high`. module Detail extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi b/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi index b50f4624..90524b05 100644 --- a/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi @@ -16,7 +16,7 @@ module OpenAI attr_accessor :type # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. + # in the content of a message.
sig do params( image_file: T.any(OpenAI::Models::Beta::Threads::ImageFile, OpenAI::Internal::AnyHash), diff --git a/rbi/lib/openai/models/beta/threads/image_file_delta.rbi b/rbi/lib/openai/models/beta/threads/image_file_delta.rbi index da45783e..d0d866ed 100644 --- a/rbi/lib/openai/models/beta/threads/image_file_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file_delta.rbi @@ -6,7 +6,7 @@ module OpenAI module Threads class ImageFileDelta < OpenAI::Internal::Type::BaseModel # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. + # fewer tokens, you can opt in to high resolution using `high`. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::ImageFileDelta::Detail::TaggedSymbol)) } attr_reader :detail @@ -14,8 +14,8 @@ module OpenAI attr_writer :detail # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image - # in the message content. Set `purpose="vision"` when uploading the File if you - # need to later display the file content. + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. sig { returns(T.nilable(String)) } attr_reader :file_id @@ -35,7 +35,7 @@ module OpenAI def to_hash; end # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. + # fewer tokens, you can opt in to high resolution using `high`. module Detail extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi b/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi index 4dfa0c31..74be2ead 100644 --- a/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi @@ -20,7 +20,7 @@ module OpenAI attr_writer :image_file # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. + # in the content of a message. sig do params( index: Integer, diff --git a/rbi/lib/openai/models/beta/threads/image_url.rbi b/rbi/lib/openai/models/beta/threads/image_url.rbi index d0e2b929..27f59579 100644 --- a/rbi/lib/openai/models/beta/threads/image_url.rbi +++ b/rbi/lib/openai/models/beta/threads/image_url.rbi @@ -6,12 +6,12 @@ module OpenAI module Threads class ImageURL < OpenAI::Internal::Type::BaseModel # The external URL of the image, must be a supported image types: jpeg, jpg, png, - # gif, webp. + # gif, webp. sig { returns(String) } attr_accessor :url # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. Default value is `auto` + # to high resolution using `high`. Default value is `auto` sig { returns(T.nilable(OpenAI::Models::Beta::Threads::ImageURL::Detail::OrSymbol)) } attr_reader :detail @@ -28,7 +28,7 @@ module OpenAI def to_hash; end # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. Default value is `auto` + # to high resolution using `high`.
Default value is `auto` module Detail extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/image_url_delta.rbi b/rbi/lib/openai/models/beta/threads/image_url_delta.rbi index 5efc1733..69c06976 100644 --- a/rbi/lib/openai/models/beta/threads/image_url_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/image_url_delta.rbi @@ -6,7 +6,7 @@ module OpenAI module Threads class ImageURLDelta < OpenAI::Internal::Type::BaseModel # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. + # to high resolution using `high`. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::ImageURLDelta::Detail::TaggedSymbol)) } attr_reader :detail @@ -14,7 +14,7 @@ module OpenAI attr_writer :detail # The URL of the image, must be a supported image types: jpeg, jpg, png, gif, - # webp. + # webp. sig { returns(T.nilable(String)) } attr_reader :url @@ -33,7 +33,7 @@ module OpenAI def to_hash; end # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. + # to high resolution using `high`. module Detail extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/message.rbi b/rbi/lib/openai/models/beta/threads/message.rbi index 29e9be2a..ae809809 100644 --- a/rbi/lib/openai/models/beta/threads/message.rbi +++ b/rbi/lib/openai/models/beta/threads/message.rbi @@ -10,8 +10,8 @@ module OpenAI attr_accessor :id # If applicable, the ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) that - # authored this message. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) that + # authored this message. sig { returns(T.nilable(String)) } attr_accessor :assistant_id @@ -59,11 +59,11 @@ module OpenAI attr_writer :incomplete_details # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -76,23 +76,23 @@ module OpenAI attr_accessor :role # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) - # associated with the creation of this message. Value is `null` when messages are - # created manually using the create message or create thread endpoints. + # associated with the creation of this message. Value is `null` when messages are + # created manually using the create message or create thread endpoints. sig { returns(T.nilable(String)) } attr_accessor :run_id # The status of the message, which can be either `in_progress`, `incomplete`, or - # `completed`. + # `completed`. sig { returns(OpenAI::Models::Beta::Threads::Message::Status::TaggedSymbol) } attr_accessor :status # The [thread](https://platform.openai.com/docs/api-reference/threads) ID that - # this message belongs to. + # this message belongs to. sig { returns(String) } attr_accessor :thread_id # Represents a message within a - # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ # [thread](https://platform.openai.com/docs/api-reference/threads). sig do params( id: String, @@ -319,7 +319,7 @@ module OpenAI end # The status of the message, which can be either `in_progress`, `incomplete`, or - # `completed`. + # `completed`. module Status extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/message_content.rbi b/rbi/lib/openai/models/beta/threads/message_content.rbi index 148ce53f..99a0c488 100644 --- a/rbi/lib/openai/models/beta/threads/message_content.rbi +++ b/rbi/lib/openai/models/beta/threads/message_content.rbi @@ -5,7 +5,7 @@ module OpenAI module Beta module Threads # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. + # in the content of a message. module MessageContent extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/beta/threads/message_content_delta.rbi b/rbi/lib/openai/models/beta/threads/message_content_delta.rbi index c1dba8a3..a6ad0c14 100644 --- a/rbi/lib/openai/models/beta/threads/message_content_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/message_content_delta.rbi @@ -5,7 +5,7 @@ module OpenAI module Beta module Threads # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. + # in the content of a message. module MessageContentDelta extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi b/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi index 012559c9..fbe37283 100644 --- a/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi +++ b/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi @@ -5,7 +5,7 @@ module OpenAI module Beta module Threads # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. + # in the content of a message. module MessageContentPartParam extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/beta/threads/message_create_params.rbi b/rbi/lib/openai/models/beta/threads/message_create_params.rbi index 32a91cfd..94ce2228 100644 --- a/rbi/lib/openai/models/beta/threads/message_create_params.rbi +++ b/rbi/lib/openai/models/beta/threads/message_create_params.rbi @@ -27,10 +27,10 @@ module OpenAI # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(OpenAI::Models::Beta::Threads::MessageCreateParams::Role::OrSymbol) } attr_accessor :role @@ -39,11 +39,11 @@ module OpenAI attr_accessor :attachments # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. 
# - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -124,10 +124,10 @@ module OpenAI # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. module Role extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/message_delta_event.rbi b/rbi/lib/openai/models/beta/threads/message_delta_event.rbi index 359c70cd..f5e5b29b 100644 --- a/rbi/lib/openai/models/beta/threads/message_delta_event.rbi +++ b/rbi/lib/openai/models/beta/threads/message_delta_event.rbi @@ -21,7 +21,7 @@ module OpenAI attr_accessor :object # Represents a message delta i.e. any changed fields on a message during - # streaming. + # streaming. sig do params( id: String, diff --git a/rbi/lib/openai/models/beta/threads/message_list_params.rbi b/rbi/lib/openai/models/beta/threads/message_list_params.rbi index 66577b99..11ea50e8 100644 --- a/rbi/lib/openai/models/beta/threads/message_list_params.rbi +++ b/rbi/lib/openai/models/beta/threads/message_list_params.rbi @@ -9,9 +9,9 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } attr_reader :after @@ -19,9 +19,9 @@ module OpenAI attr_writer :after # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } attr_reader :before @@ -29,7 +29,7 @@ module OpenAI attr_writer :before # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } attr_reader :limit @@ -37,7 +37,7 @@ module OpenAI attr_writer :limit # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. 
+ # order and `desc` for descending order.
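(Illustrative aside, not part of the patch: a hedged sketch of the cursor pagination that `after`, `before`, `limit`, and `order` describe above; `client` and `thread_id` are placeholders as in the earlier sketch.)

```ruby
# First page, newest first.
page = client.beta.threads.messages.list(thread_id, limit: 20, order: :desc)
page.data.each { |message| puts message.id }

# `after` resumes from the last object seen, fetching the next page.
client.beta.threads.messages.list(thread_id, after: page.data.last.id)
```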
sig { returns(T.nilable(OpenAI::Models::Beta::Threads::MessageListParams::Order::OrSymbol)) } attr_reader :order @@ -80,7 +80,7 @@ module OpenAI def to_hash; end # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/message_update_params.rbi b/rbi/lib/openai/models/beta/threads/message_update_params.rbi index 90b4db86..09e67673 100644 --- a/rbi/lib/openai/models/beta/threads/message_update_params.rbi +++ b/rbi/lib/openai/models/beta/threads/message_update_params.rbi @@ -12,11 +12,11 @@ module OpenAI attr_accessor :thread_id # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata diff --git a/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi b/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi index 6aa38a1e..61ec2da8 100644 --- a/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi @@ -6,9 +6,9 @@ module OpenAI module Threads class RequiredActionFunctionToolCall < OpenAI::Internal::Type::BaseModel # The ID of the tool call. This ID must be referenced when you submit the tool - # outputs in using the - # [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) - # endpoint. + # outputs in using the + # [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # endpoint. sig { returns(String) } attr_accessor :id @@ -25,7 +25,7 @@ module OpenAI attr_writer :function # The type of tool call the output is required for. For now, this is always - # `function`. + # `function`. sig { returns(Symbol) } attr_accessor :type diff --git a/rbi/lib/openai/models/beta/threads/run.rbi b/rbi/lib/openai/models/beta/threads/run.rbi index 7170660e..fd1569df 100644 --- a/rbi/lib/openai/models/beta/threads/run.rbi +++ b/rbi/lib/openai/models/beta/threads/run.rbi @@ -10,8 +10,8 @@ module OpenAI attr_accessor :id # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # execution of this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # execution of this run. sig { returns(String) } attr_accessor :assistant_id @@ -36,7 +36,7 @@ module OpenAI attr_accessor :failed_at # Details on why the run is incomplete. Will be `null` if the run is not - # incomplete. + # incomplete. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::IncompleteDetails)) } attr_reader :incomplete_details @@ -49,8 +49,8 @@ module OpenAI attr_writer :incomplete_details # The instructions that the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # this run. 
+ # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. sig { returns(String) } attr_accessor :instructions @@ -67,27 +67,27 @@ module OpenAI attr_writer :last_error # The maximum number of completion tokens specified to have been used over the - # course of the run. + # course of the run. sig { returns(T.nilable(Integer)) } attr_accessor :max_completion_tokens # The maximum number of prompt tokens specified to have been used over the course - # of the run. + # of the run. sig { returns(T.nilable(Integer)) } attr_accessor :max_prompt_tokens # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata # The model that the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. sig { returns(String) } attr_accessor :model @@ -96,13 +96,13 @@ module OpenAI attr_accessor :object # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T::Boolean) } attr_accessor :parallel_tool_calls # Details on the action required to continue the run. Will be `null` if no action - # is required. + # is required. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::RequiredAction)) } attr_reader :required_action @@ -115,25 +115,25 @@ module OpenAI attr_writer :required_action # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. 
# - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -153,23 +153,23 @@ module OpenAI attr_accessor :started_at # The status of the run, which can be either `queued`, `in_progress`, - # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, - # `incomplete`, or `expired`. + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. sig { returns(OpenAI::Models::Beta::Threads::RunStatus::TaggedSymbol) } attr_accessor :status # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) - # that was executed on as a part of this run. + # that was executed on as a part of this run. sig { returns(String) } attr_accessor :thread_id # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. sig do returns( T.nilable( @@ -183,8 +183,8 @@ module OpenAI attr_accessor :tool_choice # The list of tools that the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. sig do returns( T::Array[ @@ -199,7 +199,7 @@ module OpenAI attr_accessor :tools # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the intial context window of the run. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::TruncationStrategy)) } attr_reader :truncation_strategy @@ -212,7 +212,7 @@ module OpenAI attr_writer :truncation_strategy # Usage statistics related to the run. 
This value will be `null` if the run is not - # in a terminal state (i.e. `in_progress`, `queued`, etc.). + # in a terminal state (i.e. `in_progress`, `queued`, etc.). sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::Usage)) } attr_reader :usage @@ -228,7 +228,7 @@ module OpenAI attr_accessor :top_p # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). sig do params( id: String, @@ -367,7 +367,7 @@ module OpenAI class IncompleteDetails < OpenAI::Internal::Type::BaseModel # The reason why the run is incomplete. This will point to which specific token - # limit was reached over the course of the run. + # limit was reached over the course of the run. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason::TaggedSymbol)) } attr_reader :reason @@ -375,7 +375,7 @@ module OpenAI attr_writer :reason # Details on why the run is incomplete. Will be `null` if the run is not - # incomplete. + # incomplete. sig do params(reason: OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason::OrSymbol) .returns(T.attached_class) @@ -386,7 +386,7 @@ module OpenAI def to_hash; end # The reason why the run is incomplete. This will point to which specific token - # limit was reached over the course of the run. + # limit was reached over the course of the run. module Reason extend OpenAI::Internal::Type::Enum @@ -464,7 +464,7 @@ module OpenAI attr_accessor :type # Details on the action required to continue the run. Will be `null` if no action - # is required. + # is required. sig do params( submit_tool_outputs: T.any(OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs, OpenAI::Internal::AnyHash), @@ -503,19 +503,19 @@ module OpenAI class TruncationStrategy < OpenAI::Internal::Type::BaseModel # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. sig { returns(OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type::TaggedSymbol) } attr_accessor :type # The number of most recent messages from the thread when constructing the context - # for the run. + # for the run. sig { returns(T.nilable(Integer)) } attr_accessor :last_messages # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the intial context window of the run. sig do params( type: OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type::OrSymbol, @@ -537,9 +537,9 @@ module OpenAI def to_hash; end # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. 
When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. module Type extend OpenAI::Internal::Type::Enum @@ -571,7 +571,7 @@ module OpenAI attr_accessor :total_tokens # Usage statistics related to the run. This value will be `null` if the run is not - # in a terminal state (i.e. `in_progress`, `queued`, etc.). + # in a terminal state (i.e. `in_progress`, `queued`, etc.). sig do params( completion_tokens: Integer, diff --git a/rbi/lib/openai/models/beta/threads/run_create_params.rbi b/rbi/lib/openai/models/beta/threads/run_create_params.rbi index 58c097df..bfa2e42d 100644 --- a/rbi/lib/openai/models/beta/threads/run_create_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_create_params.rbi @@ -9,18 +9,18 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. sig { returns(String) } attr_accessor :assistant_id # A list of additional fields to include in the response. Currently the only - # supported value is `step_details.tool_calls[*].file_search.results[*].content` - # to fetch the file search result content. + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::Runs::RunStepInclude::OrSymbol])) } attr_reader :include @@ -28,8 +28,8 @@ module OpenAI attr_writer :include # Appends additional instructions at the end of the instructions for the run. This - # is useful for modifying the behavior on a per-run basis without overriding other - # instructions. + # is useful for modifying the behavior on a per-run basis without overriding other + # instructions. sig { returns(T.nilable(String)) } attr_accessor :additional_instructions @@ -38,46 +38,46 @@ module OpenAI attr_accessor :additional_messages # Overrides the - # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) - # of the assistant. This is useful for modifying the behavior on a per-run basis. + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. sig { returns(T.nilable(String)) } attr_accessor :instructions # The maximum number of completion tokens that may be used over the course of the - # run. The run will make a best effort to use only the number of completion tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # completion tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. 
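(Illustrative aside, not part of the patch: a hedged sketch of capping token usage for a run and inspecting why it ended, per the `max_*_tokens` and `incomplete_details` comments above. `client`, `thread_id`, and `assistant_id` are placeholders.)

```ruby
run = client.beta.threads.runs.create(
  thread_id,
  assistant_id: assistant_id,
  max_prompt_tokens: 2_000,
  max_completion_tokens: 512
)

# If either budget is exceeded, the run ends with status `incomplete`.
puts run.incomplete_details&.reason if run.status == :incomplete
```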
sig { returns(T.nilable(Integer)) } attr_accessor :max_completion_tokens # The maximum number of prompt tokens that may be used over the course of the run. - # The run will make a best effort to use only the number of prompt tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # prompt tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } attr_accessor :max_prompt_tokens # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. sig { returns(T.nilable(T.any(String, OpenAI::Models::ChatModel::OrSymbol))) } attr_accessor :model # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T.nilable(T::Boolean)) } attr_reader :parallel_tool_calls @@ -86,33 +86,33 @@ module OpenAI # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(OpenAI::Models::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+ # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -128,18 +128,18 @@ module OpenAI attr_accessor :response_format # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } attr_accessor :temperature # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. 
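(Illustrative aside, not part of the patch: a hedged sketch of the `tool_choice` shapes documented above, forcing one particular function tool; `my_function` is the doc comment's own example name.)

```ruby
run = client.beta.threads.runs.create(
  thread_id,
  assistant_id: assistant_id,
  tool_choice: {type: "function", function: {name: "my_function"}},
  temperature: 0.2 # alter this or `top_p`, not both, per the comment above
)
```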
sig do returns( T.nilable( @@ -153,7 +153,7 @@ module OpenAI attr_accessor :tool_choice # Override the tools the assistant can use for this run. This is useful for - # modifying the behavior on a per-run basis. + # modifying the behavior on a per-run basis. sig do returns( T.nilable( @@ -170,15 +170,15 @@ module OpenAI attr_accessor :tools # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } attr_accessor :top_p # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the intial context window of the run. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy)) } attr_reader :truncation_strategy @@ -329,10 +329,10 @@ module OpenAI # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Role::OrSymbol) } attr_accessor :role @@ -345,11 +345,11 @@ module OpenAI attr_accessor :attachments # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -433,10 +433,10 @@ module OpenAI # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. 
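(Illustrative aside, not part of the patch: a hedged sketch of seeding a run with an extra exchange via `additional_messages`, using the two roles documented above; IDs are placeholders.)

```ruby
run = client.beta.threads.runs.create(
  thread_id,
  assistant_id: assistant_id,
  additional_messages: [
    # `user`: an actual end-user message.
    {role: :user, content: "What did we decide about pricing?"},
    # `assistant`: inserted as if the assistant had said it.
    {role: :assistant, content: "We agreed to revisit pricing next quarter."}
  ]
)
```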
module Role extend OpenAI::Internal::Type::Enum @@ -558,9 +558,9 @@ module OpenAI end # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. module Model extend OpenAI::Internal::Type::Union @@ -570,19 +570,19 @@ module OpenAI class TruncationStrategy < OpenAI::Internal::Type::BaseModel # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. sig { returns(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type::OrSymbol) } attr_accessor :type # The number of most recent messages from the thread when constructing the context - # for the run. + # for the run. sig { returns(T.nilable(Integer)) } attr_accessor :last_messages # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the intial context window of the run. sig do params( type: OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type::OrSymbol, @@ -604,9 +604,9 @@ module OpenAI def to_hash; end # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. module Type extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/run_list_params.rbi b/rbi/lib/openai/models/beta/threads/run_list_params.rbi index e266f66c..0d5a35b2 100644 --- a/rbi/lib/openai/models/beta/threads/run_list_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_list_params.rbi @@ -9,9 +9,9 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } attr_reader :after @@ -19,9 +19,9 @@ module OpenAI attr_writer :after # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. 
For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } attr_reader :before @@ -29,7 +29,7 @@ module OpenAI attr_writer :before # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } attr_reader :limit @@ -37,7 +37,7 @@ module OpenAI attr_writer :limit # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::RunListParams::Order::OrSymbol)) } attr_reader :order @@ -71,7 +71,7 @@ module OpenAI def to_hash; end # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/run_status.rbi b/rbi/lib/openai/models/beta/threads/run_status.rbi index 5de3efc2..b9c1490b 100644 --- a/rbi/lib/openai/models/beta/threads/run_status.rbi +++ b/rbi/lib/openai/models/beta/threads/run_status.rbi @@ -5,8 +5,8 @@ module OpenAI module Beta module Threads # The status of the run, which can be either `queued`, `in_progress`, - # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, - # `incomplete`, or `expired`. + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. module RunStatus extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi b/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi index 2ae4e56d..e347bb7b 100644 --- a/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi @@ -46,7 +46,7 @@ module OpenAI attr_writer :output # The ID of the tool call in the `required_action` object within the run object - # the output is being submitted for. + # the output is being submitted for. sig { returns(T.nilable(String)) } attr_reader :tool_call_id diff --git a/rbi/lib/openai/models/beta/threads/run_update_params.rbi b/rbi/lib/openai/models/beta/threads/run_update_params.rbi index 28bb34de..9c4e4bb2 100644 --- a/rbi/lib/openai/models/beta/threads/run_update_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_update_params.rbi @@ -12,11 +12,11 @@ module OpenAI attr_accessor :thread_id # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. 
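(Illustrative aside, not part of the patch: a hedged sketch of the metadata limits documented above, at most 16 pairs with keys up to 64 characters and values up to 512, applied to a run update; IDs are placeholders.)

```ruby
client.beta.threads.runs.update(
  run_id,
  thread_id: thread_id,
  metadata: {batch: "2024-06", reviewed: "false"} # values are strings
)
```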
sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi index 72557474..3cc75c96 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi @@ -45,7 +45,7 @@ module OpenAI class Image < OpenAI::Internal::Type::BaseModel # The [file](https://platform.openai.com/docs/api-reference/files) ID of the - # image. + # image. sig { returns(T.nilable(String)) } attr_reader :file_id diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi index 82827731..898db2f2 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi @@ -26,7 +26,7 @@ module OpenAI attr_writer :code_interpreter # The type of tool call. This is always going to be `code_interpreter` for this - # type of tool call. + # type of tool call. sig { returns(Symbol) } attr_accessor :type @@ -62,8 +62,8 @@ module OpenAI attr_accessor :input # The outputs from the Code Interpreter tool call. Code Interpreter can output one - # or more items, including text (`logs`) or images (`image`). Each of these are - # represented by a different object type. + # or more items, including text (`logs`) or images (`image`). Each of these are + # represented by a different object type. sig do returns( T::Array[ @@ -177,7 +177,7 @@ module OpenAI class Image < OpenAI::Internal::Type::BaseModel # The [file](https://platform.openai.com/docs/api-reference/files) ID of the - # image. + # image. sig { returns(String) } attr_accessor :file_id diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi index 0864612a..06162584 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi @@ -11,7 +11,7 @@ module OpenAI attr_accessor :index # The type of tool call. This is always going to be `code_interpreter` for this - # type of tool call. + # type of tool call. sig { returns(Symbol) } attr_accessor :type @@ -74,8 +74,8 @@ module OpenAI attr_writer :input # The outputs from the Code Interpreter tool call. Code Interpreter can output one - # or more items, including text (`logs`) or images (`image`). Each of these are - # represented by a different object type. + # or more items, including text (`logs`) or images (`image`). Each of these are + # represented by a different object type. sig do returns( T.nilable( diff --git a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi index 871e455f..b11ca61e 100644 --- a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi @@ -23,7 +23,7 @@ module OpenAI attr_writer :file_search # The type of tool call. This is always going to be `file_search` for this type of - # tool call. + # tool call. sig { returns(Symbol) } attr_accessor :type @@ -109,7 +109,7 @@ module OpenAI class RankingOptions < OpenAI::Internal::Type::BaseModel # The ranker to use for the file search. 
If not specified will use the `auto` - # ranker. + # ranker. sig do returns( OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker::TaggedSymbol @@ -118,7 +118,7 @@ module OpenAI attr_accessor :ranker # The score threshold for the file search. All values must be a floating point - # number between 0 and 1. + # number between 0 and 1. sig { returns(Float) } attr_accessor :score_threshold @@ -144,7 +144,7 @@ module OpenAI def to_hash; end # The ranker to use for the file search. If not specified will use the `auto` - # ranker. + # ranker. module Ranker extend OpenAI::Internal::Type::Enum @@ -190,12 +190,12 @@ module OpenAI attr_accessor :file_name # The score of the result. All values must be a floating point number between 0 - # and 1. + # and 1. sig { returns(Float) } attr_accessor :score # The content of the result that was found. The content is only included if - # requested via the include query parameter. + # requested via the include query parameter. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content]) diff --git a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi index 2714a161..80a2db89 100644 --- a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi @@ -15,7 +15,7 @@ module OpenAI attr_accessor :index # The type of tool call. This is always going to be `file_search` for this type of - # tool call. + # tool call. sig { returns(Symbol) } attr_accessor :type diff --git a/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi index 2454344b..12d00e7e 100644 --- a/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi @@ -23,7 +23,7 @@ module OpenAI attr_writer :function # The type of tool call. This is always going to be `function` for this type of - # tool call. + # tool call. sig { returns(Symbol) } attr_accessor :type @@ -55,8 +55,8 @@ module OpenAI attr_accessor :name # The output of the function. This will be `null` if the outputs have not been - # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) - # yet. + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. sig { returns(T.nilable(String)) } attr_accessor :output diff --git a/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi index f275cddb..df5bcf34 100644 --- a/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi @@ -11,7 +11,7 @@ module OpenAI attr_accessor :index # The type of tool call. This is always going to be `function` for this type of - # tool call. + # tool call. sig { returns(Symbol) } attr_accessor :type @@ -74,8 +74,8 @@ module OpenAI attr_writer :name # The output of the function. This will be `null` if the outputs have not been - # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) - # yet. + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. 
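(Illustrative aside, not part of the patch: a hedged sketch of the flow the comment above implies: a function tool call's `output` stays `null` until results are submitted back via the referenced endpoint. `run` and `thread_id` are placeholders.)

```ruby
tool_call = run.required_action.submit_tool_outputs.tool_calls.first
client.beta.threads.runs.submit_tool_outputs(
  run.id,
  thread_id: thread_id,
  tool_outputs: [{tool_call_id: tool_call.id, output: "72°F and sunny"}]
)
```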
sig { returns(T.nilable(String)) } attr_accessor :output diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step.rbi index a5ddcb52..7519ba3f 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step.rbi @@ -11,8 +11,8 @@ module OpenAI attr_accessor :id # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) - # associated with the run step. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) + # associated with the run step. sig { returns(String) } attr_accessor :assistant_id @@ -29,7 +29,7 @@ module OpenAI attr_accessor :created_at # The Unix timestamp (in seconds) for when the run step expired. A step is - # considered expired if the parent run is expired. + # considered expired if the parent run is expired. sig { returns(T.nilable(Integer)) } attr_accessor :expired_at @@ -38,7 +38,7 @@ module OpenAI attr_accessor :failed_at # The last error associated with this run step. Will be `null` if there are no - # errors. + # errors. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::LastError)) } attr_reader :last_error @@ -51,11 +51,11 @@ module OpenAI attr_writer :last_error # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -64,12 +64,12 @@ module OpenAI attr_accessor :object # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that - # this run step is a part of. + # this run step is a part of. sig { returns(String) } attr_accessor :run_id # The status of the run step, which can be either `in_progress`, `cancelled`, - # `failed`, `completed`, or `expired`. + # `failed`, `completed`, or `expired`. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep::Status::TaggedSymbol) } attr_accessor :status @@ -85,7 +85,7 @@ module OpenAI attr_accessor :step_details # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) - # that was run. + # that was run. sig { returns(String) } attr_accessor :thread_id @@ -94,7 +94,7 @@ module OpenAI attr_accessor :type # Usage statistics related to the run step. This value will be `null` while the - # run step's status is `in_progress`. + # run step's status is `in_progress`. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::Usage)) } attr_reader :usage @@ -188,7 +188,7 @@ module OpenAI attr_accessor :message # The last error associated with this run step. Will be `null` if there are no - # errors. + # errors. sig do params(code: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::Code::OrSymbol, message: String) .returns(T.attached_class) @@ -223,7 +223,7 @@ module OpenAI end # The status of the run step, which can be either `in_progress`, `cancelled`, - # `failed`, `completed`, or `expired`. + # `failed`, `completed`, or `expired`. 
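(Illustrative aside, not part of the patch: a hedged sketch of listing a run's steps and reading the status enum documented above, one of `in_progress`, `cancelled`, `failed`, `completed`, or `expired`; IDs are placeholders.)

```ruby
steps = client.beta.threads.runs.steps.list(run_id, thread_id: thread_id)
steps.data.each { |step| puts "#{step.id}: #{step.status} (#{step.type})" }
```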
module Status extend OpenAI::Internal::Type::Enum @@ -284,7 +284,7 @@ module OpenAI attr_accessor :total_tokens # Usage statistics related to the run step. This value will be `null` while the - # run step's status is `in_progress`. + # run step's status is `in_progress`. sig do params( completion_tokens: Integer, diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi index 4fb76535..e1124389 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi @@ -22,7 +22,7 @@ module OpenAI attr_accessor :object # Represents a run step delta i.e. any changed fields on a run step during - # streaming. + # streaming. sig do params( id: String, diff --git a/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi b/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi index d9609bca..abba06e2 100644 --- a/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi @@ -13,9 +13,9 @@ module OpenAI attr_accessor :thread_id # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } attr_reader :after @@ -23,9 +23,9 @@ module OpenAI attr_writer :after # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } attr_reader :before @@ -33,12 +33,12 @@ module OpenAI attr_writer :before # A list of additional fields to include in the response. Currently the only - # supported value is `step_details.tool_calls[*].file_search.results[*].content` - # to fetch the file search result content. + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::Runs::RunStepInclude::OrSymbol])) } attr_reader :include @@ -46,7 +46,7 @@ module OpenAI attr_writer :include # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } attr_reader :limit @@ -54,7 +54,7 @@ module OpenAI attr_writer :limit # Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::StepListParams::Order::OrSymbol)) } attr_reader :order @@ -101,7 +101,7 @@ module OpenAI def to_hash; end # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi b/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi index 827fe79e..c4b7c54d 100644 --- a/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi @@ -16,12 +16,12 @@ module OpenAI attr_accessor :run_id # A list of additional fields to include in the response. Currently the only - # supported value is `step_details.tool_calls[*].file_search.results[*].content` - # to fetch the file search result content. + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::Runs::RunStepInclude::OrSymbol])) } attr_reader :include diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi index 9b96f0e3..796b549d 100644 --- a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi @@ -11,8 +11,8 @@ module OpenAI attr_accessor :type # An array of tool calls the run step was involved in. These can be associated - # with one of three types of tools: `code_interpreter`, `file_search`, or - # `function`. + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. sig do returns( T.nilable( diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi index 7eb65800..34fdc65c 100644 --- a/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi @@ -7,8 +7,8 @@ module OpenAI module Runs class ToolCallsStepDetails < OpenAI::Internal::Type::BaseModel # An array of tool calls the run step was involved in. These can be associated - # with one of three types of tools: `code_interpreter`, `file_search`, or - # `function`. + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. sig do returns( T::Array[ diff --git a/rbi/lib/openai/models/chat/chat_completion.rbi b/rbi/lib/openai/models/chat/chat_completion.rbi index c871533c..e1e91dfc 100644 --- a/rbi/lib/openai/models/chat/chat_completion.rbi +++ b/rbi/lib/openai/models/chat/chat_completion.rbi @@ -9,7 +9,7 @@ module OpenAI attr_accessor :id # A list of chat completion choices. Can be more than one if `n` is greater - # than 1. + # than 1. 
+ # than 1.
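The `after`/`before` cursor semantics spelled out for `StepListParams` above translate into a simple manual pagination loop. A sketch under two assumptions: the generated `steps.list` method takes the run ID positionally, and the returned page exposes `data` and `has_more`:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    after = nil
    loop do
      page = client.beta.threads.runs.steps.list(
        "run_abc123",               # placeholder run ID
        thread_id: "thread_abc123", # placeholder thread ID
        order: :asc,
        limit: 20,
        after: after
      )
      page.data.each { |step| puts step.id }
      break unless page.has_more
      after = page.data.last.id # pass the last object ID to fetch the next page
    end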
sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletion::Choice]) } attr_accessor :choices @@ -31,8 +31,8 @@ module OpenAI # This fingerprint represents the backend configuration that the model runs with. # - # Can be used in conjunction with the `seed` request parameter to understand when - # backend changes have been made that might impact determinism. + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. sig { returns(T.nilable(String)) } attr_reader :system_fingerprint @@ -47,7 +47,7 @@ module OpenAI attr_writer :usage # Represents a chat completion response returned by model, based on the provided - # input. + # input. sig do params( id: String, @@ -90,11 +90,11 @@ module OpenAI class Choice < OpenAI::Internal::Type::BaseModel # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. sig { returns(OpenAI::Models::Chat::ChatCompletion::Choice::FinishReason::TaggedSymbol) } attr_accessor :finish_reason @@ -146,11 +146,11 @@ module OpenAI def to_hash; end # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. module FinishReason extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi index 8da1d510..436f9a60 100644 --- a/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi @@ -9,7 +9,7 @@ module OpenAI attr_accessor :role # Data about a previous audio response from the model. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio)) } attr_reader :audio @@ -24,7 +24,7 @@ module OpenAI attr_writer :audio # The contents of the assistant message. Required unless `tool_calls` or - # `function_call` is specified. + # `function_call` is specified. 
+ # `function_call` is specified.
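Because `finish_reason` (documented above on `Choice` and its enum) distinguishes complete output from truncation, filtering, and tool calls, most callers branch on it right away. A short sketch; the hash-based message and symbol enum values follow this SDK's conventions, but treat the exact shapes as assumptions:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    completion = client.chat.completions.create(
      model: "gpt-4o",
      messages: [{role: :user, content: "Say hello"}]
    )

    choice = completion.choices.first
    case choice.finish_reason
    when :stop
      puts choice.message.content # natural stop point or a provided stop sequence
    when :length
      warn "truncated; consider raising max_completion_tokens"
    when :content_filter
      warn "output withheld by content filters"
    when :tool_calls, :function_call
      choice.message.tool_calls&.each { |call| puts call.function.name }
    end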
sig do returns( T.nilable( @@ -43,7 +43,7 @@ module OpenAI attr_accessor :content # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall)) } attr_reader :function_call @@ -58,7 +58,7 @@ module OpenAI attr_writer :function_call # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } attr_reader :name @@ -152,7 +152,7 @@ module OpenAI attr_accessor :id # Data about a previous audio response from the model. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { params(id: String).returns(T.attached_class) } def self.new(id:); end @@ -161,12 +161,12 @@ module OpenAI end # The contents of the assistant message. Required unless `tool_calls` or - # `function_call` is specified. + # `function_call` is specified. module Content extend OpenAI::Internal::Type::Union # Learn about - # [text inputs](https://platform.openai.com/docs/guides/text-generation). + # [text inputs](https://platform.openai.com/docs/guides/text-generation). module ArrayOfContentPart extend OpenAI::Internal::Type::Union @@ -204,9 +204,9 @@ module OpenAI class FunctionCall < OpenAI::Internal::Type::BaseModel # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(String) } attr_accessor :arguments @@ -215,7 +215,7 @@ module OpenAI attr_accessor :name # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments:, name:); end diff --git a/rbi/lib/openai/models/chat/chat_completion_audio.rbi b/rbi/lib/openai/models/chat/chat_completion_audio.rbi index 66867567..77f2a8a9 100644 --- a/rbi/lib/openai/models/chat/chat_completion_audio.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_audio.rbi @@ -9,12 +9,12 @@ module OpenAI attr_accessor :id # Base64 encoded audio bytes generated by the model, in the format specified in - # the request. + # the request. sig { returns(String) } attr_accessor :data # The Unix timestamp (in seconds) for when this audio response will no longer be - # accessible on the server for use in multi-turn conversations. + # accessible on the server for use in multi-turn conversations. sig { returns(Integer) } attr_accessor :expires_at @@ -23,8 +23,8 @@ module OpenAI attr_accessor :transcript # If the audio output modality is requested, this object contains data about the - # audio response from the model. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). 
+ # audio response from the model.
+ # [Learn more](https://platform.openai.com/docs/guides/audio).
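Putting the fields above together: request audio output, then decode the base64 `data` and keep the `transcript`. A sketch; the `format_` keyword follows the accessor naming in `ChatCompletionAudioParam`, and the model name is illustrative:

    require "openai"
    require "base64"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    completion = client.chat.completions.create(
      model: "gpt-4o-audio-preview",
      modalities: [:text, :audio],
      audio: {format_: :wav, voice: :alloy}, # required once audio output is requested
      messages: [{role: :user, content: "Say hello"}]
    )

    audio = completion.choices.first.message.audio
    File.binwrite("reply.wav", Base64.decode64(audio.data)) # bytes in the requested format
    puts audio.transcript
    puts "reusable for multi-turn until #{Time.at(audio.expires_at)}"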
sig do params(id: String, data: String, expires_at: Integer, transcript: String).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi b/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi index e1aa3707..0755aab8 100644 --- a/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi @@ -5,18 +5,18 @@ module OpenAI module Chat class ChatCompletionAudioParam < OpenAI::Internal::Type::BaseModel # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, - # or `pcm16`. + # or `pcm16`. sig { returns(OpenAI::Models::Chat::ChatCompletionAudioParam::Format::OrSymbol) } attr_accessor :format_ # The voice the model uses to respond. Supported voices are `alloy`, `ash`, - # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. + # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. sig { returns(T.any(String, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::OrSymbol)) } attr_accessor :voice # Parameters for audio output. Required when audio output is requested with - # `modalities: ["audio"]`. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig do params( format_: OpenAI::Models::Chat::ChatCompletionAudioParam::Format::OrSymbol, @@ -38,7 +38,7 @@ module OpenAI def to_hash; end # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, - # or `pcm16`. + # or `pcm16`. module Format extend OpenAI::Internal::Type::Enum @@ -57,7 +57,7 @@ module OpenAI end # The voice the model uses to respond. Supported voices are `alloy`, `ash`, - # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. + # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. module Voice extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/chat/chat_completion_chunk.rbi b/rbi/lib/openai/models/chat/chat_completion_chunk.rbi index f8eb2225..a7aaf04d 100644 --- a/rbi/lib/openai/models/chat/chat_completion_chunk.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_chunk.rbi @@ -9,13 +9,13 @@ module OpenAI attr_accessor :id # A list of chat completion choices. Can contain more than one elements if `n` is - # greater than 1. Can also be empty for the last chunk if you set - # `stream_options: {"include_usage": true}`. + # greater than 1. Can also be empty for the last chunk if you set + # `stream_options: {"include_usage": true}`. sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice]) } attr_accessor :choices # The Unix timestamp (in seconds) of when the chat completion was created. Each - # chunk has the same timestamp. + # chunk has the same timestamp. sig { returns(Integer) } attr_accessor :created @@ -32,8 +32,8 @@ module OpenAI attr_accessor :service_tier # This fingerprint represents the backend configuration that the model runs with. - # Can be used in conjunction with the `seed` request parameter to understand when - # backend changes have been made that might impact determinism. + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. sig { returns(T.nilable(String)) } attr_reader :system_fingerprint @@ -41,12 +41,12 @@ module OpenAI attr_writer :system_fingerprint # An optional field that will only be present when you set - # `stream_options: {"include_usage": true}` in your request. 
When present, it - # contains a null value **except for the last chunk** which contains the token - # usage statistics for the entire request. + # `stream_options: {"include_usage": true}` in your request. When present, it + # contains a null value **except for the last chunk** which contains the token + # usage statistics for the entire request. # - # **NOTE:** If the stream is interrupted or cancelled, you may not receive the - # final usage chunk which contains the total token usage for the request. + # **NOTE:** If the stream is interrupted or cancelled, you may not receive the + # final usage chunk which contains the total token usage for the request. sig { returns(T.nilable(OpenAI::Models::CompletionUsage)) } attr_reader :usage @@ -54,8 +54,8 @@ module OpenAI attr_writer :usage # Represents a streamed chunk of a chat completion response returned by the model, - # based on the provided input. - # [Learn more](https://platform.openai.com/docs/guides/streaming-responses). + # based on the provided input. + # [Learn more](https://platform.openai.com/docs/guides/streaming-responses). sig do params( id: String, @@ -108,11 +108,11 @@ module OpenAI attr_writer :delta # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::FinishReason::TaggedSymbol)) } attr_accessor :finish_reason @@ -162,7 +162,7 @@ module OpenAI attr_accessor :content # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall)) } attr_reader :function_call @@ -225,9 +225,9 @@ module OpenAI class FunctionCall < OpenAI::Internal::Type::BaseModel # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(T.nilable(String)) } attr_reader :arguments @@ -242,7 +242,7 @@ module OpenAI attr_writer :name # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. 
+ # that should be called, as generated by the model.
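The chunk shape above (per-choice `delta`s sharing one `created` timestamp) is consumed incrementally. A sketch that assumes the SDK exposes a raw streaming call, named `stream_raw` here, yielding `ChatCompletionChunk` objects:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    stream = client.chat.completions.stream_raw(
      model: "gpt-4o",
      messages: [{role: :user, content: "Tell me a one-line story"}]
    )

    stream.each do |chunk|
      print chunk.choices.first&.delta&.content # only the newly generated fragment
    end
    puts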
sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments: nil, name: nil); end @@ -332,9 +332,9 @@ module OpenAI class Function < OpenAI::Internal::Type::BaseModel # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(T.nilable(String)) } attr_reader :arguments @@ -383,11 +383,11 @@ module OpenAI end # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. module FinishReason extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part.rbi index 3f40e799..ba9fd403 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part.rbi @@ -4,7 +4,7 @@ module OpenAI module Models module Chat # Learn about - # [text inputs](https://platform.openai.com/docs/guides/text-generation). + # [text inputs](https://platform.openai.com/docs/guides/text-generation). module ChatCompletionContentPart extend OpenAI::Internal::Type::Union @@ -25,7 +25,7 @@ module OpenAI attr_accessor :type # Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text - # generation. + # generation. sig do params( file: T.any(OpenAI::Models::Chat::ChatCompletionContentPart::File::File, OpenAI::Internal::AnyHash), @@ -40,7 +40,7 @@ module OpenAI class File < OpenAI::Internal::Type::BaseModel # The base64 encoded file data, used when passing the file to the model as a - # string. + # string. sig { returns(T.nilable(String)) } attr_reader :file_data diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi index 8f1d2dba..e7e3a922 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi @@ -40,7 +40,7 @@ module OpenAI attr_accessor :url # Specifies the detail level of the image. Learn more in the - # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). 
+ # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
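The `detail` level above trades token cost for fidelity, so `low` is a reasonable choice for coarse questions about an image. A sketch with a placeholder URL:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    completion = client.chat.completions.create(
      model: "gpt-4o",
      messages: [{
        role: :user,
        content: [
          {type: :text, text: "What is in this image?"},
          {type: :image_url, image_url: {url: "https://example.com/photo.jpg", detail: :low}}
        ]
      }]
    )
    puts completion.choices.first.message.content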
sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::Detail::OrSymbol)) } attr_reader :detail @@ -65,7 +65,7 @@ module OpenAI def to_hash; end # Specifies the detail level of the image. Learn more in the - # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). module Detail extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi index e2fea7d5..ee4dcfaf 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi @@ -13,7 +13,7 @@ module OpenAI attr_accessor :type # Learn about - # [text inputs](https://platform.openai.com/docs/guides/text-generation). + # [text inputs](https://platform.openai.com/docs/guides/text-generation). sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :text); end diff --git a/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi index d5eeea91..6e56c125 100644 --- a/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi @@ -13,7 +13,7 @@ module OpenAI attr_accessor :role # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } attr_reader :name @@ -21,8 +21,8 @@ module OpenAI attr_writer :name # Developer-provided instructions that the model should follow, regardless of - # messages sent by the user. With o1 models and newer, `developer` messages - # replace the previous `system` messages. + # messages sent by the user. With o1 models and newer, `developer` messages + # replace the previous `system` messages. sig do params( content: T.any( diff --git a/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi b/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi index d63a2e1c..5dcea933 100644 --- a/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi @@ -9,7 +9,7 @@ module OpenAI attr_accessor :name # Specifying a particular function via `{"name": "my_function"}` forces the model - # to call that function. + # to call that function. sig { params(name: String).returns(T.attached_class) } def self.new(name:); end diff --git a/rbi/lib/openai/models/chat/chat_completion_message.rbi b/rbi/lib/openai/models/chat/chat_completion_message.rbi index 6c785895..f70217f5 100644 --- a/rbi/lib/openai/models/chat/chat_completion_message.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_message.rbi @@ -17,7 +17,7 @@ module OpenAI attr_accessor :role # Annotations for the message, when applicable, as when using the - # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
+ # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
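The role migration described above, with `developer` superseding `system` on o1 and newer, only changes the message shape; a sketch:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    completion = client.chat.completions.create(
      model: "o1",
      messages: [
        {role: :developer, content: "Answer in formal English."}, # was role: :system
        {role: :user, content: "hey, what's up"}
      ]
    )
    puts completion.choices.first.message.content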
sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation])) } attr_reader :annotations @@ -30,8 +30,8 @@ module OpenAI attr_writer :annotations # If the audio output modality is requested, this object contains data about the - # audio response from the model. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAudio)) } attr_reader :audio @@ -39,7 +39,7 @@ module OpenAI attr_writer :audio # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall)) } attr_reader :function_call @@ -173,9 +173,9 @@ module OpenAI class FunctionCall < OpenAI::Internal::Type::BaseModel # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(String) } attr_accessor :arguments @@ -184,7 +184,7 @@ module OpenAI attr_accessor :name # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments:, name:); end diff --git a/rbi/lib/openai/models/chat/chat_completion_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_message_param.rbi index 6cf5ae54..4c9296f5 100644 --- a/rbi/lib/openai/models/chat/chat_completion_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_message_param.rbi @@ -4,8 +4,8 @@ module OpenAI module Models module Chat # Developer-provided instructions that the model should follow, regardless of - # messages sent by the user. With o1 models and newer, `developer` messages - # replace the previous `system` messages. + # messages sent by the user. With o1 models and newer, `developer` messages + # replace the previous `system` messages. module ChatCompletionMessageParam extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi b/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi index b4bfcbdd..85b1dc6e 100644 --- a/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi @@ -44,9 +44,9 @@ module OpenAI class Function < OpenAI::Internal::Type::BaseModel # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. 
+ # format. Note that the model does not always generate valid JSON, and may
+ # hallucinate parameters not defined by your function schema. Validate the
+ # arguments in your code before calling your function.
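Since the comment above warns that `arguments` may be invalid JSON or carry hallucinated parameters, parse defensively and whitelist against your schema. A small self-contained sketch; the allowed keys are illustrative:

    require "json"

    ALLOWED_KEYS = %w[city unit].freeze # whatever your function schema declares

    def parse_function_arguments(raw)
      JSON.parse(raw).slice(*ALLOWED_KEYS) # drop hallucinated parameters
    rescue JSON::ParserError => e
      warn "model emitted invalid JSON arguments: #{e.message}"
      nil
    end

    parse_function_arguments('{"city":"Paris","made_up":true}') # => {"city"=>"Paris"}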
sig { returns(String) } attr_accessor :arguments diff --git a/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi b/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi index be71706f..041eea0a 100644 --- a/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi @@ -20,7 +20,7 @@ module OpenAI attr_accessor :type # Specifies a tool the model should use. Use to force the model to call a specific - # function. + # function. sig do params( function: T.any(OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function, OpenAI::Internal::AnyHash), diff --git a/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi b/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi index c6f72148..a993e33a 100644 --- a/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi @@ -5,18 +5,18 @@ module OpenAI module Chat class ChatCompletionPredictionContent < OpenAI::Internal::Type::BaseModel # The content that should be matched when generating a model response. If - # generated tokens would match this content, the entire model response can be - # returned much more quickly. + # generated tokens would match this content, the entire model response can be + # returned much more quickly. sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } attr_accessor :content # The type of the predicted content you want to provide. This type is currently - # always `content`. + # always `content`. sig { returns(Symbol) } attr_accessor :type # Static predicted output content, such as the content of a text file that is - # being regenerated. + # being regenerated. sig do params( content: T.any( @@ -38,8 +38,8 @@ module OpenAI def to_hash; end # The content that should be matched when generating a model response. If - # generated tokens would match this content, the entire model response can be - # returned much more quickly. + # generated tokens would match this content, the entire model response can be + # returned much more quickly. module Content extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi b/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi index 51d36cd9..771ab84e 100644 --- a/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi @@ -5,12 +5,12 @@ module OpenAI module Chat class ChatCompletionStreamOptions < OpenAI::Internal::Type::BaseModel # If set, an additional chunk will be streamed before the `data: [DONE]` message. - # The `usage` field on this chunk shows the token usage statistics for the entire - # request, and the `choices` field will always be an empty array. + # The `usage` field on this chunk shows the token usage statistics for the entire + # request, and the `choices` field will always be an empty array. # - # All other chunks will also include a `usage` field, but with a null value. - # **NOTE:** If the stream is interrupted, you may not receive the final usage - # chunk which contains the total token usage for the request. + # All other chunks will also include a `usage` field, but with a null value. + # **NOTE:** If the stream is interrupted, you may not receive the final usage + # chunk which contains the total token usage for the request. 
+ # All other chunks will also include a `usage` field, but with a null value.
+ # **NOTE:** If the stream is interrupted, you may not receive the final usage
+ # chunk which contains the total token usage for the request.
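Reading the final usage chunk that `include_usage` enables, while tolerating the interruption caveat above; the `stream_raw` helper name is again an assumption:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    stream = client.chat.completions.stream_raw(
      model: "gpt-4o",
      messages: [{role: :user, content: "hi"}],
      stream_options: {include_usage: true}
    )

    usage = nil
    stream.each do |chunk|
      print chunk.choices.first&.delta&.content
      usage = chunk.usage if chunk.usage # null on every chunk except the last
    end
    puts "\ntotal tokens: #{usage&.total_tokens}" # nil if the stream was cut short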
sig { returns(T.nilable(T::Boolean)) } attr_reader :include_usage diff --git a/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi index 44c63247..b21a46af 100644 --- a/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi @@ -13,7 +13,7 @@ module OpenAI attr_accessor :role # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } attr_reader :name @@ -21,8 +21,8 @@ module OpenAI attr_writer :name # Developer-provided instructions that the model should follow, regardless of - # messages sent by the user. With o1 models and newer, use `developer` messages - # for this purpose instead. + # messages sent by the user. With o1 models and newer, use `developer` messages + # for this purpose instead. sig do params( content: T.any( diff --git a/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi b/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi index 4763d686..0f4594a5 100644 --- a/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi @@ -9,21 +9,21 @@ module OpenAI attr_accessor :token # A list of integers representing the UTF-8 bytes representation of the token. - # Useful in instances where characters are represented by multiple tokens and - # their byte representations must be combined to generate the correct text - # representation. Can be `null` if there is no bytes representation for the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. sig { returns(T.nilable(T::Array[Integer])) } attr_accessor :bytes # The log probability of this token, if it is within the top 20 most likely - # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very - # unlikely. + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. sig { returns(Float) } attr_accessor :logprob # List of the most likely tokens and their log probability, at this token - # position. In rare cases, there may be fewer than the number of requested - # `top_logprobs` returned. + # position. In rare cases, there may be fewer than the number of requested + # `top_logprobs` returned. sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob]) } attr_accessor :top_logprobs @@ -57,15 +57,15 @@ module OpenAI attr_accessor :token # A list of integers representing the UTF-8 bytes representation of the token. - # Useful in instances where characters are represented by multiple tokens and - # their byte representations must be combined to generate the correct text - # representation. Can be `null` if there is no bytes representation for the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. sig { returns(T.nilable(T::Array[Integer])) } attr_accessor :bytes # The log probability of this token, if it is within the top 20 most likely - # tokens. 
Otherwise, the value `-9999.0` is used to signify that the token is very - # unlikely. + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. sig { returns(Float) } attr_accessor :logprob diff --git a/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi b/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi index a840bdf9..b7979106 100644 --- a/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi @@ -4,20 +4,20 @@ module OpenAI module Models module Chat # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tool and instead generates a message. `auto` means the model can - # pick between generating a message or calling one or more tools. `required` means - # the model must call one or more tools. Specifying a particular tool via - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. # - # `none` is the default when no tools are present. `auto` is the default if tools - # are present. + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. module ChatCompletionToolChoiceOption extend OpenAI::Internal::Type::Union # `none` means the model will not call any tool and instead generates a message. - # `auto` means the model can pick between generating a message or calling one or - # more tools. `required` means the model must call one or more tools. + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools. module Auto extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi index 312f7dfb..24c806a5 100644 --- a/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi @@ -27,7 +27,7 @@ module OpenAI attr_accessor :role # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } attr_reader :name @@ -35,7 +35,7 @@ module OpenAI attr_writer :name # Messages sent by an end user, containing prompts or additional context - # information. + # information. sig do params( content: T.any( diff --git a/rbi/lib/openai/models/chat/completion_create_params.rbi b/rbi/lib/openai/models/chat/completion_create_params.rbi index 0afa33d1..c284fa11 100644 --- a/rbi/lib/openai/models/chat/completion_create_params.rbi +++ b/rbi/lib/openai/models/chat/completion_create_params.rbi @@ -8,11 +8,11 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # A list of messages comprising the conversation so far. 
Depending on the - # [model](https://platform.openai.com/docs/models) you use, different message - # types (modalities) are supported, like - # [text](https://platform.openai.com/docs/guides/text-generation), - # [images](https://platform.openai.com/docs/guides/vision), and - # [audio](https://platform.openai.com/docs/guides/audio). + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). sig do returns( T::Array[ @@ -30,16 +30,16 @@ module OpenAI attr_accessor :messages # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. sig { returns(T.any(String, OpenAI::Models::ChatModel::OrSymbol)) } attr_accessor :model # Parameters for audio output. Required when audio output is requested with - # `modalities: ["audio"]`. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAudioParam)) } attr_reader :audio @@ -50,25 +50,25 @@ module OpenAI attr_writer :audio # Number between -2.0 and 2.0. Positive values penalize new tokens based on their - # existing frequency in the text so far, decreasing the model's likelihood to - # repeat the same line verbatim. + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. sig { returns(T.nilable(Float)) } attr_accessor :frequency_penalty # Deprecated in favor of `tool_choice`. # - # Controls which (if any) function is called by the model. + # Controls which (if any) function is called by the model. # - # `none` means the model will not call a function and instead generates a message. + # `none` means the model will not call a function and instead generates a message. # - # `auto` means the model can pick between generating a message or calling a - # function. + # `auto` means the model can pick between generating a message or calling a + # function. # - # Specifying a particular function via `{"name": "my_function"}` forces the model - # to call that function. + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. # - # `none` is the default when no functions are present. `auto` is the default if - # functions are present. + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. sig do returns( T.nilable( @@ -95,7 +95,7 @@ module OpenAI # Deprecated in favor of `tools`. # - # A list of functions the model may generate JSON inputs for. + # A list of functions the model may generate JSON inputs for. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::CompletionCreateParams::Function])) } attr_reader :functions @@ -109,68 +109,68 @@ module OpenAI # Modify the likelihood of specified tokens appearing in the completion. 
# - # Accepts a JSON object that maps tokens (specified by their token ID in the - # tokenizer) to an associated bias value from -100 to 100. Mathematically, the - # bias is added to the logits generated by the model prior to sampling. The exact - # effect will vary per model, but values between -1 and 1 should decrease or - # increase likelihood of selection; values like -100 or 100 should result in a ban - # or exclusive selection of the relevant token. + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. sig { returns(T.nilable(T::Hash[Symbol, Integer])) } attr_accessor :logit_bias # Whether to return log probabilities of the output tokens or not. If true, - # returns the log probabilities of each output token returned in the `content` of - # `message`. + # returns the log probabilities of each output token returned in the `content` of + # `message`. sig { returns(T.nilable(T::Boolean)) } attr_accessor :logprobs # An upper bound for the number of tokens that can be generated for a completion, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } attr_accessor :max_completion_tokens # The maximum number of [tokens](/tokenizer) that can be generated in the chat - # completion. This value can be used to control - # [costs](https://openai.com/api/pricing/) for text generated via API. + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. # - # This value is now deprecated in favor of `max_completion_tokens`, and is not - # compatible with - # [o1 series models](https://platform.openai.com/docs/guides/reasoning). + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o1 series models](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } attr_accessor :max_tokens # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata # Output types that you would like the model to generate. Most models are capable - # of generating text, which is the default: + # of generating text, which is the default: # - # `["text"]` + # `["text"]` # - # The `gpt-4o-audio-preview` model can also be used to - # [generate audio](https://platform.openai.com/docs/guides/audio). 
To request that - # this model generate both text and audio responses, you can use: + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). To request that + # this model generate both text and audio responses, you can use: # - # `["text", "audio"]` + # `["text", "audio"]` sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::CompletionCreateParams::Modality::OrSymbol])) } attr_accessor :modalities # How many chat completion choices to generate for each input message. Note that - # you will be charged based on the number of generated tokens across all of the - # choices. Keep `n` as `1` to minimize costs. + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. sig { returns(T.nilable(Integer)) } attr_accessor :n # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T.nilable(T::Boolean)) } attr_reader :parallel_tool_calls @@ -178,7 +178,7 @@ module OpenAI attr_writer :parallel_tool_calls # Static predicted output content, such as the content of a text file that is - # being regenerated. + # being regenerated. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionPredictionContent)) } attr_reader :prediction @@ -191,30 +191,30 @@ module OpenAI attr_writer :prediction # Number between -2.0 and 2.0. Positive values penalize new tokens based on - # whether they appear in the text so far, increasing the model's likelihood to - # talk about new topics. + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. sig { returns(T.nilable(Float)) } attr_accessor :presence_penalty # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(OpenAI::Models::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort # An object specifying the format that the model must output. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. 
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. sig do returns( T.nilable( @@ -242,38 +242,38 @@ module OpenAI attr_writer :response_format # This feature is in Beta. If specified, our system will make a best effort to - # sample deterministically, such that repeated requests with the same `seed` and - # parameters should return the same result. Determinism is not guaranteed, and you - # should refer to the `system_fingerprint` response parameter to monitor changes - # in the backend. + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. sig { returns(T.nilable(Integer)) } attr_accessor :seed # Specifies the latency tier to use for processing the request. This parameter is - # relevant for customers subscribed to the scale tier service: + # relevant for customers subscribed to the scale tier service: # - # - If set to 'auto', and the Project is Scale tier enabled, the system will - # utilize scale tier credits until they are exhausted. - # - If set to 'auto', and the Project is not Scale tier enabled, the request will - # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. - # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. - # - When not set, the default behavior is 'auto'. + # - If set to 'auto', and the Project is Scale tier enabled, the system will + # utilize scale tier credits until they are exhausted. + # - If set to 'auto', and the Project is not Scale tier enabled, the request will + # be processed using the default service tier with a lower uptime SLA and no + # latency guarentee. + # - If set to 'default', the request will be processed using the default service + # tier with a lower uptime SLA and no latency guarentee. + # - When not set, the default behavior is 'auto'. # - # When this parameter is set, the response body will include the `service_tier` - # utilized. + # When this parameter is set, the response body will include the `service_tier` + # utilized. sig { returns(T.nilable(OpenAI::Models::Chat::CompletionCreateParams::ServiceTier::OrSymbol)) } attr_accessor :service_tier # Up to 4 sequences where the API will stop generating further tokens. The - # returned text will not contain the stop sequence. + # returned text will not contain the stop sequence. sig { returns(T.nilable(T.any(String, T::Array[String]))) } attr_accessor :stop # Whether or not to store the output of this chat completion request for use in - # our [model distillation](https://platform.openai.com/docs/guides/distillation) - # or [evals](https://platform.openai.com/docs/guides/evals) products. + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. sig { returns(T.nilable(T::Boolean)) } attr_accessor :store @@ -290,21 +290,21 @@ module OpenAI attr_writer :stream_options # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. 
We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. sig { returns(T.nilable(Float)) } attr_accessor :temperature # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tool and instead generates a message. `auto` means the model can - # pick between generating a message or calling one or more tools. `required` means - # the model must call one or more tools. Specifying a particular tool via - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. # - # `none` is the default when no tools are present. `auto` is the default if tools - # are present. + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. sig do returns( T.nilable( @@ -330,8 +330,8 @@ module OpenAI attr_writer :tool_choice # A list of tools the model may call. Currently, only functions are supported as a - # tool. Use this to provide a list of functions the model may generate JSON inputs - # for. A max of 128 functions are supported. + # tool. Use this to provide a list of functions the model may generate JSON inputs + # for. A max of 128 functions are supported. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTool])) } attr_reader :tools @@ -339,22 +339,22 @@ module OpenAI attr_writer :tools # An integer between 0 and 20 specifying the number of most likely tokens to - # return at each token position, each with an associated log probability. - # `logprobs` must be set to `true` if this parameter is used. + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. sig { returns(T.nilable(Integer)) } attr_accessor :top_logprobs # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } attr_accessor :top_p # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } attr_reader :user @@ -362,8 +362,8 @@ module OpenAI attr_writer :user # This tool searches the web for relevant results to use in a response. Learn more - # about the - # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
+ # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). sig { returns(T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions)) } attr_reader :web_search_options @@ -527,10 +527,10 @@ module OpenAI def to_hash; end # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. module Model extend OpenAI::Internal::Type::Union @@ -540,24 +540,24 @@ module OpenAI # Deprecated in favor of `tool_choice`. # - # Controls which (if any) function is called by the model. + # Controls which (if any) function is called by the model. # - # `none` means the model will not call a function and instead generates a message. + # `none` means the model will not call a function and instead generates a message. # - # `auto` means the model can pick between generating a message or calling a - # function. + # `auto` means the model can pick between generating a message or calling a + # function. # - # Specifying a particular function via `{"name": "my_function"}` forces the model - # to call that function. + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. # - # `none` is the default when no functions are present. `auto` is the default if - # functions are present. + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. module FunctionCall extend OpenAI::Internal::Type::Union # `none` means the model will not call a function and instead generates a message. - # `auto` means the model can pick between generating a message or calling a - # function. + # `auto` means the model can pick between generating a message or calling a + # function. module FunctionCallMode extend OpenAI::Internal::Type::Enum @@ -597,12 +597,12 @@ module OpenAI class Function < OpenAI::Internal::Type::BaseModel # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain - # underscores and dashes, with a maximum length of 64. + # underscores and dashes, with a maximum length of 64. sig { returns(String) } attr_accessor :name # A description of what the function does, used by the model to choose when and - # how to call the function. + # how to call the function. sig { returns(T.nilable(String)) } attr_reader :description @@ -610,12 +610,12 @@ module OpenAI attr_writer :description # The parameters the functions accepts, described as a JSON Schema object. See the - # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, - # and the - # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - # documentation about the format. + # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + # and the + # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + # documentation about the format. # - # Omitting `parameters` defines a function with an empty parameter list. + # Omitting `parameters` defines a function with an empty parameter list. 
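The `name`, `description`, and `parameters` fields above are the entire contract for a callable function. A sketch of declaring one as a tool; the weather schema is illustrative:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    completion = client.chat.completions.create(
      model: "gpt-4o",
      messages: [{role: :user, content: "What's the weather in Paris?"}],
      tools: [{
        type: :function,
        function: {
          name: "get_weather", # a-z, A-Z, 0-9, underscores and dashes; max 64 chars
          description: "Get the current weather for a city",
          parameters: {
            type: "object",
            properties: {city: {type: "string"}},
            required: ["city"]
          } # omitting `parameters` entirely declares a zero-argument function
        }
      }],
      tool_choice: :auto
    )
    puts completion.choices.first.message.tool_calls&.first&.function&.arguments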
sig { returns(T.nilable(T::Hash[Symbol, T.anything])) }
      attr_reader :parameters

@@ -650,14 +650,14 @@ module OpenAI

      # An object specifying the format that the model must output.
      #
-      #   Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-      #   Outputs which ensures the model will match your supplied JSON schema. Learn more
-      #   in the
-      #   [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+      #   Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+      #   Outputs which ensures the model will match your supplied JSON schema. Learn more
+      #   in the
+      #   [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
      #
-      #   Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-      #   ensures the message the model generates is valid JSON. Using `json_schema` is
-      #   preferred for models that support it.
+      #   Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+      #   ensures the message the model generates is valid JSON. Using `json_schema` is
+      #   preferred for models that support it.
      module ResponseFormat
        extend OpenAI::Internal::Type::Union

@@ -671,19 +671,19 @@ module OpenAI
      end

      # Specifies the latency tier to use for processing the request. This parameter is
-      #   relevant for customers subscribed to the scale tier service:
+      #   relevant for customers subscribed to the scale tier service:
      #
-      #   - If set to 'auto', and the Project is Scale tier enabled, the system will
-      #     utilize scale tier credits until they are exhausted.
-      #   - If set to 'auto', and the Project is not Scale tier enabled, the request will
-      #     be processed using the default service tier with a lower uptime SLA and no
-      #     latency guarentee.
-      #   - If set to 'default', the request will be processed using the default service
-      #     tier with a lower uptime SLA and no latency guarentee.
-      #   - When not set, the default behavior is 'auto'.
+      #   - If set to 'auto', and the Project is Scale tier enabled, the system will
+      #     utilize scale tier credits until they are exhausted.
+      #   - If set to 'auto', and the Project is not Scale tier enabled, the request will
+      #     be processed using the default service tier with a lower uptime SLA and no
+      #     latency guarantee.
+      #   - If set to 'default', the request will be processed using the default service
+      #     tier with a lower uptime SLA and no latency guarantee.
+      #   - When not set, the default behavior is 'auto'.
      #
-      #   When this parameter is set, the response body will include the `service_tier`
-      #   utilized.
+      #   When this parameter is set, the response body will include the `service_tier`
+      #   utilized.
      module ServiceTier
        extend OpenAI::Internal::Type::Enum

@@ -699,7 +699,7 @@ module OpenAI
      end

      # Up to 4 sequences where the API will stop generating further tokens. The
-      #   returned text will not contain the stop sequence.
+      #   returned text will not contain the stop sequence.
      module Stop
        extend OpenAI::Internal::Type::Union

@@ -711,7 +711,7 @@ module OpenAI

      class WebSearchOptions < OpenAI::Internal::Type::BaseModel
        # High level guidance for the amount of context window space to use for the
-      #   search. One of `low`, `medium`, or `high`. `medium` is the default.
+      #   search. One of `low`, `medium`, or `high`. `medium` is the default.
sig do returns( T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize::OrSymbol) @@ -745,8 +745,8 @@ module OpenAI attr_writer :user_location # This tool searches the web for relevant results to use in a response. Learn more - # about the - # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). sig do params( search_context_size: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize::OrSymbol, @@ -773,7 +773,7 @@ module OpenAI def to_hash; end # High level guidance for the amount of context window space to use for the - # search. One of `low`, `medium`, or `high`. `medium` is the default. + # search. One of `low`, `medium`, or `high`. `medium` is the default. module SearchContextSize extend OpenAI::Internal::Type::Enum @@ -866,7 +866,7 @@ module OpenAI attr_writer :city # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of - # the user, e.g. `US`. + # the user, e.g. `US`. sig { returns(T.nilable(String)) } attr_reader :country @@ -881,7 +881,7 @@ module OpenAI attr_writer :region # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the - # user, e.g. `America/Los_Angeles`. + # user, e.g. `America/Los_Angeles`. sig { returns(T.nilable(String)) } attr_reader :timezone diff --git a/rbi/lib/openai/models/chat/completion_list_params.rbi b/rbi/lib/openai/models/chat/completion_list_params.rbi index c90d051b..eec03610 100644 --- a/rbi/lib/openai/models/chat/completion_list_params.rbi +++ b/rbi/lib/openai/models/chat/completion_list_params.rbi @@ -23,7 +23,7 @@ module OpenAI # A list of metadata keys to filter the Chat Completions by. Example: # - # `metadata[key1]=value1&metadata[key2]=value2` + # `metadata[key1]=value1&metadata[key2]=value2` sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -35,7 +35,7 @@ module OpenAI attr_writer :model # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or - # `desc` for descending order. Defaults to `asc`. + # `desc` for descending order. Defaults to `asc`. sig { returns(T.nilable(OpenAI::Models::Chat::CompletionListParams::Order::OrSymbol)) } attr_reader :order @@ -71,7 +71,7 @@ module OpenAI def to_hash; end # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or - # `desc` for descending order. Defaults to `asc`. + # `desc` for descending order. Defaults to `asc`. module Order extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/chat/completion_update_params.rbi b/rbi/lib/openai/models/chat/completion_update_params.rbi index 49a8c260..b85b54da 100644 --- a/rbi/lib/openai/models/chat/completion_update_params.rbi +++ b/rbi/lib/openai/models/chat/completion_update_params.rbi @@ -8,11 +8,11 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. 
Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata diff --git a/rbi/lib/openai/models/chat/completions/message_list_params.rbi b/rbi/lib/openai/models/chat/completions/message_list_params.rbi index 85aa9aba..8a39c287 100644 --- a/rbi/lib/openai/models/chat/completions/message_list_params.rbi +++ b/rbi/lib/openai/models/chat/completions/message_list_params.rbi @@ -23,7 +23,7 @@ module OpenAI attr_writer :limit # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` - # for descending order. Defaults to `asc`. + # for descending order. Defaults to `asc`. sig { returns(T.nilable(OpenAI::Models::Chat::Completions::MessageListParams::Order::OrSymbol)) } attr_reader :order @@ -55,7 +55,7 @@ module OpenAI def to_hash; end # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` - # for descending order. Defaults to `asc`. + # for descending order. Defaults to `asc`. module Order extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/comparison_filter.rbi b/rbi/lib/openai/models/comparison_filter.rbi index 2129e61f..15d111b8 100644 --- a/rbi/lib/openai/models/comparison_filter.rbi +++ b/rbi/lib/openai/models/comparison_filter.rbi @@ -9,22 +9,22 @@ module OpenAI # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. # - # - `eq`: equals - # - `ne`: not equal - # - `gt`: greater than - # - `gte`: greater than or equal - # - `lt`: less than - # - `lte`: less than or equal + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal sig { returns(OpenAI::Models::ComparisonFilter::Type::OrSymbol) } attr_accessor :type # The value to compare against the attribute key; supports string, number, or - # boolean types. + # boolean types. sig { returns(T.any(String, Float, T::Boolean)) } attr_accessor :value # A filter used to compare a specified attribute key to a given value using a - # defined comparison operation. + # defined comparison operation. sig do params( key: String, @@ -45,12 +45,12 @@ module OpenAI # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. # - # - `eq`: equals - # - `ne`: not equal - # - `gt`: greater than - # - `gte`: greater than or equal - # - `lt`: less than - # - `lte`: less than or equal + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal module Type extend OpenAI::Internal::Type::Enum @@ -69,7 +69,7 @@ module OpenAI end # The value to compare against the attribute key; supports string, number, or - # boolean types. + # boolean types. module Value extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/completion.rbi b/rbi/lib/openai/models/completion.rbi index d24d8268..aa47ab99 100644 --- a/rbi/lib/openai/models/completion.rbi +++ b/rbi/lib/openai/models/completion.rbi @@ -25,8 +25,8 @@ module OpenAI # This fingerprint represents the backend configuration that the model runs with. # - # Can be used in conjunction with the `seed` request parameter to understand when - # backend changes have been made that might impact determinism. + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. 
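The `system_fingerprint` field above is meant to be read alongside the `seed` request parameter documented later in this diff: identical seeds should give stable results until the backend configuration (and so the fingerprint) changes. A short sketch, assuming a configured client; the model and prompt are illustrative:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

fingerprints = Array.new(2) do
  completion = client.completions.create(
    model: "gpt-3.5-turbo-instruct",
    prompt: "Name a fruit.",
    seed: 42 # best-effort determinism, not a guarantee
  )
  completion.system_fingerprint
end

# A changed fingerprint can explain divergent outputs for an identical seed.
warn "backend configuration changed" if fingerprints.uniq.size > 1
```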
sig { returns(T.nilable(String)) } attr_reader :system_fingerprint @@ -41,7 +41,7 @@ module OpenAI attr_writer :usage # Represents a completion response from the API. Note: both the streamed and - # non-streamed response objects share the same shape (unlike the chat endpoint). + # non-streamed response objects share the same shape (unlike the chat endpoint). sig do params( id: String, diff --git a/rbi/lib/openai/models/completion_choice.rbi b/rbi/lib/openai/models/completion_choice.rbi index 9833bea3..375563ef 100644 --- a/rbi/lib/openai/models/completion_choice.rbi +++ b/rbi/lib/openai/models/completion_choice.rbi @@ -4,9 +4,9 @@ module OpenAI module Models class CompletionChoice < OpenAI::Internal::Type::BaseModel # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, or `content_filter` if - # content was omitted due to a flag from our content filters. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. sig { returns(OpenAI::Models::CompletionChoice::FinishReason::TaggedSymbol) } attr_accessor :finish_reason @@ -50,9 +50,9 @@ module OpenAI def to_hash; end # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, or `content_filter` if - # content was omitted due to a flag from our content filters. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. module FinishReason extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/completion_create_params.rbi b/rbi/lib/openai/models/completion_create_params.rbi index 01701e36..cdda2364 100644 --- a/rbi/lib/openai/models/completion_create_params.rbi +++ b/rbi/lib/openai/models/completion_create_params.rbi @@ -7,33 +7,33 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.any(String, OpenAI::Models::CompletionCreateParams::Model::OrSymbol)) } attr_accessor :model # The prompt(s) to generate completions for, encoded as a string, array of - # strings, array of tokens, or array of token arrays. + # strings, array of tokens, or array of token arrays. # - # Note that <|endoftext|> is the document separator that the model sees during - # training, so if a prompt is not specified the model will generate as if from the - # beginning of a new document. + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. 
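The `prompt` comment above describes a union of four encodings. All of the following calls are valid per that comment, assuming a configured client; the token IDs are arbitrary placeholders:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
model = "gpt-3.5-turbo-instruct"

client.completions.create(model: model, prompt: "Say hi")               # single string
client.completions.create(model: model, prompt: ["Say hi", "Say bye"])  # array of strings
client.completions.create(model: model, prompt: [9906, 1917])           # array of tokens
client.completions.create(model: model, prompt: [[9906, 1917], [9906]]) # array of token arrays
```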
sig do returns(T.nilable(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]))) end attr_accessor :prompt # Generates `best_of` completions server-side and returns the "best" (the one with - # the highest log probability per token). Results cannot be streamed. + # the highest log probability per token). Results cannot be streamed. # - # When used with `n`, `best_of` controls the number of candidate completions and - # `n` specifies how many to return – `best_of` must be greater than `n`. + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. # - # **Note:** Because this parameter generates many completions, it can quickly - # consume your token quota. Use carefully and ensure that you have reasonable - # settings for `max_tokens` and `stop`. + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. sig { returns(T.nilable(Integer)) } attr_accessor :best_of @@ -42,74 +42,74 @@ module OpenAI attr_accessor :echo # Number between -2.0 and 2.0. Positive values penalize new tokens based on their - # existing frequency in the text so far, decreasing the model's likelihood to - # repeat the same line verbatim. + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. # - # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) sig { returns(T.nilable(Float)) } attr_accessor :frequency_penalty # Modify the likelihood of specified tokens appearing in the completion. # - # Accepts a JSON object that maps tokens (specified by their token ID in the GPT - # tokenizer) to an associated bias value from -100 to 100. You can use this - # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. - # Mathematically, the bias is added to the logits generated by the model prior to - # sampling. The exact effect will vary per model, but values between -1 and 1 - # should decrease or increase likelihood of selection; values like -100 or 100 - # should result in a ban or exclusive selection of the relevant token. + # Accepts a JSON object that maps tokens (specified by their token ID in the GPT + # tokenizer) to an associated bias value from -100 to 100. You can use this + # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + # Mathematically, the bias is added to the logits generated by the model prior to + # sampling. The exact effect will vary per model, but values between -1 and 1 + # should decrease or increase likelihood of selection; values like -100 or 100 + # should result in a ban or exclusive selection of the relevant token. # - # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token - # from being generated. + # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + # from being generated. sig { returns(T.nilable(T::Hash[Symbol, Integer])) } attr_accessor :logit_bias # Include the log probabilities on the `logprobs` most likely output tokens, as - # well the chosen tokens. For example, if `logprobs` is 5, the API will return a - # list of the 5 most likely tokens. 
The API will always return the `logprob` of
-      #   the sampled token, so there may be up to `logprobs+1` elements in the response.
+      #   well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+      #   list of the 5 most likely tokens. The API will always return the `logprob` of
+      #   the sampled token, so there may be up to `logprobs+1` elements in the response.
      #
-      #   The maximum value for `logprobs` is 5.
+      #   The maximum value for `logprobs` is 5.
      sig { returns(T.nilable(Integer)) }
      attr_accessor :logprobs

      # The maximum number of [tokens](/tokenizer) that can be generated in the
-      #   completion.
+      #   completion.
      #
-      #   The token count of your prompt plus `max_tokens` cannot exceed the model's
-      #   context length.
-      #   [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
-      #   for counting tokens.
+      #   The token count of your prompt plus `max_tokens` cannot exceed the model's
+      #   context length.
+      #   [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+      #   for counting tokens.
      sig { returns(T.nilable(Integer)) }
      attr_accessor :max_tokens

      # How many completions to generate for each prompt.
      #
-      #   **Note:** Because this parameter generates many completions, it can quickly
-      #   consume your token quota. Use carefully and ensure that you have reasonable
-      #   settings for `max_tokens` and `stop`.
+      #   **Note:** Because this parameter generates many completions, it can quickly
+      #   consume your token quota. Use carefully and ensure that you have reasonable
+      #   settings for `max_tokens` and `stop`.
      sig { returns(T.nilable(Integer)) }
      attr_accessor :n

      # Number between -2.0 and 2.0. Positive values penalize new tokens based on
-      #   whether they appear in the text so far, increasing the model's likelihood to
-      #   talk about new topics.
+      #   whether they appear in the text so far, increasing the model's likelihood to
+      #   talk about new topics.
      #
-      #   [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+      #   [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
      sig { returns(T.nilable(Float)) }
      attr_accessor :presence_penalty

      # If specified, our system will make a best effort to sample deterministically,
-      #   such that repeated requests with the same `seed` and parameters should return
-      #   the same result.
+      #   such that repeated requests with the same `seed` and parameters should return
+      #   the same result.
      #
-      #   Determinism is not guaranteed, and you should refer to the `system_fingerprint`
-      #   response parameter to monitor changes in the backend.
+      #   Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+      #   response parameter to monitor changes in the backend.
      sig { returns(T.nilable(Integer)) }
      attr_accessor :seed

      # Up to 4 sequences where the API will stop generating further tokens. The
-      #   returned text will not contain the stop sequence.
+      #   returned text will not contain the stop sequence.
      sig { returns(T.nilable(T.any(String, T::Array[String]))) }
      attr_accessor :stop

@@ -127,29 +127,29 @@ module OpenAI

      # The suffix that comes after a completion of inserted text.
      #
-      #   This parameter is only supported for `gpt-3.5-turbo-instruct`.
+      #   This parameter is only supported for `gpt-3.5-turbo-instruct`.
      sig { returns(T.nilable(String)) }
      attr_accessor :suffix

      # What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. # - # We generally recommend altering this or `top_p` but not both. + # We generally recommend altering this or `top_p` but not both. sig { returns(T.nilable(Float)) } attr_accessor :temperature # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } attr_accessor :top_p # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } attr_reader :user @@ -234,10 +234,10 @@ module OpenAI def to_hash; end # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. module Model extend OpenAI::Internal::Type::Union @@ -255,11 +255,11 @@ module OpenAI end # The prompt(s) to generate completions for, encoded as a string, array of - # strings, array of tokens, or array of token arrays. + # strings, array of tokens, or array of token arrays. # - # Note that <|endoftext|> is the document separator that the model sees during - # training, so if a prompt is not specified the model will generate as if from the - # beginning of a new document. + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. module Prompt extend OpenAI::Internal::Type::Union @@ -278,7 +278,7 @@ module OpenAI end # Up to 4 sequences where the API will stop generating further tokens. The - # returned text will not contain the stop sequence. + # returned text will not contain the stop sequence. module Stop extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/completion_usage.rbi b/rbi/lib/openai/models/completion_usage.rbi index e0fbddc4..2872756d 100644 --- a/rbi/lib/openai/models/completion_usage.rbi +++ b/rbi/lib/openai/models/completion_usage.rbi @@ -73,7 +73,7 @@ module OpenAI class CompletionTokensDetails < OpenAI::Internal::Type::BaseModel # When using Predicted Outputs, the number of tokens in the prediction that - # appeared in the completion. + # appeared in the completion. 
+      #   appeared in the completion.
sig { returns(T.nilable(Integer)) } attr_reader :accepted_prediction_tokens @@ -95,9 +95,9 @@ module OpenAI attr_writer :reasoning_tokens # When using Predicted Outputs, the number of tokens in the prediction that did - # not appear in the completion. However, like reasoning tokens, these tokens are - # still counted in the total completion tokens for purposes of billing, output, - # and context window limits. + # not appear in the completion. However, like reasoning tokens, these tokens are + # still counted in the total completion tokens for purposes of billing, output, + # and context window limits. sig { returns(T.nilable(Integer)) } attr_reader :rejected_prediction_tokens diff --git a/rbi/lib/openai/models/compound_filter.rbi b/rbi/lib/openai/models/compound_filter.rbi index 214f5113..5eee5410 100644 --- a/rbi/lib/openai/models/compound_filter.rbi +++ b/rbi/lib/openai/models/compound_filter.rbi @@ -4,7 +4,7 @@ module OpenAI module Models class CompoundFilter < OpenAI::Internal::Type::BaseModel # Array of filters to combine. Items can be `ComparisonFilter` or - # `CompoundFilter`. + # `CompoundFilter`. sig { returns(T::Array[T.any(OpenAI::Models::ComparisonFilter, T.anything)]) } attr_accessor :filters @@ -34,7 +34,7 @@ module OpenAI def to_hash; end # A filter used to compare a specified attribute key to a given value using a - # defined comparison operation. + # defined comparison operation. module Filter extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/embedding.rbi b/rbi/lib/openai/models/embedding.rbi index 0567898b..ac1fc9ab 100644 --- a/rbi/lib/openai/models/embedding.rbi +++ b/rbi/lib/openai/models/embedding.rbi @@ -4,8 +4,8 @@ module OpenAI module Models class Embedding < OpenAI::Internal::Type::BaseModel # The embedding vector, which is a list of floats. The length of vector depends on - # the model as listed in the - # [embedding guide](https://platform.openai.com/docs/guides/embeddings). + # the model as listed in the + # [embedding guide](https://platform.openai.com/docs/guides/embeddings). sig { returns(T::Array[Float]) } attr_accessor :embedding diff --git a/rbi/lib/openai/models/embedding_create_params.rbi b/rbi/lib/openai/models/embedding_create_params.rbi index 6e763395..0869c887 100644 --- a/rbi/lib/openai/models/embedding_create_params.rbi +++ b/rbi/lib/openai/models/embedding_create_params.rbi @@ -7,26 +7,26 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # Input text to embed, encoded as a string or array of tokens. To embed multiple - # inputs in a single request, pass an array of strings or array of token arrays. - # The input must not exceed the max input tokens for the model (8192 tokens for - # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 - # dimensions or less. - # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - # for counting tokens. Some models may also impose a limit on total number of - # tokens summed across inputs. + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. Some models may also impose a limit on total number of + # tokens summed across inputs. 
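The embeddings `input` comment above allows the same string, string-array, and token-array encodings, subject to the batching limits it lists. A minimal sketch, assuming a configured client; the model name and inputs are illustrative:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.embeddings.create(
  model: "text-embedding-3-small",
  input: ["first document", "second document"] # or one string; arrays are capped at 2048 entries
)

response.data.map { |item| item.embedding.length }
```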
sig { returns(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]])) } attr_accessor :input # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.any(String, OpenAI::Models::EmbeddingModel::OrSymbol)) } attr_accessor :model # The number of dimensions the resulting output embeddings should have. Only - # supported in `text-embedding-3` and later models. + # supported in `text-embedding-3` and later models. sig { returns(T.nilable(Integer)) } attr_reader :dimensions @@ -34,7 +34,7 @@ module OpenAI attr_writer :dimensions # The format to return the embeddings in. Can be either `float` or - # [`base64`](https://pypi.org/project/pybase64/). + # [`base64`](https://pypi.org/project/pybase64/). sig { returns(T.nilable(OpenAI::Models::EmbeddingCreateParams::EncodingFormat::OrSymbol)) } attr_reader :encoding_format @@ -42,8 +42,8 @@ module OpenAI attr_writer :encoding_format # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } attr_reader :user @@ -79,13 +79,13 @@ module OpenAI def to_hash; end # Input text to embed, encoded as a string or array of tokens. To embed multiple - # inputs in a single request, pass an array of strings or array of token arrays. - # The input must not exceed the max input tokens for the model (8192 tokens for - # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 - # dimensions or less. - # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - # for counting tokens. Some models may also impose a limit on total number of - # tokens summed across inputs. + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. Some models may also impose a limit on total number of + # tokens summed across inputs. module Input extend OpenAI::Internal::Type::Union @@ -104,10 +104,10 @@ module OpenAI end # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. module Model extend OpenAI::Internal::Type::Union @@ -116,7 +116,7 @@ module OpenAI end # The format to return the embeddings in. 
Can be either `float` or - # [`base64`](https://pypi.org/project/pybase64/). + # [`base64`](https://pypi.org/project/pybase64/). module EncodingFormat extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/file_chunking_strategy_param.rbi b/rbi/lib/openai/models/file_chunking_strategy_param.rbi index 346a7b5a..ca7b56c4 100644 --- a/rbi/lib/openai/models/file_chunking_strategy_param.rbi +++ b/rbi/lib/openai/models/file_chunking_strategy_param.rbi @@ -3,7 +3,7 @@ module OpenAI module Models # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. module FileChunkingStrategyParam extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/file_create_params.rbi b/rbi/lib/openai/models/file_create_params.rbi index fb50d427..c3d5af72 100644 --- a/rbi/lib/openai/models/file_create_params.rbi +++ b/rbi/lib/openai/models/file_create_params.rbi @@ -11,9 +11,9 @@ module OpenAI attr_accessor :file # The intended purpose of the uploaded file. One of: - `assistants`: Used in the - # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for - # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: - # Flexible file type for any purpose - `evals`: Used for eval data sets + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets sig { returns(OpenAI::Models::FilePurpose::OrSymbol) } attr_accessor :purpose diff --git a/rbi/lib/openai/models/file_list_params.rbi b/rbi/lib/openai/models/file_list_params.rbi index e89cec8f..daad5aea 100644 --- a/rbi/lib/openai/models/file_list_params.rbi +++ b/rbi/lib/openai/models/file_list_params.rbi @@ -7,9 +7,9 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } attr_reader :after @@ -17,7 +17,7 @@ module OpenAI attr_writer :after # A limit on the number of objects to be returned. Limit can range between 1 and - # 10,000, and the default is 10,000. + # 10,000, and the default is 10,000. sig { returns(T.nilable(Integer)) } attr_reader :limit @@ -25,7 +25,7 @@ module OpenAI attr_writer :limit # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. sig { returns(T.nilable(OpenAI::Models::FileListParams::Order::OrSymbol)) } attr_reader :order @@ -66,7 +66,7 @@ module OpenAI def to_hash; end # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. 
+      #   order and `desc` for descending order.
module Order extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/file_object.rbi b/rbi/lib/openai/models/file_object.rbi index 34dd1542..15e6c24d 100644 --- a/rbi/lib/openai/models/file_object.rbi +++ b/rbi/lib/openai/models/file_object.rbi @@ -24,13 +24,13 @@ module OpenAI attr_accessor :object # The intended purpose of the file. Supported values are `assistants`, - # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` - # and `vision`. + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` + # and `vision`. sig { returns(OpenAI::Models::FileObject::Purpose::TaggedSymbol) } attr_accessor :purpose # Deprecated. The current status of the file, which can be either `uploaded`, - # `processed`, or `error`. + # `processed`, or `error`. sig { returns(OpenAI::Models::FileObject::Status::TaggedSymbol) } attr_accessor :status @@ -42,7 +42,7 @@ module OpenAI attr_writer :expires_at # Deprecated. For details on why a fine-tuning training file failed validation, - # see the `error` field on `fine_tuning.job`. + # see the `error` field on `fine_tuning.job`. sig { returns(T.nilable(String)) } attr_reader :status_details @@ -94,8 +94,8 @@ module OpenAI def to_hash; end # The intended purpose of the file. Supported values are `assistants`, - # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` - # and `vision`. + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` + # and `vision`. module Purpose extend OpenAI::Internal::Type::Enum @@ -115,7 +115,7 @@ module OpenAI end # Deprecated. The current status of the file, which can be either `uploaded`, - # `processed`, or `error`. + # `processed`, or `error`. module Status extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/file_purpose.rbi b/rbi/lib/openai/models/file_purpose.rbi index a1e5347b..274edce7 100644 --- a/rbi/lib/openai/models/file_purpose.rbi +++ b/rbi/lib/openai/models/file_purpose.rbi @@ -3,9 +3,9 @@ module OpenAI module Models # The intended purpose of the uploaded file. One of: - `assistants`: Used in the - # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for - # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: - # Flexible file type for any purpose - `evals`: Used for eval data sets + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets module FilePurpose extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi index 666480f0..b12fca64 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi @@ -13,7 +13,7 @@ module OpenAI attr_accessor :created_at # For fine-tuning jobs that have `failed`, this will contain more information on - # the cause of the failure. + # the cause of the failure. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Error)) } attr_reader :error @@ -26,17 +26,17 @@ module OpenAI attr_writer :error # The name of the fine-tuned model that is being created. The value will be null - # if the fine-tuning job is still running. + # if the fine-tuning job is still running. 
+      #   if the fine-tuning job is still running.
sig { returns(T.nilable(String)) } attr_accessor :fine_tuned_model # The Unix timestamp (in seconds) for when the fine-tuning job was finished. The - # value will be null if the fine-tuning job is still running. + # value will be null if the fine-tuning job is still running. sig { returns(T.nilable(Integer)) } attr_accessor :finished_at # The hyperparameters used for the fine-tuning job. This value will only be - # returned when running `supervised` jobs. + # returned when running `supervised` jobs. sig { returns(OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters) } attr_reader :hyperparameters @@ -61,8 +61,8 @@ module OpenAI attr_accessor :organization_id # The compiled results file ID(s) for the fine-tuning job. You can retrieve the - # results with the - # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + # results with the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). sig { returns(T::Array[String]) } attr_accessor :result_files @@ -71,28 +71,28 @@ module OpenAI attr_accessor :seed # The current status of the fine-tuning job, which can be either - # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. sig { returns(OpenAI::Models::FineTuning::FineTuningJob::Status::TaggedSymbol) } attr_accessor :status # The total number of billable tokens processed by this fine-tuning job. The value - # will be null if the fine-tuning job is still running. + # will be null if the fine-tuning job is still running. sig { returns(T.nilable(Integer)) } attr_accessor :trained_tokens # The file ID used for training. You can retrieve the training data with the - # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). sig { returns(String) } attr_accessor :training_file # The file ID used for validation. You can retrieve the validation results with - # the - # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + # the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). sig { returns(T.nilable(String)) } attr_accessor :validation_file # The Unix timestamp (in seconds) for when the fine-tuning job is estimated to - # finish. The value will be null if the fine-tuning job is not running. + # finish. The value will be null if the fine-tuning job is not running. sig { returns(T.nilable(Integer)) } attr_accessor :estimated_finish @@ -101,11 +101,11 @@ module OpenAI attr_accessor :integrations # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -117,7 +117,7 @@ module OpenAI attr_writer :method_ # The `fine_tuning.job` object represents a fine-tuning job that has been created - # through the API. 
+ # through the API. sig do params( id: String, @@ -203,12 +203,12 @@ module OpenAI attr_accessor :message # The parameter that was invalid, usually `training_file` or `validation_file`. - # This field will be null if the failure was not parameter-specific. + # This field will be null if the failure was not parameter-specific. sig { returns(T.nilable(String)) } attr_accessor :param # For fine-tuning jobs that have `failed`, this will contain more information on - # the cause of the failure. + # the cause of the failure. sig { params(code: String, message: String, param: T.nilable(String)).returns(T.attached_class) } def self.new(code:, message:, param:); end @@ -218,7 +218,7 @@ module OpenAI class Hyperparameters < OpenAI::Internal::Type::BaseModel # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :batch_size @@ -226,7 +226,7 @@ module OpenAI attr_writer :batch_size # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } attr_reader :learning_rate_multiplier @@ -234,7 +234,7 @@ module OpenAI attr_writer :learning_rate_multiplier # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :n_epochs @@ -242,7 +242,7 @@ module OpenAI attr_writer :n_epochs # The hyperparameters used for the fine-tuning job. This value will only be - # returned when running `supervised` jobs. + # returned when running `supervised` jobs. sig do params( batch_size: T.any(Symbol, Integer), @@ -266,7 +266,7 @@ module OpenAI def to_hash; end # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. module BatchSize extend OpenAI::Internal::Type::Union @@ -275,7 +275,7 @@ module OpenAI end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. module LearningRateMultiplier extend OpenAI::Internal::Type::Union @@ -284,7 +284,7 @@ module OpenAI end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. module NEpochs extend OpenAI::Internal::Type::Union @@ -294,7 +294,7 @@ module OpenAI end # The current status of the fine-tuning job, which can be either - # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. module Status extend OpenAI::Internal::Type::Enum @@ -394,7 +394,7 @@ module OpenAI class Hyperparameters < OpenAI::Internal::Type::BaseModel # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :batch_size @@ -402,7 +402,7 @@ module OpenAI attr_writer :batch_size # The beta value for the DPO method. 
A higher beta value will increase the weight - # of the penalty between the policy and reference model. + # of the penalty between the policy and reference model. sig { returns(T.nilable(T.any(Symbol, Float))) } attr_reader :beta @@ -410,7 +410,7 @@ module OpenAI attr_writer :beta # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } attr_reader :learning_rate_multiplier @@ -418,7 +418,7 @@ module OpenAI attr_writer :learning_rate_multiplier # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :n_epochs @@ -451,7 +451,7 @@ module OpenAI def to_hash; end # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. module BatchSize extend OpenAI::Internal::Type::Union @@ -460,7 +460,7 @@ module OpenAI end # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. + # of the penalty between the policy and reference model. module Beta extend OpenAI::Internal::Type::Union @@ -469,7 +469,7 @@ module OpenAI end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. module LearningRateMultiplier extend OpenAI::Internal::Type::Union @@ -478,7 +478,7 @@ module OpenAI end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. module NEpochs extend OpenAI::Internal::Type::Union @@ -524,7 +524,7 @@ module OpenAI class Hyperparameters < OpenAI::Internal::Type::BaseModel # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :batch_size @@ -532,7 +532,7 @@ module OpenAI attr_writer :batch_size # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } attr_reader :learning_rate_multiplier @@ -540,7 +540,7 @@ module OpenAI attr_writer :learning_rate_multiplier # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :n_epochs @@ -571,7 +571,7 @@ module OpenAI def to_hash; end # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. module BatchSize extend OpenAI::Internal::Type::Union @@ -580,7 +580,7 @@ module OpenAI end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. module LearningRateMultiplier extend OpenAI::Internal::Type::Union @@ -589,7 +589,7 @@ module OpenAI end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. 
+      #   through the training dataset.
module NEpochs extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi index 717d084a..ba0eda55 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi @@ -9,19 +9,19 @@ module OpenAI attr_accessor :project # The entity to use for the run. This allows you to set the team or username of - # the WandB user that you would like associated with the run. If not set, the - # default entity for the registered WandB API key is used. + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. sig { returns(T.nilable(String)) } attr_accessor :entity # A display name to set for the run. If not set, we will use the Job ID as the - # name. + # name. sig { returns(T.nilable(String)) } attr_accessor :name # A list of tags to be attached to the newly created run. These tags are passed - # through directly to WandB. Some default tags are generated by OpenAI: - # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". sig { returns(T.nilable(T::Array[String])) } attr_reader :tags @@ -29,9 +29,9 @@ module OpenAI attr_writer :tags # The settings for your integration with Weights and Biases. This payload - # specifies the project that metrics will be sent to. Optionally, you can set an - # explicit display name for your run, add tags to your run, and set a default - # entity (team, username, etc) to be associated with your run. + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. sig do params(project: String, entity: T.nilable(String), name: T.nilable(String), tags: T::Array[String]) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi index a40e4f11..5c771091 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi @@ -9,9 +9,9 @@ module OpenAI attr_accessor :type # The settings for your integration with Weights and Biases. This payload - # specifies the project that metrics will be sent to. Optionally, you can set an - # explicit display name for your run, add tags to your run, and set a default - # entity (team, username, etc) to be associated with your run. + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. 
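The hyperparameter unions above (`:auto` or a number) and the Weights and Biases settings come together when creating a job. A hedged sketch, assuming a configured client; the base model, file ID, and values are illustrative:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

job = client.fine_tuning.jobs.create(
  model: "gpt-4o-mini",         # illustrative base model
  training_file: "file-abc123", # hypothetical uploaded JSONL file ID
  hyperparameters: {            # now deprecated in favor of `method`; each field accepts :auto or a number
    batch_size: :auto,
    learning_rate_multiplier: 1.8,
    n_epochs: 3
  },
  integrations: [
    {
      type: "wandb",
      wandb: {
        project: "my-project",
        name: nil,   # falls back to the job ID
        entity: nil, # falls back to the registered WandB API key's default entity
        tags: ["my-tag"]
      }
    }
  ]
)

job.status # e.g. :validating_files
```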
sig { returns(OpenAI::Models::FineTuning::FineTuningJobWandbIntegration) } attr_reader :wandb diff --git a/rbi/lib/openai/models/fine_tuning/job_create_params.rbi b/rbi/lib/openai/models/fine_tuning/job_create_params.rbi index 8a30d533..4acc7f38 100644 --- a/rbi/lib/openai/models/fine_tuning/job_create_params.rbi +++ b/rbi/lib/openai/models/fine_tuning/job_create_params.rbi @@ -8,32 +8,32 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # The name of the model to fine-tune. You can select one of the - # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). sig { returns(T.any(String, OpenAI::Models::FineTuning::JobCreateParams::Model::OrSymbol)) } attr_accessor :model # The ID of an uploaded file that contains training data. # - # See [upload file](https://platform.openai.com/docs/api-reference/files/create) - # for how to upload a file. + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. # - # Your dataset must be formatted as a JSONL file. Additionally, you must upload - # your file with the purpose `fine-tune`. + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. # - # The contents of the file should differ depending on if the model uses the - # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), - # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - # format, or if the fine-tuning method uses the - # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) - # format. + # The contents of the file should differ depending on if the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or if the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. # - # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - # for more details. + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. sig { returns(String) } attr_accessor :training_file # The hyperparameters used for the fine-tuning job. This value is now deprecated - # in favor of `method`, and should be passed in under the `method` parameter. + # in favor of `method`, and should be passed in under the `method` parameter. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters)) } attr_reader :hyperparameters @@ -50,11 +50,11 @@ module OpenAI attr_accessor :integrations # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. 
sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -68,31 +68,31 @@ module OpenAI attr_writer :method_ # The seed controls the reproducibility of the job. Passing in the same seed and - # job parameters should produce the same results, but may differ in rare cases. If - # a seed is not specified, one will be generated for you. + # job parameters should produce the same results, but may differ in rare cases. If + # a seed is not specified, one will be generated for you. sig { returns(T.nilable(Integer)) } attr_accessor :seed # A string of up to 64 characters that will be added to your fine-tuned model - # name. + # name. # - # For example, a `suffix` of "custom-model-name" would produce a model name like - # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. sig { returns(T.nilable(String)) } attr_accessor :suffix # The ID of an uploaded file that contains validation data. # - # If you provide this file, the data is used to generate validation metrics - # periodically during fine-tuning. These metrics can be viewed in the fine-tuning - # results file. The same data should not be present in both train and validation - # files. + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. # - # Your dataset must be formatted as a JSONL file. You must upload your file with - # the purpose `fine-tune`. + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. # - # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - # for more details. + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. sig { returns(T.nilable(String)) } attr_accessor :validation_file @@ -145,7 +145,7 @@ module OpenAI def to_hash; end # The name of the model to fine-tune. You can select one of the - # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). module Model extend OpenAI::Internal::Type::Union @@ -164,7 +164,7 @@ module OpenAI class Hyperparameters < OpenAI::Internal::Type::BaseModel # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :batch_size @@ -172,7 +172,7 @@ module OpenAI attr_writer :batch_size # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } attr_reader :learning_rate_multiplier @@ -180,7 +180,7 @@ module OpenAI attr_writer :learning_rate_multiplier # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :n_epochs @@ -188,7 +188,7 @@ module OpenAI attr_writer :n_epochs # The hyperparameters used for the fine-tuning job. 
This value is now deprecated - # in favor of `method`, and should be passed in under the `method` parameter. + # in favor of `method`, and should be passed in under the `method` parameter. sig do params( batch_size: T.any(Symbol, Integer), @@ -212,7 +212,7 @@ module OpenAI def to_hash; end # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. module BatchSize extend OpenAI::Internal::Type::Union @@ -221,7 +221,7 @@ module OpenAI end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. module LearningRateMultiplier extend OpenAI::Internal::Type::Union @@ -230,7 +230,7 @@ module OpenAI end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. module NEpochs extend OpenAI::Internal::Type::Union @@ -241,14 +241,14 @@ module OpenAI class Integration < OpenAI::Internal::Type::BaseModel # The type of integration to enable. Currently, only "wandb" (Weights and Biases) - # is supported. + # is supported. sig { returns(Symbol) } attr_accessor :type # The settings for your integration with Weights and Biases. This payload - # specifies the project that metrics will be sent to. Optionally, you can set an - # explicit display name for your run, add tags to your run, and set a default - # entity (team, username, etc) to be associated with your run. + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. sig { returns(OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb) } attr_reader :wandb @@ -278,19 +278,19 @@ module OpenAI attr_accessor :project # The entity to use for the run. This allows you to set the team or username of - # the WandB user that you would like associated with the run. If not set, the - # default entity for the registered WandB API key is used. + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. sig { returns(T.nilable(String)) } attr_accessor :entity # A display name to set for the run. If not set, we will use the Job ID as the - # name. + # name. sig { returns(T.nilable(String)) } attr_accessor :name # A list of tags to be attached to the newly created run. These tags are passed - # through directly to WandB. Some default tags are generated by OpenAI: - # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". sig { returns(T.nilable(T::Array[String])) } attr_reader :tags @@ -298,9 +298,9 @@ module OpenAI attr_writer :tags # The settings for your integration with Weights and Biases. This payload - # specifies the project that metrics will be sent to. Optionally, you can set an - # explicit display name for your run, add tags to your run, and set a default - # entity (team, username, etc) to be associated with your run. + # specifies the project that metrics will be sent to. 
Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. sig do params( project: String, @@ -414,7 +414,7 @@ module OpenAI class Hyperparameters < OpenAI::Internal::Type::BaseModel # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :batch_size @@ -422,7 +422,7 @@ module OpenAI attr_writer :batch_size # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. + # of the penalty between the policy and reference model. sig { returns(T.nilable(T.any(Symbol, Float))) } attr_reader :beta @@ -430,7 +430,7 @@ module OpenAI attr_writer :beta # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } attr_reader :learning_rate_multiplier @@ -438,7 +438,7 @@ module OpenAI attr_writer :learning_rate_multiplier # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :n_epochs @@ -471,7 +471,7 @@ module OpenAI def to_hash; end # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. module BatchSize extend OpenAI::Internal::Type::Union @@ -480,7 +480,7 @@ module OpenAI end # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. + # of the penalty between the policy and reference model. module Beta extend OpenAI::Internal::Type::Union @@ -489,7 +489,7 @@ module OpenAI end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. module LearningRateMultiplier extend OpenAI::Internal::Type::Union @@ -498,7 +498,7 @@ module OpenAI end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. module NEpochs extend OpenAI::Internal::Type::Union @@ -546,7 +546,7 @@ module OpenAI class Hyperparameters < OpenAI::Internal::Type::BaseModel # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :batch_size @@ -554,7 +554,7 @@ module OpenAI attr_writer :batch_size # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } attr_reader :learning_rate_multiplier @@ -562,7 +562,7 @@ module OpenAI attr_writer :learning_rate_multiplier # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. 
+ # through the training dataset.
sig { returns(T.nilable(T.any(Symbol, Integer))) } attr_reader :n_epochs @@ -593,7 +593,7 @@ module OpenAI def to_hash; end # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. module BatchSize extend OpenAI::Internal::Type::Union @@ -602,7 +602,7 @@ module OpenAI end # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. module LearningRateMultiplier extend OpenAI::Internal::Type::Union @@ -611,7 +611,7 @@ module OpenAI end # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. module NEpochs extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/fine_tuning/job_list_params.rbi b/rbi/lib/openai/models/fine_tuning/job_list_params.rbi index 7b8bea48..49bc0f41 100644 --- a/rbi/lib/openai/models/fine_tuning/job_list_params.rbi +++ b/rbi/lib/openai/models/fine_tuning/job_list_params.rbi @@ -22,7 +22,7 @@ module OpenAI attr_writer :limit # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. - # Alternatively, set `metadata=null` to indicate no metadata. + # Alternatively, set `metadata=null` to indicate no metadata. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata diff --git a/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi b/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi index 06fb98b6..7c253eca 100644 --- a/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi +++ b/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi @@ -42,7 +42,7 @@ module OpenAI attr_accessor :step_number # The `fine_tuning.job.checkpoint` object represents a model checkpoint for a - # fine-tuning job that is ready to use. + # fine-tuning job that is ready to use. sig do params( id: String, diff --git a/rbi/lib/openai/models/function_definition.rbi b/rbi/lib/openai/models/function_definition.rbi index cb9eeb56..0351f239 100644 --- a/rbi/lib/openai/models/function_definition.rbi +++ b/rbi/lib/openai/models/function_definition.rbi @@ -4,12 +4,12 @@ module OpenAI module Models class FunctionDefinition < OpenAI::Internal::Type::BaseModel # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain - # underscores and dashes, with a maximum length of 64. + # underscores and dashes, with a maximum length of 64. sig { returns(String) } attr_accessor :name # A description of what the function does, used by the model to choose when and - # how to call the function. + # how to call the function. sig { returns(T.nilable(String)) } attr_reader :description @@ -17,12 +17,12 @@ module OpenAI attr_writer :description # The parameters the functions accepts, described as a JSON Schema object. See the - # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, - # and the - # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - # documentation about the format. + # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + # and the + # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + # documentation about the format. # - # Omitting `parameters` defines a function with an empty parameter list. 
+ # Omitting `parameters` defines a function with an empty parameter list. sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } attr_reader :parameters @@ -30,10 +30,10 @@ module OpenAI attr_writer :parameters # Whether to enable strict schema adherence when generating the function call. If - # set to true, the model will follow the exact schema defined in the `parameters` - # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn - # more about Structured Outputs in the - # [function calling guide](docs/guides/function-calling). + # set to true, the model will follow the exact schema defined in the `parameters` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn + # more about Structured Outputs in the + # [function calling guide](docs/guides/function-calling). sig { returns(T.nilable(T::Boolean)) } attr_accessor :strict diff --git a/rbi/lib/openai/models/image.rbi b/rbi/lib/openai/models/image.rbi index 9469de4c..3a2f3c67 100644 --- a/rbi/lib/openai/models/image.rbi +++ b/rbi/lib/openai/models/image.rbi @@ -4,7 +4,7 @@ module OpenAI module Models class Image < OpenAI::Internal::Type::BaseModel # The base64-encoded JSON of the generated image, if `response_format` is - # `b64_json`. + # `b64_json`. sig { returns(T.nilable(String)) } attr_reader :b64_json @@ -12,7 +12,7 @@ module OpenAI attr_writer :b64_json # The prompt that was used to generate the image, if there was any revision to the - # prompt. + # prompt. sig { returns(T.nilable(String)) } attr_reader :revised_prompt diff --git a/rbi/lib/openai/models/image_create_variation_params.rbi b/rbi/lib/openai/models/image_create_variation_params.rbi index 9a1061c3..59f2bd84 100644 --- a/rbi/lib/openai/models/image_create_variation_params.rbi +++ b/rbi/lib/openai/models/image_create_variation_params.rbi @@ -7,34 +7,34 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # The image to use as the basis for the variation(s). Must be a valid PNG file, - # less than 4MB, and square. + # less than 4MB, and square. sig { returns(T.any(IO, StringIO)) } attr_accessor :image # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. sig { returns(T.nilable(T.any(String, OpenAI::Models::ImageModel::OrSymbol))) } attr_accessor :model # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - # `n=1` is supported. + # `n=1` is supported. sig { returns(T.nilable(Integer)) } attr_accessor :n # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. sig { returns(T.nilable(OpenAI::Models::ImageCreateVariationParams::ResponseFormat::OrSymbol)) } attr_accessor :response_format # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. sig { returns(T.nilable(OpenAI::Models::ImageCreateVariationParams::Size::OrSymbol)) } attr_accessor :size # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
sig { returns(T.nilable(String)) } attr_reader :user @@ -81,7 +81,7 @@ module OpenAI def to_hash; end # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. module Model extend OpenAI::Internal::Type::Union @@ -90,8 +90,8 @@ module OpenAI end # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. module ResponseFormat extend OpenAI::Internal::Type::Enum @@ -107,7 +107,7 @@ module OpenAI end # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. module Size extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/image_edit_params.rbi b/rbi/lib/openai/models/image_edit_params.rbi index 58bf5ddd..8d4b36be 100644 --- a/rbi/lib/openai/models/image_edit_params.rbi +++ b/rbi/lib/openai/models/image_edit_params.rbi @@ -7,18 +7,18 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - # is not provided, image must have transparency, which will be used as the mask. + # is not provided, image must have transparency, which will be used as the mask. sig { returns(T.any(IO, StringIO)) } attr_accessor :image # A text description of the desired image(s). The maximum length is 1000 - # characters. + # characters. sig { returns(String) } attr_accessor :prompt # An additional image whose fully transparent areas (e.g. where alpha is zero) - # indicate where `image` should be edited. Must be a valid PNG file, less than - # 4MB, and have the same dimensions as `image`. + # indicate where `image` should be edited. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. sig { returns(T.nilable(T.any(IO, StringIO))) } attr_reader :mask @@ -26,7 +26,7 @@ module OpenAI attr_writer :mask # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. sig { returns(T.nilable(T.any(String, OpenAI::Models::ImageModel::OrSymbol))) } attr_accessor :model @@ -35,19 +35,19 @@ module OpenAI attr_accessor :n # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. sig { returns(T.nilable(OpenAI::Models::ImageEditParams::ResponseFormat::OrSymbol)) } attr_accessor :response_format # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. sig { returns(T.nilable(OpenAI::Models::ImageEditParams::Size::OrSymbol)) } attr_accessor :size # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } attr_reader :user @@ -98,7 +98,7 @@ module OpenAI def to_hash; end # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. module Model extend OpenAI::Internal::Type::Union @@ -107,8 +107,8 @@ module OpenAI end # The format in which the generated images are returned. 
Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. module ResponseFormat extend OpenAI::Internal::Type::Enum @@ -124,7 +124,7 @@ module OpenAI end # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. module Size extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/image_generate_params.rbi b/rbi/lib/openai/models/image_generate_params.rbi index b03fd35e..c10c716c 100644 --- a/rbi/lib/openai/models/image_generate_params.rbi +++ b/rbi/lib/openai/models/image_generate_params.rbi @@ -7,7 +7,7 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # A text description of the desired image(s). The maximum length is 1000 - # characters for `dall-e-2` and 4000 characters for `dall-e-3`. + # characters for `dall-e-2` and 4000 characters for `dall-e-3`. sig { returns(String) } attr_accessor :prompt @@ -16,13 +16,13 @@ module OpenAI attr_accessor :model # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - # `n=1` is supported. + # `n=1` is supported. sig { returns(T.nilable(Integer)) } attr_accessor :n # The quality of the image that will be generated. `hd` creates images with finer - # details and greater consistency across the image. This param is only supported - # for `dall-e-3`. + # details and greater consistency across the image. This param is only supported + # for `dall-e-3`. sig { returns(T.nilable(OpenAI::Models::ImageGenerateParams::Quality::OrSymbol)) } attr_reader :quality @@ -30,27 +30,27 @@ module OpenAI attr_writer :quality # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. sig { returns(T.nilable(OpenAI::Models::ImageGenerateParams::ResponseFormat::OrSymbol)) } attr_accessor :response_format # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - # `1024x1792` for `dall-e-3` models. + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. sig { returns(T.nilable(OpenAI::Models::ImageGenerateParams::Size::OrSymbol)) } attr_accessor :size # The style of the generated images. Must be one of `vivid` or `natural`. Vivid - # causes the model to lean towards generating hyper-real and dramatic images. - # Natural causes the model to produce more natural, less hyper-real looking - # images. This param is only supported for `dall-e-3`. + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. sig { returns(T.nilable(OpenAI::Models::ImageGenerateParams::Style::OrSymbol)) } attr_accessor :style # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
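As a usage sketch for the generation parameters above, reusing the `client` from the earlier fine-tuning example. The resource method name and the response accessors (`data.first.url`) are assumptions based on the SDK's usual layout; the enum symbols mirror the documented values.

```ruby
image = client.images.generate(
  model: "dall-e-3",
  prompt: "An impressionist oil painting of a lighthouse at dawn",
  quality: :hd,           # finer detail; supported by dall-e-3 only
  style: :natural,        # :vivid (the alternative) leans hyper-real and dramatic
  size: :"1792x1024",     # dall-e-3 also accepts 1024x1024 and 1024x1792
  response_format: :url   # URLs expire 60 minutes after generation
)

puts image.data.first.url
```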
sig { returns(T.nilable(String)) } attr_reader :user @@ -109,8 +109,8 @@ module OpenAI end # The quality of the image that will be generated. `hd` creates images with finer - # details and greater consistency across the image. This param is only supported - # for `dall-e-3`. + # details and greater consistency across the image. This param is only supported + # for `dall-e-3`. module Quality extend OpenAI::Internal::Type::Enum @@ -126,8 +126,8 @@ module OpenAI end # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. module ResponseFormat extend OpenAI::Internal::Type::Enum @@ -143,8 +143,8 @@ module OpenAI end # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - # `1024x1792` for `dall-e-3` models. + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. module Size extend OpenAI::Internal::Type::Enum @@ -163,9 +163,9 @@ module OpenAI end # The style of the generated images. Must be one of `vivid` or `natural`. Vivid - # causes the model to lean towards generating hyper-real and dramatic images. - # Natural causes the model to produce more natural, less hyper-real looking - # images. This param is only supported for `dall-e-3`. + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. module Style extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/moderation.rbi b/rbi/lib/openai/models/moderation.rbi index 0850f88c..03bc7cb0 100644 --- a/rbi/lib/openai/models/moderation.rbi +++ b/rbi/lib/openai/models/moderation.rbi @@ -59,59 +59,59 @@ module OpenAI class Categories < OpenAI::Internal::Type::BaseModel # Content that expresses, incites, or promotes harassing language towards any - # target. + # target. sig { returns(T::Boolean) } attr_accessor :harassment # Harassment content that also includes violence or serious harm towards any - # target. + # target. sig { returns(T::Boolean) } attr_accessor :harassment_threatening # Content that expresses, incites, or promotes hate based on race, gender, - # ethnicity, religion, nationality, sexual orientation, disability status, or - # caste. Hateful content aimed at non-protected groups (e.g., chess players) is - # harassment. + # ethnicity, religion, nationality, sexual orientation, disability status, or + # caste. Hateful content aimed at non-protected groups (e.g., chess players) is + # harassment. sig { returns(T::Boolean) } attr_accessor :hate # Hateful content that also includes violence or serious harm towards the targeted - # group based on race, gender, ethnicity, religion, nationality, sexual - # orientation, disability status, or caste. + # group based on race, gender, ethnicity, religion, nationality, sexual + # orientation, disability status, or caste. sig { returns(T::Boolean) } attr_accessor :hate_threatening # Content that includes instructions or advice that facilitate the planning or - # execution of wrongdoing, or that gives advice or instruction on how to commit - # illicit acts. For example, "how to shoplift" would fit this category. 
+ # execution of wrongdoing, or that gives advice or instruction on how to commit + # illicit acts. For example, "how to shoplift" would fit this category. sig { returns(T.nilable(T::Boolean)) } attr_accessor :illicit # Content that includes instructions or advice that facilitate the planning or - # execution of wrongdoing that also includes violence, or that gives advice or - # instruction on the procurement of any weapon. + # execution of wrongdoing that also includes violence, or that gives advice or + # instruction on the procurement of any weapon. sig { returns(T.nilable(T::Boolean)) } attr_accessor :illicit_violent # Content that promotes, encourages, or depicts acts of self-harm, such as - # suicide, cutting, and eating disorders. + # suicide, cutting, and eating disorders. sig { returns(T::Boolean) } attr_accessor :self_harm # Content that encourages performing acts of self-harm, such as suicide, cutting, - # and eating disorders, or that gives instructions or advice on how to commit such - # acts. + # and eating disorders, or that gives instructions or advice on how to commit such + # acts. sig { returns(T::Boolean) } attr_accessor :self_harm_instructions # Content where the speaker expresses that they are engaging or intend to engage - # in acts of self-harm, such as suicide, cutting, and eating disorders. + # in acts of self-harm, such as suicide, cutting, and eating disorders. sig { returns(T::Boolean) } attr_accessor :self_harm_intent # Content meant to arouse sexual excitement, such as the description of sexual - # activity, or that promotes sexual services (excluding sex education and - # wellness). + # activity, or that promotes sexual services (excluding sex education and + # wellness). sig { returns(T::Boolean) } attr_accessor :sexual diff --git a/rbi/lib/openai/models/moderation_create_params.rbi b/rbi/lib/openai/models/moderation_create_params.rbi index 2c12c369..1e4e920e 100644 --- a/rbi/lib/openai/models/moderation_create_params.rbi +++ b/rbi/lib/openai/models/moderation_create_params.rbi @@ -7,7 +7,7 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # Input (or inputs) to classify. Can be a single string, an array of strings, or - # an array of multi-modal input objects similar to other models. + # an array of multi-modal input objects similar to other models. sig do returns( T.any( @@ -20,9 +20,9 @@ module OpenAI attr_accessor :input # The content moderation model you would like to use. Learn more in - # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and - # learn about available models - # [here](https://platform.openai.com/docs/models#moderation). + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). sig { returns(T.nilable(T.any(String, OpenAI::Models::ModerationModel::OrSymbol))) } attr_reader :model @@ -66,7 +66,7 @@ module OpenAI def to_hash; end # Input (or inputs) to classify. Can be a single string, an array of strings, or - # an array of multi-modal input objects similar to other models. + # an array of multi-modal input objects similar to other models. module Input extend OpenAI::Internal::Type::Union @@ -88,9 +88,9 @@ module OpenAI end # The content moderation model you would like to use. Learn more in - # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and - # learn about available models - # [here](https://platform.openai.com/docs/models#moderation). 
+ # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). module Model extend OpenAI::Internal::Type::Union diff --git a/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi b/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi index 10b6f90a..0d6d06b0 100644 --- a/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi +++ b/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi @@ -8,8 +8,8 @@ module OpenAI attr_accessor :type # This is returned when the chunking strategy is unknown. Typically, this is - # because the file was indexed before the `chunking_strategy` concept was - # introduced in the API. + # because the file was indexed before the `chunking_strategy` concept was + # introduced in the API. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :other); end diff --git a/rbi/lib/openai/models/reasoning.rbi b/rbi/lib/openai/models/reasoning.rbi index f16ebf77..ec1c7ac3 100644 --- a/rbi/lib/openai/models/reasoning.rbi +++ b/rbi/lib/openai/models/reasoning.rbi @@ -5,25 +5,25 @@ module OpenAI class Reasoning < OpenAI::Internal::Type::BaseModel # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(OpenAI::Models::ReasoningEffort::OrSymbol)) } attr_accessor :effort # **computer_use_preview only** # - # A summary of the reasoning performed by the model. This can be useful for - # debugging and understanding the model's reasoning process. One of `concise` or - # `detailed`. + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `concise` or + # `detailed`. sig { returns(T.nilable(OpenAI::Models::Reasoning::GenerateSummary::OrSymbol)) } attr_accessor :generate_summary # **o-series models only** # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). sig do params( effort: T.nilable(OpenAI::Models::ReasoningEffort::OrSymbol), @@ -46,9 +46,9 @@ module OpenAI # **computer_use_preview only** # - # A summary of the reasoning performed by the model. This can be useful for - # debugging and understanding the model's reasoning process. One of `concise` or - # `detailed`. + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `concise` or + # `detailed`. 
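A short sketch of how these reasoning options surface at the call site; the Responses resource method and the model name are assumptions, and per the docs above `generate_summary` would only apply to `computer_use_preview`.

```ruby
response = client.responses.create(
  model: "o3-mini",
  input: "Prove that the sum of two odd integers is even.",
  # `effort` accepts :low, :medium, or :high; lower effort trades reasoning
  # depth for faster responses and fewer reasoning tokens.
  reasoning: {effort: :low}
)
```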
module GenerateSummary extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/reasoning_effort.rbi b/rbi/lib/openai/models/reasoning_effort.rbi index 723cc106..1459f48b 100644 --- a/rbi/lib/openai/models/reasoning_effort.rbi +++ b/rbi/lib/openai/models/reasoning_effort.rbi @@ -4,10 +4,10 @@ module OpenAI module Models # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. module ReasoningEffort extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/response_format_json_object.rbi b/rbi/lib/openai/models/response_format_json_object.rbi index 707ef81c..caf63a9c 100644 --- a/rbi/lib/openai/models/response_format_json_object.rbi +++ b/rbi/lib/openai/models/response_format_json_object.rbi @@ -8,8 +8,8 @@ module OpenAI attr_accessor :type # JSON object response format. An older method of generating JSON responses. Using - # `json_schema` is recommended for models that support it. Note that the model - # will not generate JSON without a system or user message instructing it to do so. + # `json_schema` is recommended for models that support it. Note that the model + # will not generate JSON without a system or user message instructing it to do so. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :json_object); end diff --git a/rbi/lib/openai/models/response_format_json_schema.rbi b/rbi/lib/openai/models/response_format_json_schema.rbi index 735f2c54..26bae2de 100644 --- a/rbi/lib/openai/models/response_format_json_schema.rbi +++ b/rbi/lib/openai/models/response_format_json_schema.rbi @@ -20,8 +20,8 @@ module OpenAI attr_accessor :type # JSON Schema response format. Used to generate structured JSON responses. Learn - # more about - # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + # more about + # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). sig do params( json_schema: T.any(OpenAI::Models::ResponseFormatJSONSchema::JSONSchema, OpenAI::Internal::AnyHash), @@ -36,12 +36,12 @@ module OpenAI class JSONSchema < OpenAI::Internal::Type::BaseModel # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores - # and dashes, with a maximum length of 64. + # and dashes, with a maximum length of 64. sig { returns(String) } attr_accessor :name # A description of what the response format is for, used by the model to determine - # how to respond in the format. + # how to respond in the format. sig { returns(T.nilable(String)) } attr_reader :description @@ -49,7 +49,7 @@ module OpenAI attr_writer :description # The schema for the response format, described as a JSON Schema object. Learn how - # to build JSON schemas [here](https://json-schema.org/). + # to build JSON schemas [here](https://json-schema.org/). sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } attr_reader :schema @@ -57,10 +57,10 @@ module OpenAI attr_writer :schema # Whether to enable strict schema adherence when generating the output. 
If set to - # true, the model will always follow the exact schema defined in the `schema` - # field. Only a subset of JSON Schema is supported when `strict` is `true`. To - # learn more, read the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). sig { returns(T.nilable(T::Boolean)) } attr_accessor :strict diff --git a/rbi/lib/openai/models/responses/computer_tool.rbi b/rbi/lib/openai/models/responses/computer_tool.rbi index 88894312..c20479e3 100644 --- a/rbi/lib/openai/models/responses/computer_tool.rbi +++ b/rbi/lib/openai/models/responses/computer_tool.rbi @@ -21,7 +21,7 @@ module OpenAI attr_accessor :type # A tool that controls a virtual computer. Learn more about the - # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). sig do params( display_height: Float, diff --git a/rbi/lib/openai/models/responses/easy_input_message.rbi b/rbi/lib/openai/models/responses/easy_input_message.rbi index e1819f46..6f2f6316 100644 --- a/rbi/lib/openai/models/responses/easy_input_message.rbi +++ b/rbi/lib/openai/models/responses/easy_input_message.rbi @@ -5,7 +5,7 @@ module OpenAI module Responses class EasyInputMessage < OpenAI::Internal::Type::BaseModel # Text, image, or audio input to the model, used to generate a response. Can also - # contain previous assistant responses. + # contain previous assistant responses. sig do returns( T.any( @@ -23,7 +23,7 @@ module OpenAI attr_accessor :content # The role of the message input. One of `user`, `assistant`, `system`, or - # `developer`. + # `developer`. sig { returns(OpenAI::Models::Responses::EasyInputMessage::Role::OrSymbol) } attr_accessor :role @@ -35,10 +35,10 @@ module OpenAI attr_writer :type # A message input to the model with a role indicating instruction following - # hierarchy. Instructions given with the `developer` or `system` role take - # precedence over instructions given with the `user` role. Messages with the - # `assistant` role are presumed to have been generated by the model in previous - # interactions. + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. sig do params( content: T.any( @@ -81,7 +81,7 @@ module OpenAI def to_hash; end # Text, image, or audio input to the model, used to generate a response. Can also - # contain previous assistant responses. + # contain previous assistant responses. module Content extend OpenAI::Internal::Type::Union @@ -104,7 +104,7 @@ module OpenAI end # The role of the message input. One of `user`, `assistant`, `system`, or - # `developer`. + # `developer`. module Role extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/responses/file_search_tool.rbi b/rbi/lib/openai/models/responses/file_search_tool.rbi index 37314e5d..b16985a4 100644 --- a/rbi/lib/openai/models/responses/file_search_tool.rbi +++ b/rbi/lib/openai/models/responses/file_search_tool.rbi @@ -25,7 +25,7 @@ module OpenAI attr_writer :filters # The maximum number of results to return. 
This number should be between 1 and 50 - # inclusive. + # inclusive. sig { returns(T.nilable(Integer)) } attr_reader :max_num_results @@ -45,8 +45,8 @@ module OpenAI attr_writer :ranking_options # A tool that searches for relevant content from uploaded files. Learn more about - # the - # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + # the + # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). sig do params( vector_store_ids: T::Array[String], @@ -97,8 +97,8 @@ module OpenAI attr_writer :ranker # The score threshold for the file search, a number between 0 and 1. Numbers - # closer to 1 will attempt to return only the most relevant results, but may - # return fewer results. + # closer to 1 will attempt to return only the most relevant results, but may + # return fewer results. sig { returns(T.nilable(Float)) } attr_reader :score_threshold diff --git a/rbi/lib/openai/models/responses/function_tool.rbi b/rbi/lib/openai/models/responses/function_tool.rbi index 16ed2997..5d1008c2 100644 --- a/rbi/lib/openai/models/responses/function_tool.rbi +++ b/rbi/lib/openai/models/responses/function_tool.rbi @@ -21,13 +21,13 @@ module OpenAI attr_accessor :type # A description of the function. Used by the model to determine whether or not to - # call the function. + # call the function. sig { returns(T.nilable(String)) } attr_accessor :description # Defines a function in your own code the model can choose to call. Learn more - # about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # about + # [function calling](https://platform.openai.com/docs/guides/function-calling). sig do params( name: String, diff --git a/rbi/lib/openai/models/responses/input_item_list_params.rbi b/rbi/lib/openai/models/responses/input_item_list_params.rbi index 83fef973..302e530f 100644 --- a/rbi/lib/openai/models/responses/input_item_list_params.rbi +++ b/rbi/lib/openai/models/responses/input_item_list_params.rbi @@ -22,7 +22,7 @@ module OpenAI attr_writer :before # Additional fields to include in the response. See the `include` parameter for - # Response creation above for more information. + # Response creation above for more information. sig { returns(T.nilable(T::Array[OpenAI::Models::Responses::ResponseIncludable::OrSymbol])) } attr_reader :include @@ -30,7 +30,7 @@ module OpenAI attr_writer :include # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } attr_reader :limit @@ -39,8 +39,8 @@ module OpenAI # The order to return the input items in. Default is `asc`. # - # - `asc`: Return the input items in ascending order. - # - `desc`: Return the input items in descending order. + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. sig { returns(T.nilable(OpenAI::Models::Responses::InputItemListParams::Order::OrSymbol)) } attr_reader :order @@ -77,8 +77,8 @@ module OpenAI # The order to return the input items in. Default is `asc`. # - # - `asc`: Return the input items in ascending order. - # - `desc`: Return the input items in descending order. + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. 
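The listing parameters above line up with a paginated call along these lines. The positional response ID is a placeholder, and the exact pagination surface (`page.data`) is an assumption about the SDK's page objects.

```ruby
page = client.responses.input_items.list(
  "resp_123",   # ID of a previously created response
  order: :asc,  # the default; use :desc for newest-first
  limit: 20     # 1..100, default 20
)

page.data.each { |item| puts item.type }
```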
module Order extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/responses/response.rbi b/rbi/lib/openai/models/responses/response.rbi index 99dd9aae..51372d8d 100644 --- a/rbi/lib/openai/models/responses/response.rbi +++ b/rbi/lib/openai/models/responses/response.rbi @@ -32,28 +32,28 @@ module OpenAI attr_writer :incomplete_details # Inserts a system (or developer) message as the first item in the model's - # context. + # context. # - # When using along with `previous_response_id`, the instructions from a previous - # response will not be carried over to the next response. This makes it simple to - # swap out system (or developer) messages in new responses. + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. sig { returns(T.nilable(String)) } attr_accessor :instructions # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. sig do returns( T.any(String, OpenAI::Models::ChatModel::TaggedSymbol, OpenAI::Models::ResponsesModel::TaggedSymbol) @@ -67,11 +67,11 @@ module OpenAI # An array of content items generated by the model. # - # - The length and order of items in the `output` array is dependent on the - # model's response. - # - Rather than accessing the first item in the `output` array and assuming it's - # an `assistant` message with the content generated by the model, you might - # consider using the `output_text` property where supported in SDKs. + # - The length and order of items in the `output` array is dependent on the + # model's response. + # - Rather than accessing the first item in the `output` array and assuming it's + # an `assistant` message with the content generated by the model, you might + # consider using the `output_text` property where supported in SDKs. sig do returns( T::Array[ @@ -93,15 +93,15 @@ module OpenAI attr_accessor :parallel_tool_calls # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. 
We generally recommend altering this or `top_p` but + # not both. sig { returns(T.nilable(Float)) } attr_accessor :temperature # How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. sig do returns( T.any( @@ -114,19 +114,19 @@ module OpenAI attr_accessor :tool_choice # An array of tools the model may call while generating a response. You can - # specify which tool to use by setting the `tool_choice` parameter. + # specify which tool to use by setting the `tool_choice` parameter. # - # The two categories of tools you can provide the model are: + # The two categories of tools you can provide the model are: # - # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - # capabilities, like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search). - # Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). - # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). sig do returns( T::Array[ @@ -142,29 +142,29 @@ module OpenAI attr_accessor :tools # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } attr_accessor :top_p # An upper bound for the number of tokens that can be generated for a response, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } attr_accessor :max_output_tokens # The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. Learn more about - # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). 
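The `previous_response_id` field above is what enables multi-turn conversation threading. A minimal sketch (the model name is an assumption; `output_text` is the convenience property the `output` docs above recommend over indexing into the array):

```ruby
first = client.responses.create(model: "gpt-4o", input: "Pick a random city.")

# `instructions` from `first` are NOT carried over to the next turn, so
# restate them on each request if they should still apply.
followup = client.responses.create(
  model: "gpt-4o",
  input: "What country is that city in?",
  previous_response_id: first.id
)

puts followup.output_text
```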
sig { returns(T.nilable(String)) } attr_accessor :previous_response_id # **o-series models only** # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(OpenAI::Models::Reasoning)) } attr_reader :reasoning @@ -172,7 +172,7 @@ module OpenAI attr_writer :reasoning # The status of the response generation. One of `completed`, `failed`, - # `in_progress`, or `incomplete`. + # `in_progress`, or `incomplete`. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseStatus::TaggedSymbol)) } attr_reader :status @@ -180,10 +180,10 @@ module OpenAI attr_writer :status # Configuration options for a text response from the model. Can be plain text or - # structured JSON data. Learn more: + # structured JSON data. Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) sig { returns(T.nilable(OpenAI::Models::Responses::ResponseTextConfig)) } attr_reader :text @@ -192,16 +192,16 @@ module OpenAI # The truncation strategy to use for the model response. # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. sig { returns(T.nilable(OpenAI::Models::Responses::Response::Truncation::TaggedSymbol)) } attr_accessor :truncation # Represents token usage details including input tokens, output tokens, a - # breakdown of output tokens, and the total tokens used. + # breakdown of output tokens, and the total tokens used. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseUsage)) } attr_reader :usage @@ -209,8 +209,8 @@ module OpenAI attr_writer :usage # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } attr_reader :user @@ -380,8 +380,8 @@ module OpenAI end # How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. module ToolChoice extend OpenAI::Internal::Type::Union @@ -396,11 +396,11 @@ module OpenAI # The truncation strategy to use for the model response. 
# - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. module Truncation extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/responses/response_computer_tool_call.rbi b/rbi/lib/openai/models/responses/response_computer_tool_call.rbi index ccc7cf95..ba86d36e 100644 --- a/rbi/lib/openai/models/responses/response_computer_tool_call.rbi +++ b/rbi/lib/openai/models/responses/response_computer_tool_call.rbi @@ -35,7 +35,7 @@ module OpenAI attr_accessor :pending_safety_checks # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. sig { returns(OpenAI::Models::Responses::ResponseComputerToolCall::Status::OrSymbol) } attr_accessor :status @@ -44,8 +44,8 @@ module OpenAI attr_accessor :type # A tool call to a computer use tool. See the - # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) - # for more information. + # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) + # for more information. sig do params( id: String, @@ -101,12 +101,12 @@ module OpenAI class Click < OpenAI::Internal::Type::BaseModel # Indicates which mouse button was pressed during the click. One of `left`, - # `right`, `wheel`, `back`, or `forward`. + # `right`, `wheel`, `back`, or `forward`. sig { returns(OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::Button::OrSymbol) } attr_accessor :button # Specifies the event type. For a click action, this property is always set to - # `click`. + # `click`. sig { returns(Symbol) } attr_accessor :type @@ -144,7 +144,7 @@ module OpenAI def to_hash; end # Indicates which mouse button was pressed during the click. One of `left`, - # `right`, `wheel`, `back`, or `forward`. + # `right`, `wheel`, `back`, or `forward`. module Button extend OpenAI::Internal::Type::Enum @@ -182,7 +182,7 @@ module OpenAI class DoubleClick < OpenAI::Internal::Type::BaseModel # Specifies the event type. For a double click action, this property is always set - # to `double_click`. + # to `double_click`. sig { returns(Symbol) } attr_accessor :type @@ -204,19 +204,19 @@ module OpenAI class Drag < OpenAI::Internal::Type::BaseModel # An array of coordinates representing the path of the drag action. Coordinates - # will appear as an array of objects, eg + # will appear as an array of objects, eg # - # ``` - # [ - # { x: 100, y: 200 }, - # { x: 200, y: 300 } - # ] - # ``` + # ``` + # [ + # { x: 100, y: 200 }, + # { x: 200, y: 300 } + # ] + # ``` sig { returns(T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path]) } attr_accessor :path # Specifies the event type. For a drag action, this property is always set to - # `drag`. + # `drag`. 
+ # `drag`.
sig { returns(Symbol) } attr_accessor :type @@ -258,12 +258,12 @@ module OpenAI class Keypress < OpenAI::Internal::Type::BaseModel # The combination of keys the model is requesting to be pressed. This is an array - # of strings, each representing a key. + # of strings, each representing a key. sig { returns(T::Array[String]) } attr_accessor :keys # Specifies the event type. For a keypress action, this property is always set to - # `keypress`. + # `keypress`. sig { returns(Symbol) } attr_accessor :type @@ -277,7 +277,7 @@ module OpenAI class Move < OpenAI::Internal::Type::BaseModel # Specifies the event type. For a move action, this property is always set to - # `move`. + # `move`. sig { returns(Symbol) } attr_accessor :type @@ -299,7 +299,7 @@ module OpenAI class Screenshot < OpenAI::Internal::Type::BaseModel # Specifies the event type. For a screenshot action, this property is always set - # to `screenshot`. + # to `screenshot`. sig { returns(Symbol) } attr_accessor :type @@ -321,7 +321,7 @@ module OpenAI attr_accessor :scroll_y # Specifies the event type. For a scroll action, this property is always set to - # `scroll`. + # `scroll`. sig { returns(Symbol) } attr_accessor :type @@ -352,7 +352,7 @@ module OpenAI attr_accessor :text # Specifies the event type. For a type action, this property is always set to - # `type`. + # `type`. sig { returns(Symbol) } attr_accessor :type @@ -366,7 +366,7 @@ module OpenAI class Wait < OpenAI::Internal::Type::BaseModel # Specifies the event type. For a wait action, this property is always set to - # `wait`. + # `wait`. sig { returns(Symbol) } attr_accessor :type @@ -409,7 +409,7 @@ module OpenAI end # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. module Status extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/responses/response_computer_tool_call_output_item.rbi b/rbi/lib/openai/models/responses/response_computer_tool_call_output_item.rbi index cf267c21..64b2dc16 100644 --- a/rbi/lib/openai/models/responses/response_computer_tool_call_output_item.rbi +++ b/rbi/lib/openai/models/responses/response_computer_tool_call_output_item.rbi @@ -29,7 +29,7 @@ module OpenAI attr_accessor :type # The safety checks reported by the API that have been acknowledged by the - # developer. + # developer. sig do returns( T.nilable( @@ -53,7 +53,7 @@ module OpenAI attr_writer :acknowledged_safety_checks # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::Status::TaggedSymbol)) } attr_reader :status @@ -123,7 +123,7 @@ module OpenAI end # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. 
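Stepping back from the type definitions, a computer-use loop would dispatch on the action union defined above. This sketch assumes the `action` accessor on the tool call and the `output` array on the response; the `keys` and `path` accessors (with `x`/`y` coordinates) follow the sigs and comments above.

```ruby
response.output.each do |item|
  next unless item.is_a?(OpenAI::Models::Responses::ResponseComputerToolCall)

  case (action = item.action)
  when OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress
    puts "press: #{action.keys.join(' + ')}"
  when OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag
    puts "drag: #{action.path.map { |p| [p.x, p.y] }.inspect}"
  else
    puts "unhandled action: #{action.type}"
  end
end
```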
module Status extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rbi b/rbi/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rbi index 5ece9b13..c95e0378 100644 --- a/rbi/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rbi +++ b/rbi/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rbi @@ -5,7 +5,7 @@ module OpenAI module Responses class ResponseComputerToolCallOutputScreenshot < OpenAI::Internal::Type::BaseModel # Specifies the event type. For a computer screenshot, this property is always set - # to `computer_screenshot`. + # to `computer_screenshot`. sig { returns(Symbol) } attr_accessor :type

diff --git a/rbi/lib/openai/models/responses/response_create_params.rbi b/rbi/lib/openai/models/responses/response_create_params.rbi index f45e18ea..4263b1b6 100644 --- a/rbi/lib/openai/models/responses/response_create_params.rbi +++ b/rbi/lib/openai/models/responses/response_create_params.rbi @@ -9,13 +9,13 @@ module OpenAI

# Text, image, or file inputs to the model, used to generate a response. # - # Learn more: + # Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Image inputs](https://platform.openai.com/docs/guides/images) - # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) sig do returns( T.any(

attr_accessor :input

# Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. sig { returns(T.any(String, OpenAI::Models::ChatModel::OrSymbol, OpenAI::Models::ResponsesModel::OrSymbol)) } attr_accessor :model

# Specify additional output data to include in the model response. Currently - # supported values are: + # supported values are: # - # - `file_search_call.results`: Include the search results of the file search tool - # call. - # - `message.input_image.image_url`: Include image urls from the input message. - # - `computer_call_output.output.image_url`: Include image urls from the computer - # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output.
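A sketch of requesting the extra output fields enumerated above via the `include` parameter (the call shape is assumed; the values are the ones listed in this diff):

```ruby
response = client.responses.create(
  model: "gpt-4o",
  input: "Search the uploaded files for the relevant passage.",
  include: [
    :"file_search_call.results",
    :"message.input_image.image_url",
    :"computer_call_output.output.image_url"
  ]
)
```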
sig { returns(T.nilable(T::Array[OpenAI::Models::Responses::ResponseIncludable::OrSymbol])) } attr_accessor :include

# Inserts a system (or developer) message as the first item in the model's - # context. + # context. # - # When using along with `previous_response_id`, the instructions from a previous - # response will not be carried over to the next response. This makes it simple to - # swap out system (or developer) messages in new responses. + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. sig { returns(T.nilable(String)) } attr_accessor :instructions

# An upper bound for the number of tokens that can be generated for a response, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } attr_accessor :max_output_tokens

# Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata

@@ -88,15 +88,15 @@ attr_accessor :parallel_tool_calls

# The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. Learn more about - # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). sig { returns(T.nilable(String)) } attr_accessor :previous_response_id

# **o-series models only** # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(OpenAI::Models::Reasoning)) } attr_reader :reasoning

@@ -108,17 +108,17 @@ attr_accessor :store

# What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. sig { returns(T.nilable(Float)) } attr_accessor :temperature

# Configuration options for a text response from the model. Can be plain text or - # structured JSON data. Learn more: + # structured JSON data.
Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) sig { returns(T.nilable(OpenAI::Models::Responses::ResponseTextConfig)) } attr_reader :text

@@ -126,8 +126,8 @@ attr_writer :text

# How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. sig do returns( T.nilable(

@@ -155,19 +155,19 @@ attr_writer :tool_choice

# An array of tools the model may call while generating a response. You can - # specify which tool to use by setting the `tool_choice` parameter. + # specify which tool to use by setting the `tool_choice` parameter. # - # The two categories of tools you can provide the model are: + # The two categories of tools you can provide the model are: # - # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - # capabilities, like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search). - # Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). - # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). sig do returns( T.nilable(

@@ -201,26 +201,26 @@ attr_writer :tools

# An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } attr_accessor :top_p

# The truncation strategy to use for the model response. # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error.
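A sketch pairing one built-in tool with one function tool, per the two categories described above; the exact hash shapes are assumptions inferred from the typed models, not copied from this diff:

```ruby
response = client.responses.create(
  model: "gpt-4o",
  input: "What's the weather in Oslo right now?",
  tools: [
    {type: :web_search_preview}, # built-in tool
    {                            # function tool (custom code you run yourself)
      type: :function,
      name: "get_weather",
      parameters: {
        type: "object",
        properties: {city: {type: "string"}},
        required: ["city"]
      }
    }
  ],
  tool_choice: :auto # let the model pick between a message and a tool call
)
```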
+ # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseCreateParams::Truncation::OrSymbol)) } attr_accessor :truncation

# A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } attr_reader :user

@@ -358,13 +358,13 @@

# Text, image, or file inputs to the model, used to generate a response. # - # Learn more: + # Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Image inputs](https://platform.openai.com/docs/guides/images) - # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) module Input extend OpenAI::Internal::Type::Union

@@ -395,8 +395,8 @@ end

# How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. module ToolChoice extend OpenAI::Internal::Type::Union

@@ -411,11 +411,11 @@

# The truncation strategy to use for the model response. # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. module Truncation extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi b/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi index 7181116b..10068e96 100644 --- a/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi +++ b/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi @@ -13,7 +13,7 @@ module OpenAI attr_accessor :queries

# The status of the file search tool call.
One of `in_progress`, `searching`, - # `incomplete` or `failed`, + # `incomplete` or `failed`, sig { returns(OpenAI::Models::Responses::ResponseFileSearchToolCall::Status::OrSymbol) } attr_accessor :status

@@ -26,8 +26,8 @@ attr_accessor :results

# The results of a file search tool call. See the - # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) - # for more information. + # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) + # for more information. sig do params( id: String,

@@ -57,7 +57,7 @@ def to_hash; end

# The status of the file search tool call. One of `in_progress`, `searching`, - # `incomplete` or `failed`, + # `incomplete` or `failed`, module Status extend OpenAI::Internal::Type::Enum

@@ -82,10 +82,10 @@

class Result < OpenAI::Internal::Type::BaseModel # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } attr_accessor :attributes

diff --git a/rbi/lib/openai/models/responses/response_format_text_config.rbi b/rbi/lib/openai/models/responses/response_format_text_config.rbi index 3fe12068..ae61d108 100644 --- a/rbi/lib/openai/models/responses/response_format_text_config.rbi +++ b/rbi/lib/openai/models/responses/response_format_text_config.rbi @@ -5,17 +5,17 @@ module OpenAI module Responses # An object specifying the format that the model must output. # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # The default format is `{ "type": "text" }` with no additional options. + # The default format is `{ "type": "text" }` with no additional options. # - # **Not recommended for gpt-4o and newer models:** + # **Not recommended for gpt-4o and newer models:** # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it.
module ResponseFormatTextConfig extend OpenAI::Internal::Type::Union

diff --git a/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi b/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi index 8a1756b0..25a553e6 100644 --- a/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi +++ b/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi @@ -5,12 +5,12 @@ module OpenAI module Responses class ResponseFormatTextJSONSchemaConfig < OpenAI::Internal::Type::BaseModel # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores - # and dashes, with a maximum length of 64. + # and dashes, with a maximum length of 64. sig { returns(String) } attr_accessor :name

# The schema for the response format, described as a JSON Schema object. Learn how - # to build JSON schemas [here](https://json-schema.org/). + # to build JSON schemas [here](https://json-schema.org/). sig { returns(T::Hash[Symbol, T.anything]) } attr_accessor :schema

@@ -19,7 +19,7 @@ attr_accessor :type

# A description of what the response format is for, used by the model to determine - # how to respond in the format. + # how to respond in the format. sig { returns(T.nilable(String)) } attr_reader :description

@@ -27,16 +27,16 @@ attr_writer :description

# Whether to enable strict schema adherence when generating the output. If set to - # true, the model will always follow the exact schema defined in the `schema` - # field. Only a subset of JSON Schema is supported when `strict` is `true`. To - # learn more, read the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). sig { returns(T.nilable(T::Boolean)) } attr_accessor :strict

# JSON Schema response format. Used to generate structured JSON responses. Learn - # more about - # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + # more about + # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). sig do params( name: String,

diff --git a/rbi/lib/openai/models/responses/response_function_tool_call.rbi b/rbi/lib/openai/models/responses/response_function_tool_call.rbi index f0a465e7..769270c2 100644 --- a/rbi/lib/openai/models/responses/response_function_tool_call.rbi +++ b/rbi/lib/openai/models/responses/response_function_tool_call.rbi @@ -28,7 +28,7 @@ attr_writer :id

# The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseFunctionToolCall::Status::OrSymbol)) } attr_reader :status

@@ -36,8 +36,8 @@ attr_writer :status

# A tool call to run a function. See the - # [function calling guide](https://platform.openai.com/docs/guides/function-calling) - # for more information. + # [function calling guide](https://platform.openai.com/docs/guides/function-calling) + # for more information. sig do params( arguments: String,

@@ -67,7 +67,7 @@ def to_hash; end

# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
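Putting the JSON-schema config fields typed above together, a hedged sketch of a Structured Outputs request; the nesting under `text.format` follows the text config models in this diff, but treat the overall shape as an assumption:

```ruby
response = client.responses.create(
  model: "gpt-4o",
  input: "Extract the person's name and age from: 'Ada, 36, engineer.'",
  text: {
    format: {
      type: :json_schema,
      name: "person", # a-z, A-Z, 0-9, underscores and dashes; max length 64
      strict: true,   # exact schema adherence; only a subset of JSON Schema is supported
      schema: {
        type: "object",
        properties: {name: {type: "string"}, age: {type: "integer"}},
        required: %w[name age],
        additionalProperties: false
      }
    }
  }
)
```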
- # Populated when items are returned via API. + # Populated when items are returned via API. module Status extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_function_tool_call_item.rbi b/rbi/lib/openai/models/responses/response_function_tool_call_item.rbi index 79e881a7..7ed35c37 100644 --- a/rbi/lib/openai/models/responses/response_function_tool_call_item.rbi +++ b/rbi/lib/openai/models/responses/response_function_tool_call_item.rbi @@ -9,8 +9,8 @@ module OpenAI attr_accessor :id

# A tool call to run a function. See the - # [function calling guide](https://platform.openai.com/docs/guides/function-calling) - # for more information. + # [function calling guide](https://platform.openai.com/docs/guides/function-calling) + # for more information. sig { params(id: String).returns(T.attached_class) } def self.new(id:); end

diff --git a/rbi/lib/openai/models/responses/response_function_tool_call_output_item.rbi b/rbi/lib/openai/models/responses/response_function_tool_call_output_item.rbi index 398930d8..8f224c19 100644 --- a/rbi/lib/openai/models/responses/response_function_tool_call_output_item.rbi +++ b/rbi/lib/openai/models/responses/response_function_tool_call_output_item.rbi @@ -21,7 +21,7 @@ attr_accessor :type

# The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::Status::TaggedSymbol)) } attr_reader :status

@@ -55,7 +55,7 @@ def to_hash; end

# The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. module Status extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_function_web_search.rbi b/rbi/lib/openai/models/responses/response_function_web_search.rbi index 92543e70..5bd85d43 100644 --- a/rbi/lib/openai/models/responses/response_function_web_search.rbi +++ b/rbi/lib/openai/models/responses/response_function_web_search.rbi @@ -17,8 +17,8 @@ attr_accessor :type

# The results of a web search tool call. See the - # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for - # more information. + # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + # more information. sig do params( id: String,

diff --git a/rbi/lib/openai/models/responses/response_includable.rbi b/rbi/lib/openai/models/responses/response_includable.rbi index 661b921f..dd8aebb9 100644 --- a/rbi/lib/openai/models/responses/response_includable.rbi +++ b/rbi/lib/openai/models/responses/response_includable.rbi @@ -4,13 +4,13 @@ module OpenAI module Models module Responses # Specify additional output data to include in the model response. Currently - # supported values are: + # supported values are: # - # - `file_search_call.results`: Include the search results of the file search tool - # call. - # - `message.input_image.image_url`: Include image urls from the input message. - # - `computer_call_output.output.image_url`: Include image urls from the computer - # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output.
module ResponseIncludable extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_input_image.rbi b/rbi/lib/openai/models/responses/response_input_image.rbi index f6089f0f..f6f84eac 100644 --- a/rbi/lib/openai/models/responses/response_input_image.rbi +++ b/rbi/lib/openai/models/responses/response_input_image.rbi @@ -5,7 +5,7 @@ module OpenAI module Responses class ResponseInputImage < OpenAI::Internal::Type::BaseModel # The detail level of the image to be sent to the model. One of `high`, `low`, or - # `auto`. Defaults to `auto`. + # `auto`. Defaults to `auto`. sig { returns(OpenAI::Models::Responses::ResponseInputImage::Detail::OrSymbol) } attr_accessor :detail

@@ -18,12 +18,12 @@ attr_accessor :file_id

# The URL of the image to be sent to the model. A fully qualified URL or base64 - # encoded image in a data URL. + # encoded image in a data URL. sig { returns(T.nilable(String)) } attr_accessor :image_url

# An image input to the model. Learn about - # [image inputs](https://platform.openai.com/docs/guides/vision). + # [image inputs](https://platform.openai.com/docs/guides/vision). sig do params( detail: OpenAI::Models::Responses::ResponseInputImage::Detail::OrSymbol,

@@ -49,7 +49,7 @@ def to_hash; end

# The detail level of the image to be sent to the model. One of `high`, `low`, or - # `auto`. Defaults to `auto`. + # `auto`. Defaults to `auto`. module Detail extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_input_item.rbi b/rbi/lib/openai/models/responses/response_input_item.rbi index 6a70db99..cbcc9069 100644 --- a/rbi/lib/openai/models/responses/response_input_item.rbi +++ b/rbi/lib/openai/models/responses/response_input_item.rbi @@ -4,16 +4,16 @@ module OpenAI module Models module Responses # A message input to the model with a role indicating instruction following - # hierarchy. Instructions given with the `developer` or `system` role take - # precedence over instructions given with the `user` role. Messages with the - # `assistant` role are presumed to have been generated by the model in previous - # interactions. + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. module ResponseInputItem extend OpenAI::Internal::Type::Union

class Message < OpenAI::Internal::Type::BaseModel # A list of one or many input items to the model, containing different content - # types. + # types. sig do returns( T::Array[

attr_accessor :role

# The status of item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseInputItem::Message::Status::OrSymbol)) } attr_reader :status

@@ -47,8 +47,8 @@ attr_writer :type

# A message input to the model with a role indicating instruction following - # hierarchy. Instructions given with the `developer` or `system` role take - # precedence over instructions given with the `user` role. + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. sig do params( content: T::Array[

@@ -104,7 +104,7 @@ end

# The status of item. One of `in_progress`, `completed`, or `incomplete`.
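A sketch of an image input using the `detail` and `image_url` fields typed above; the message and content-part shapes are assumptions based on the input item models, not literal SDK documentation:

```ruby
response = client.responses.create(
  model: "gpt-4o",
  input: [{
    role: :user,
    content: [
      {type: :input_text, text: "What's in this image?"},
      # `image_url` may be a fully qualified URL or a base64 data URL;
      # `detail` is :high, :low, or :auto (the default).
      {type: :input_image, image_url: "https://example.com/photo.png", detail: :auto}
    ]
  }]
)
```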
- # Populated when items are returned via API. + # Populated when items are returned via API. module Status extend OpenAI::Internal::Type::Enum

@@ -169,7 +169,7 @@ attr_writer :id

# The safety checks reported by the API that have been acknowledged by the - # developer. + # developer. sig do returns( T.nilable(

@@ -193,7 +193,7 @@ attr_writer :acknowledged_safety_checks

# The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Status::OrSymbol)) } attr_reader :status

@@ -264,7 +264,7 @@ end

# The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. module Status extend OpenAI::Internal::Type::Enum

@@ -311,7 +311,7 @@ attr_accessor :type

# The unique ID of the function tool call output. Populated when this item is - # returned via API. + # returned via API. sig { returns(T.nilable(String)) } attr_reader :id

@@ -319,7 +319,7 @@ attr_writer :id

# The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::Status::OrSymbol)) } attr_reader :status

@@ -354,7 +354,7 @@ def to_hash; end

# The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. module Status extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_input_message_item.rbi b/rbi/lib/openai/models/responses/response_input_message_item.rbi index a8b4a591..1eeebaa4 100644 --- a/rbi/lib/openai/models/responses/response_input_message_item.rbi +++ b/rbi/lib/openai/models/responses/response_input_message_item.rbi @@ -9,7 +9,7 @@ module OpenAI attr_accessor :id

# A list of one or many input items to the model, containing different content - # types. + # types. sig do returns( T::Array[

attr_accessor :role

# The status of item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseInputMessageItem::Status::TaggedSymbol)) } attr_reader :status

@@ -98,7 +98,7 @@ end

# The status of item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. module Status extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_output_message.rbi b/rbi/lib/openai/models/responses/response_output_message.rbi index ca050336..d7a588ea 100644 --- a/rbi/lib/openai/models/responses/response_output_message.rbi +++ b/rbi/lib/openai/models/responses/response_output_message.rbi @@ -21,7 +21,7 @@ attr_accessor :role

# The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API.
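A sketch of completing a function-call round trip with the `function_call_output` input item typed above. It assumes the response exposes its output items and that the tool call carries a `call_id` to echo back; both are assumptions here, not facts from this diff:

```ruby
# Find the model's function call in the previous response (accessors assumed).
call = response.output.find { |item| item.type == :function_call }

followup = client.responses.create(
  model: "gpt-4o",
  previous_response_id: response.id,
  input: [{
    type: :function_call_output,
    call_id: call.call_id, # echoes the ID from the model's function call
    output: {temperature_c: 7, conditions: "overcast"}.to_json
  }]
)
```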
sig { returns(OpenAI::Models::Responses::ResponseOutputMessage::Status::OrSymbol) } attr_accessor :status

@@ -74,7 +74,7 @@ end

# The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. module Status extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_reasoning_item.rbi b/rbi/lib/openai/models/responses/response_reasoning_item.rbi index 1b8c7536..9d308e35 100644 --- a/rbi/lib/openai/models/responses/response_reasoning_item.rbi +++ b/rbi/lib/openai/models/responses/response_reasoning_item.rbi @@ -17,7 +17,7 @@ attr_accessor :type

# The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseReasoningItem::Status::OrSymbol)) } attr_reader :status

@@ -25,7 +25,7 @@ attr_writer :status

# A description of the chain of thought used by a reasoning model while generating - # a response. + # a response. sig do params( id: String,

@@ -67,7 +67,7 @@ end

# The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. module Status extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_retrieve_params.rbi b/rbi/lib/openai/models/responses/response_retrieve_params.rbi index 9afae2e8..39ea7e1c 100644 --- a/rbi/lib/openai/models/responses/response_retrieve_params.rbi +++ b/rbi/lib/openai/models/responses/response_retrieve_params.rbi @@ -8,7 +8,7 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters

# Additional fields to include in the response. See the `include` parameter for - # Response creation above for more information. + # Response creation above for more information. sig { returns(T.nilable(T::Array[OpenAI::Models::Responses::ResponseIncludable::OrSymbol])) } attr_reader :include

diff --git a/rbi/lib/openai/models/responses/response_status.rbi b/rbi/lib/openai/models/responses/response_status.rbi index 8768bb79..a887be4c 100644 --- a/rbi/lib/openai/models/responses/response_status.rbi +++ b/rbi/lib/openai/models/responses/response_status.rbi @@ -4,7 +4,7 @@ module OpenAI module Models module Responses # The status of the response generation. One of `completed`, `failed`, - # `in_progress`, or `incomplete`. + # `in_progress`, or `incomplete`. module ResponseStatus extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/response_text_config.rbi b/rbi/lib/openai/models/responses/response_text_config.rbi index 1e338162..68a82916 100644 --- a/rbi/lib/openai/models/responses/response_text_config.rbi +++ b/rbi/lib/openai/models/responses/response_text_config.rbi @@ -6,17 +6,17 @@ module OpenAI class ResponseTextConfig < OpenAI::Internal::Type::BaseModel # An object specifying the format that the model must output. # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema.
Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # The default format is `{ "type": "text" }` with no additional options. + # The default format is `{ "type": "text" }` with no additional options. # - # **Not recommended for gpt-4o and newer models:** + # **Not recommended for gpt-4o and newer models:** # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. sig do returns( T.nilable(

attr_writer :format_

# Configuration options for a text response from the model. Can be plain text or - # structured JSON data. Learn more: + # structured JSON data. Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) sig do params( format_: T.any(

diff --git a/rbi/lib/openai/models/responses/response_usage.rbi b/rbi/lib/openai/models/responses/response_usage.rbi index 89b2102a..852d91c4 100644 --- a/rbi/lib/openai/models/responses/response_usage.rbi +++ b/rbi/lib/openai/models/responses/response_usage.rbi @@ -41,7 +41,7 @@ attr_accessor :total_tokens

# Represents token usage details including input tokens, output tokens, a - # breakdown of output tokens, and the total tokens used. + # breakdown of output tokens, and the total tokens used. sig do params( input_tokens: Integer,

@@ -77,7 +77,7 @@

class InputTokensDetails < OpenAI::Internal::Type::BaseModel # The number of tokens that were retrieved from the cache. - # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). sig { returns(Integer) } attr_accessor :cached_tokens

diff --git a/rbi/lib/openai/models/responses/tool.rbi b/rbi/lib/openai/models/responses/tool.rbi index 9aab8bc4..292e8c9d 100644 --- a/rbi/lib/openai/models/responses/tool.rbi +++ b/rbi/lib/openai/models/responses/tool.rbi @@ -4,8 +4,8 @@ module OpenAI module Models module Responses # A tool that searches for relevant content from uploaded files. Learn more about - # the - # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + # the + # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). module Tool extend OpenAI::Internal::Type::Union

diff --git a/rbi/lib/openai/models/responses/tool_choice_options.rbi b/rbi/lib/openai/models/responses/tool_choice_options.rbi index 319448bc..ccc45284 100644 --- a/rbi/lib/openai/models/responses/tool_choice_options.rbi +++ b/rbi/lib/openai/models/responses/tool_choice_options.rbi @@ -5,12 +5,12 @@ module OpenAI module Responses # Controls which (if any) tool is called by the model. # - # `none` means the model will not call any tool and instead generates a message. + # `none` means the model will not call any tool and instead generates a message.
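A sketch of reading the usage breakdown typed above off a finished response; the accessor names follow the model fields in this diff, but treat the exact paths as assumptions:

```ruby
usage = response.usage
puts "input=#{usage.input_tokens} output=#{usage.output_tokens} total=#{usage.total_tokens}"
# Cached input tokens, per the prompt-caching note above (accessor name assumed).
puts "cached input tokens: #{usage.input_tokens_details.cached_tokens}"
```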
# - # `auto` means the model can pick between generating a message or calling one or - # more tools. + # `auto` means the model can pick between generating a message or calling one or + # more tools. # - # `required` means the model must call one or more tools. + # `required` means the model must call one or more tools. module ToolChoiceOptions extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/tool_choice_types.rbi b/rbi/lib/openai/models/responses/tool_choice_types.rbi index 82a4138e..ece62dd0 100644 --- a/rbi/lib/openai/models/responses/tool_choice_types.rbi +++ b/rbi/lib/openai/models/responses/tool_choice_types.rbi @@ -5,18 +5,18 @@ module OpenAI module Responses class ToolChoiceTypes < OpenAI::Internal::Type::BaseModel # The type of hosted tool the model should to use. Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). + # [built-in tools](https://platform.openai.com/docs/guides/tools). # - # Allowed values are: + # Allowed values are: # - # - `file_search` - # - `web_search_preview` - # - `computer_use_preview` + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` sig { returns(OpenAI::Models::Responses::ToolChoiceTypes::Type::OrSymbol) } attr_accessor :type

# Indicates that the model should use a built-in tool to generate a response. - # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). + # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). sig { params(type: OpenAI::Models::Responses::ToolChoiceTypes::Type::OrSymbol).returns(T.attached_class) } def self.new(type:); end

@@ -24,13 +24,13 @@ def to_hash; end

# The type of hosted tool the model should to use. Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). + # [built-in tools](https://platform.openai.com/docs/guides/tools). # - # Allowed values are: + # Allowed values are: # - # - `file_search` - # - `web_search_preview` - # - `computer_use_preview` + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` module Type extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/responses/web_search_tool.rbi b/rbi/lib/openai/models/responses/web_search_tool.rbi index 63a05033..75ccebd6 100644 --- a/rbi/lib/openai/models/responses/web_search_tool.rbi +++ b/rbi/lib/openai/models/responses/web_search_tool.rbi @@ -6,13 +6,13 @@ module OpenAI class WebSearchTool < OpenAI::Internal::Type::BaseModel # The type of the web search tool. One of: # - # - `web_search_preview` - # - `web_search_preview_2025_03_11` + # - `web_search_preview` + # - `web_search_preview_2025_03_11` sig { returns(OpenAI::Models::Responses::WebSearchTool::Type::OrSymbol) } attr_accessor :type

# High level guidance for the amount of context window space to use for the - # search. One of `low`, `medium`, or `high`. `medium` is the default. + # search. One of `low`, `medium`, or `high`. `medium` is the default. sig { returns(T.nilable(OpenAI::Models::Responses::WebSearchTool::SearchContextSize::OrSymbol)) } attr_reader :search_context_size

@@ -31,8 +31,8 @@ attr_writer :user_location

# This tool searches the web for relevant results to use in a response. Learn more - # about the - # [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
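A sketch of forcing a specific hosted tool with the `ToolChoiceTypes` values above (`file_search`, `web_search_preview`, or `computer_use_preview`); the `vector_store_ids` field and the IDs are illustrative assumptions:

```ruby
response = client.responses.create(
  model: "gpt-4o",
  input: "Search my files for the Q3 report.",
  tools: [{type: :file_search, vector_store_ids: ["vs_123"]}], # ID is a placeholder
  tool_choice: {type: :file_search} # require this hosted tool rather than :auto
)
```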
sig do params( type: OpenAI::Models::Responses::WebSearchTool::Type::OrSymbol,

@@ -57,8 +57,8 @@

# The type of the web search tool. One of: # - # - `web_search_preview` - # - `web_search_preview_2025_03_11` + # - `web_search_preview` + # - `web_search_preview_2025_03_11` module Type extend OpenAI::Internal::Type::Enum

@@ -76,7 +76,7 @@ end

# High level guidance for the amount of context window space to use for the - # search. One of `low`, `medium`, or `high`. `medium` is the default. + # search. One of `low`, `medium`, or `high`. `medium` is the default. module SearchContextSize extend OpenAI::Internal::Type::Enum

@@ -106,7 +106,7 @@ attr_writer :city

# The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of - # the user, e.g. `US`. + # the user, e.g. `US`. sig { returns(T.nilable(String)) } attr_reader :country

@@ -121,7 +121,7 @@ attr_writer :region

# The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the - # user, e.g. `America/Los_Angeles`. + # user, e.g. `America/Los_Angeles`. sig { returns(T.nilable(String)) } attr_reader :timezone

diff --git a/rbi/lib/openai/models/static_file_chunking_strategy.rbi b/rbi/lib/openai/models/static_file_chunking_strategy.rbi index 1a672f16..ce9bb8d5 100644 --- a/rbi/lib/openai/models/static_file_chunking_strategy.rbi +++ b/rbi/lib/openai/models/static_file_chunking_strategy.rbi @@ -5,12 +5,12 @@ module OpenAI class StaticFileChunkingStrategy < OpenAI::Internal::Type::BaseModel # The number of tokens that overlap between chunks. The default value is `400`. # - # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } attr_accessor :chunk_overlap_tokens

# The maximum number of tokens in each chunk. The default value is `800`. The - # minimum value is `100` and the maximum value is `4096`. + # minimum value is `100` and the maximum value is `4096`. sig { returns(Integer) } attr_accessor :max_chunk_size_tokens

diff --git a/rbi/lib/openai/models/upload.rbi b/rbi/lib/openai/models/upload.rbi index bc80b4c6..7a9a7404 100644 --- a/rbi/lib/openai/models/upload.rbi +++ b/rbi/lib/openai/models/upload.rbi @@ -28,8 +28,8 @@ attr_accessor :object

# The intended purpose of the file. - # [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) - # for acceptable values. + # [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) + # for acceptable values. sig { returns(String) } attr_accessor :purpose

diff --git a/rbi/lib/openai/models/upload_complete_params.rbi b/rbi/lib/openai/models/upload_complete_params.rbi index f6d148b3..344ab351 100644 --- a/rbi/lib/openai/models/upload_complete_params.rbi +++ b/rbi/lib/openai/models/upload_complete_params.rbi @@ -11,7 +11,7 @@ module OpenAI attr_accessor :part_ids

# The optional md5 checksum for the file contents to verify if the bytes uploaded - # matches what you expect. + # matches what you expect. sig { returns(T.nilable(String)) } attr_reader :md5

diff --git a/rbi/lib/openai/models/upload_create_params.rbi b/rbi/lib/openai/models/upload_create_params.rbi index 050879be..74c1e762 100644 --- a/rbi/lib/openai/models/upload_create_params.rbi +++ b/rbi/lib/openai/models/upload_create_params.rbi @@ -16,15 +16,15 @@ module OpenAI

# The MIME type of the file.
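A hedged sketch of the upload flow these params belong to. The `uploads` resource methods, the parts step, and the MIME type are assumptions; `mime_type`, `purpose`, `part_ids`, and `md5` mirror the params typed in this diff:

```ruby
upload = client.uploads.create(
  bytes: File.size("training.jsonl"),
  filename: "training.jsonl",
  mime_type: "text/jsonl",   # must fall within the supported MIME types for the purpose
  purpose: :"fine-tune"
)

# Upload the bytes as one or more parts (method shape assumed).
part = client.uploads.parts.create(upload.id, data: File.open("training.jsonl", "rb"))

# `md5` is optional and verifies the uploaded bytes match what you expect.
client.uploads.complete(upload.id, part_ids: [part.id])
```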
# - # This must fall within the supported MIME types for your file purpose. See the - # supported MIME types for assistants and vision. + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. sig { returns(String) } attr_accessor :mime_type

# The intended purpose of the uploaded file. # - # See the - # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). sig { returns(OpenAI::Models::FilePurpose::OrSymbol) } attr_accessor :purpose

diff --git a/rbi/lib/openai/models/vector_store.rbi b/rbi/lib/openai/models/vector_store.rbi index 9969477d..3426c9f4 100644 --- a/rbi/lib/openai/models/vector_store.rbi +++ b/rbi/lib/openai/models/vector_store.rbi @@ -22,11 +22,11 @@ attr_accessor :last_active_at

# Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata

@@ -39,8 +39,8 @@ attr_accessor :object

# The status of the vector store, which can be either `expired`, `in_progress`, or - # `completed`. A status of `completed` indicates that the vector store is ready - # for use. + # `completed`. A status of `completed` indicates that the vector store is ready + # for use. sig { returns(OpenAI::Models::VectorStore::Status::TaggedSymbol) } attr_accessor :status

@@ -60,7 +60,7 @@ attr_accessor :expires_at

# A vector store is a collection of processed files can be used by the - # `file_search` tool. + # `file_search` tool. sig do params( id: String,

@@ -157,8 +157,8 @@ end

# The status of the vector store, which can be either `expired`, `in_progress`, or - # `completed`. A status of `completed` indicates that the vector store is ready - # for use. + # `completed`. A status of `completed` indicates that the vector store is ready + # for use. module Status extend OpenAI::Internal::Type::Enum

@@ -175,7 +175,7 @@

class ExpiresAfter < OpenAI::Internal::Type::BaseModel # Anchor timestamp after which the expiration policy applies. Supported anchors: - # `last_active_at`. + # `last_active_at`. sig { returns(Symbol) } attr_accessor :anchor

diff --git a/rbi/lib/openai/models/vector_store_create_params.rbi b/rbi/lib/openai/models/vector_store_create_params.rbi index a5ba26e4..c6256c21 100644 --- a/rbi/lib/openai/models/vector_store_create_params.rbi +++ b/rbi/lib/openai/models/vector_store_create_params.rbi @@ -7,7 +7,7 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters

# The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty.
sig do returns( T.nilable(

@@ -45,8 +45,8 @@ attr_writer :expires_after

# A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. + # the vector store should use. Useful for tools like `file_search` that can access + # files. sig { returns(T.nilable(T::Array[String])) } attr_reader :file_ids

@@ -54,11 +54,11 @@ attr_writer :file_ids

# Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata

@@ -114,7 +114,7 @@

class ExpiresAfter < OpenAI::Internal::Type::BaseModel # Anchor timestamp after which the expiration policy applies. Supported anchors: - # `last_active_at`. + # `last_active_at`. sig { returns(Symbol) } attr_accessor :anchor

diff --git a/rbi/lib/openai/models/vector_store_list_params.rbi b/rbi/lib/openai/models/vector_store_list_params.rbi index 240ac978..e40b3b45 100644 --- a/rbi/lib/openai/models/vector_store_list_params.rbi +++ b/rbi/lib/openai/models/vector_store_list_params.rbi @@ -7,9 +7,9 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters

# A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } attr_reader :after

@@ -17,9 +17,9 @@ attr_writer :after

# A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } attr_reader :before

@@ -27,7 +27,7 @@ attr_writer :before

# A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } attr_reader :limit

@@ -35,7 +35,7 @@ attr_writer :limit

# Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order.
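A sketch of creating and paging vector stores with the params typed above; the resource paths, the `days` field, and the page accessors are assumptions:

```ruby
store = client.vector_stores.create(
  file_ids: ["file_abc", "file_def"],               # placeholders
  metadata: {project: "docs-search"},               # string values, per the limits above
  expires_after: {anchor: :last_active_at, days: 30}
)

page = client.vector_stores.list(limit: 20, order: :desc)
# Cursor pagination: pass the last seen ID as `after` to fetch the next page.
next_page = client.vector_stores.list(limit: 20, order: :desc, after: page.data.last.id)
```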
sig { returns(T.nilable(OpenAI::Models::VectorStoreListParams::Order::OrSymbol)) } attr_reader :order

@@ -69,7 +69,7 @@ def to_hash; end

# Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum

diff --git a/rbi/lib/openai/models/vector_store_search_params.rbi b/rbi/lib/openai/models/vector_store_search_params.rbi index 523d2871..e5456dff 100644 --- a/rbi/lib/openai/models/vector_store_search_params.rbi +++ b/rbi/lib/openai/models/vector_store_search_params.rbi @@ -23,7 +23,7 @@ attr_writer :filters

# The maximum number of results to return. This number should be between 1 and 50 - # inclusive. + # inclusive. sig { returns(T.nilable(Integer)) } attr_reader :max_num_results

diff --git a/rbi/lib/openai/models/vector_store_search_response.rbi b/rbi/lib/openai/models/vector_store_search_response.rbi index 06e2c923..454d54a1 100644 --- a/rbi/lib/openai/models/vector_store_search_response.rbi +++ b/rbi/lib/openai/models/vector_store_search_response.rbi @@ -4,10 +4,10 @@ module OpenAI module Models class VectorStoreSearchResponse < OpenAI::Internal::Type::BaseModel # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } attr_accessor :attributes

diff --git a/rbi/lib/openai/models/vector_store_update_params.rbi b/rbi/lib/openai/models/vector_store_update_params.rbi index 81e71bed..b40311aa 100644 --- a/rbi/lib/openai/models/vector_store_update_params.rbi +++ b/rbi/lib/openai/models/vector_store_update_params.rbi @@ -19,11 +19,11 @@ module OpenAI attr_writer :expires_after

# Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata

@@ -57,7 +57,7 @@

class ExpiresAfter < OpenAI::Internal::Type::BaseModel # Anchor timestamp after which the expiration policy applies. Supported anchors: - # `last_active_at`. + # `last_active_at`.
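A sketch of the search params above; the `query` keyword and result accessors are assumptions, while `max_num_results` must be between 1 and 50 inclusive per the sig's doc:

```ruby
results = client.vector_stores.search(
  store.id,
  query: "termination clause",
  max_num_results: 10
)
# Each hit may carry the optional attributes hash typed above.
results.each { |hit| puts hit.attributes }
```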
sig { returns(Symbol) } attr_accessor :anchor

diff --git a/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi b/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi index fd42f4c3..f64e4470 100644 --- a/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi @@ -8,21 +8,21 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters

# A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. + # the vector store should use. Useful for tools like `file_search` that can access + # files. sig { returns(T::Array[String]) } attr_accessor :file_ids

# Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } attr_accessor :attributes

# The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. sig do returns( T.nilable(

diff --git a/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi b/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi index 93928ade..8911fb0e 100644 --- a/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi @@ -11,9 +11,9 @@ module OpenAI attr_accessor :vector_store_id

# A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } attr_reader :after

@@ -21,9 +21,9 @@ attr_writer :after

# A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } attr_reader :before

@@ -38,7 +38,7 @@ attr_writer :filter

# A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20.
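A sketch of enqueueing a file batch with the params typed above (the `file_batches` resource name is an assumption; `attributes` values may be strings, numbers, or booleans per the sig):

```ruby
batch = client.vector_stores.file_batches.create(
  store.id,
  file_ids: ["file_abc", "file_def"], # placeholders
  attributes: {category: "contracts", year: 2024, confidential: true}
)
```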
sig { returns(T.nilable(Integer)) } attr_reader :limit @@ -46,7 +46,7 @@ module OpenAI attr_writer :limit # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. sig { returns(T.nilable(OpenAI::Models::VectorStores::FileBatchListFilesParams::Order::OrSymbol)) } attr_reader :order @@ -114,7 +114,7 @@ module OpenAI end # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/vector_stores/file_create_params.rbi b/rbi/lib/openai/models/vector_stores/file_create_params.rbi index f63b7e8d..774e6c2e 100644 --- a/rbi/lib/openai/models/vector_stores/file_create_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_create_params.rbi @@ -8,21 +8,21 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # A [File](https://platform.openai.com/docs/api-reference/files) ID that the - # vector store should use. Useful for tools like `file_search` that can access - # files. + # vector store should use. Useful for tools like `file_search` that can access + # files. sig { returns(String) } attr_accessor :file_id # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } attr_accessor :attributes # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. sig do returns( T.nilable( diff --git a/rbi/lib/openai/models/vector_stores/file_list_params.rbi b/rbi/lib/openai/models/vector_stores/file_list_params.rbi index 9f451eec..10960b9c 100644 --- a/rbi/lib/openai/models/vector_stores/file_list_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_list_params.rbi @@ -8,9 +8,9 @@ module OpenAI include OpenAI::Internal::Type::RequestParameters # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } attr_reader :after @@ -18,9 +18,9 @@ module OpenAI attr_writer :after # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. 
For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } attr_reader :before @@ -35,7 +35,7 @@ module OpenAI attr_writer :filter # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } attr_reader :limit @@ -43,7 +43,7 @@ module OpenAI attr_writer :limit # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. sig { returns(T.nilable(OpenAI::Models::VectorStores::FileListParams::Order::OrSymbol)) } attr_reader :order @@ -96,7 +96,7 @@ module OpenAI end # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. module Order extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/vector_stores/file_update_params.rbi b/rbi/lib/openai/models/vector_stores/file_update_params.rbi index 9eb12ec9..41edb9ab 100644 --- a/rbi/lib/openai/models/vector_stores/file_update_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_update_params.rbi @@ -11,10 +11,10 @@ module OpenAI attr_accessor :vector_store_id # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } attr_accessor :attributes diff --git a/rbi/lib/openai/models/vector_stores/vector_store_file.rbi b/rbi/lib/openai/models/vector_stores/vector_store_file.rbi index e729c6ee..6bf82fb0 100644 --- a/rbi/lib/openai/models/vector_stores/vector_store_file.rbi +++ b/rbi/lib/openai/models/vector_stores/vector_store_file.rbi @@ -13,7 +13,7 @@ module OpenAI attr_accessor :created_at # The last error associated with this vector store file. Will be `null` if there - # are no errors. + # are no errors. sig { returns(T.nilable(OpenAI::Models::VectorStores::VectorStoreFile::LastError)) } attr_reader :last_error @@ -30,28 +30,28 @@ module OpenAI attr_accessor :object # The status of the vector store file, which can be either `in_progress`, - # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the - # vector store file is ready for use. + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. sig { returns(OpenAI::Models::VectorStores::VectorStoreFile::Status::TaggedSymbol) } attr_accessor :status # The total vector store usage in bytes. 
Note that this may be different from the - # original file size. + # original file size. sig { returns(Integer) } attr_accessor :usage_bytes # The ID of the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # that the [File](https://platform.openai.com/docs/api-reference/files) is - # attached to. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. sig { returns(String) } attr_accessor :vector_store_id # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } attr_accessor :attributes @@ -135,7 +135,7 @@ module OpenAI attr_accessor :message # The last error associated with this vector store file. Will be `null` if there - # are no errors. + # are no errors. sig do params(code: OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code::OrSymbol, message: String) .returns(T.attached_class) @@ -172,8 +172,8 @@ module OpenAI end # The status of the vector store file, which can be either `in_progress`, - # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the - # vector store file is ready for use. + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. module Status extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi b/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi index 0ee00156..cef32c64 100644 --- a/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi +++ b/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi @@ -9,7 +9,7 @@ module OpenAI attr_accessor :id # The Unix timestamp (in seconds) for when the vector store files batch was - # created. + # created. sig { returns(Integer) } attr_accessor :created_at @@ -29,14 +29,14 @@ module OpenAI attr_accessor :object # The status of the vector store files batch, which can be either `in_progress`, - # `completed`, `cancelled` or `failed`. + # `completed`, `cancelled` or `failed`. sig { returns(OpenAI::Models::VectorStores::VectorStoreFileBatch::Status::TaggedSymbol) } attr_accessor :status # The ID of the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # that the [File](https://platform.openai.com/docs/api-reference/files) is - # attached to. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. sig { returns(String) } attr_accessor :vector_store_id @@ -124,7 +124,7 @@ module OpenAI end # The status of the vector store files batch, which can be either `in_progress`, - # `completed`, `cancelled` or `failed`. + # `completed`, `cancelled` or `failed`. 
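Since a batch's `status` is a tagged symbol with the four values listed above, a caller can poll until indexing settles. A sketch continuing the earlier example; the `retrieve` signature (child ID positional, parent ID as keyword) is an assumption based on the pattern other nested resources follow in this diff.

```ruby
# Poll until the batch leaves `in_progress`; `status` compares as a plain Symbol.
loop do
  batch = openai.vector_stores.file_batches.retrieve(batch.id, vector_store_id: "vs_abc123")
  break unless batch.status == :in_progress
  sleep(1)
end
puts "file batch finished with status #{batch.status}"
```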
module Status extend OpenAI::Internal::Type::Enum diff --git a/rbi/lib/openai/request_options.rbi b/rbi/lib/openai/request_options.rbi index 3f0ab9b4..9c3df5a0 100644 --- a/rbi/lib/openai/request_options.rbi +++ b/rbi/lib/openai/request_options.rbi @@ -2,32 +2,32 @@ module OpenAI # Specify HTTP behaviour to use for a specific request. These options supplement - # or override those provided at the client level. + # or override those provided at the client level. # - # When making a request, you can pass an actual {RequestOptions} instance, or - # simply pass a Hash with symbol keys matching the attributes on this class. + # When making a request, you can pass an actual {RequestOptions} instance, or + # simply pass a Hash with symbol keys matching the attributes on this class. class RequestOptions < OpenAI::Internal::Type::BaseModel # @api private sig { params(opts: T.any(T.self_type, T::Hash[Symbol, T.anything])).void } def self.validate!(opts); end # Idempotency key to send with request and all associated retries. Will only be - # sent for write requests. + # sent for write requests. sig { returns(T.nilable(String)) } attr_accessor :idempotency_key # Extra query params to send with the request. These are `.merge`’d into any - # `query` given at the client level. + # `query` given at the client level. sig { returns(T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))])) } attr_accessor :extra_query # Extra headers to send with the request. These are `.merge`’d into any - # `extra_headers` given at the client level. + # `extra_headers` given at the client level. sig { returns(T.nilable(T::Hash[String, T.nilable(String)])) } attr_accessor :extra_headers # Extra data to send with the request. These are deep merged into any data - # generated as part of the normal request. + # generated as part of the normal request. sig { returns(T.nilable(T.anything)) } attr_accessor :extra_body diff --git a/rbi/lib/openai/resources/audio/speech.rbi b/rbi/lib/openai/resources/audio/speech.rbi index f51b153a..2717898a 100644 --- a/rbi/lib/openai/resources/audio/speech.rbi +++ b/rbi/lib/openai/resources/audio/speech.rbi @@ -21,21 +21,21 @@ module OpenAI # The text to generate audio for. The maximum length is 4096 characters. input:, # One of the available [TTS models](https://platform.openai.com/docs/models#tts): - # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. + # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. model:, # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and - # `verse`. Previews of the voices are available in the - # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + # `verse`. Previews of the voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). voice:, # Control the voice of your generated audio with additional instructions. Does not - # work with `tts-1` or `tts-1-hd`. + # work with `tts-1` or `tts-1-hd`. instructions: nil, # The format to generate audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, - # `wav`, and `pcm`. + # `wav`, and `pcm`. response_format: nil, # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - # the default. + # the default.
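The speech parameters in this hunk translate into a call like the one below. A sketch only: the filename is invented, and treating the return value as an IO-like object holding the binary audio is an assumption based on the generated signatures.

```ruby
# Render text to speech and save the MP3 locally.
audio = openai.audio.speech.create(
  input: "The quick brown fox jumped over the lazy dog.",
  model: "gpt-4o-mini-tts",
  voice: :alloy,
  instructions: "Speak slowly, in a warm tone.", # ignored by tts-1 / tts-1-hd
  response_format: :mp3,
  speed: 1.0 # the documented default
)
File.binwrite("speech.mp3", audio.read)
```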
speed: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/audio/transcriptions.rbi b/rbi/lib/openai/resources/audio/transcriptions.rbi index 9fe4f4a3..824a08af 100644 --- a/rbi/lib/openai/resources/audio/transcriptions.rbi +++ b/rbi/lib/openai/resources/audio/transcriptions.rbi @@ -5,9 +5,9 @@ module OpenAI class Audio class Transcriptions # See {OpenAI::Resources::Audio::Transcriptions#create_streaming} for streaming - # counterpart. + # counterpart. # - # Transcribes audio into the input language. + # Transcribes audio into the input language. sig do params( file: T.any(IO, StringIO), @@ -25,52 +25,52 @@ module OpenAI end def create( # The audio file object (not file name) to transcribe, in one of these formats: - # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. file:, # ID of the model to use. The options are `gpt-4o-transcribe`, - # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - # Whisper V2 model). + # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + # Whisper V2 model). model:, # Additional information to include in the transcription response. `logprobs` will - # return the log probabilities of the tokens in the response to understand the - # model's confidence in the transcription. `logprobs` only works with - # response_format set to `json` and only with the models `gpt-4o-transcribe` and - # `gpt-4o-mini-transcribe`. + # return the log probabilities of the tokens in the response to understand the + # model's confidence in the transcription. `logprobs` only works with + # response_format set to `json` and only with the models `gpt-4o-transcribe` and + # `gpt-4o-mini-transcribe`. include: nil, # The language of the input audio. Supplying the input language in - # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - # format will improve accuracy and latency. + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. language: nil, # An optional text to guide the model's style or continue a previous audio - # segment. The - # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - # should match the audio language. + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. prompt: nil, # The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - # the only supported format is `json`. + # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + # the only supported format is `json`. response_format: nil, # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - # output more random, while lower values like 0.2 will make it more focused and - # deterministic. If set to 0, the model will use - # [log probability](https://en.wikipedia.org/wiki/Log_probability) to - # automatically increase the temperature until certain thresholds are hit. + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. temperature: nil, # The timestamp granularities to populate for this transcription. 
- # `response_format` must be set `verbose_json` to use timestamp granularities. - # Either or both of these options are supported: `word`, or `segment`. Note: There - # is no additional latency for segment timestamps, but generating word timestamps - # incurs additional latency. + # `response_format` must be set `verbose_json` to use timestamp granularities. + # Either or both of these options are supported: `word`, or `segment`. Note: There + # is no additional latency for segment timestamps, but generating word timestamps + # incurs additional latency. timestamp_granularities: nil, # There is no need to provide `stream:`. Instead, use `#create_streaming` or - # `#create` for streaming and non-streaming use cases, respectively. + # `#create` for streaming and non-streaming use cases, respectively. stream: false, request_options: {} ); end # See {OpenAI::Resources::Audio::Transcriptions#create} for non-streaming - # counterpart. + # counterpart. # - # Transcribes audio into the input language. + # Transcribes audio into the input language. sig do params( file: T.any(IO, StringIO), @@ -95,45 +95,45 @@ module OpenAI end def create_streaming( # The audio file object (not file name) to transcribe, in one of these formats: - # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. file:, # ID of the model to use. The options are `gpt-4o-transcribe`, - # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - # Whisper V2 model). + # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + # Whisper V2 model). model:, # Additional information to include in the transcription response. `logprobs` will - # return the log probabilities of the tokens in the response to understand the - # model's confidence in the transcription. `logprobs` only works with - # response_format set to `json` and only with the models `gpt-4o-transcribe` and - # `gpt-4o-mini-transcribe`. + # return the log probabilities of the tokens in the response to understand the + # model's confidence in the transcription. `logprobs` only works with + # response_format set to `json` and only with the models `gpt-4o-transcribe` and + # `gpt-4o-mini-transcribe`. include: nil, # The language of the input audio. Supplying the input language in - # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - # format will improve accuracy and latency. + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. language: nil, # An optional text to guide the model's style or continue a previous audio - # segment. The - # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - # should match the audio language. + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. prompt: nil, # The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - # the only supported format is `json`. + # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + # the only supported format is `json`. response_format: nil, # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - # output more random, while lower values like 0.2 will make it more focused and - # deterministic. 
If set to 0, the model will use - # [log probability](https://en.wikipedia.org/wiki/Log_probability) to - # automatically increase the temperature until certain thresholds are hit. + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. temperature: nil, # The timestamp granularities to populate for this transcription. - # `response_format` must be set `verbose_json` to use timestamp granularities. - # Either or both of these options are supported: `word`, or `segment`. Note: There - # is no additional latency for segment timestamps, but generating word timestamps - # incurs additional latency. + # `response_format` must be set to `verbose_json` to use timestamp granularities. + # Either or both of these options are supported: `word`, or `segment`. Note: There + # is no additional latency for segment timestamps, but generating word timestamps + # incurs additional latency. timestamp_granularities: nil, # There is no need to provide `stream:`. Instead, use `#create_streaming` or - # `#create` for streaming and non-streaming use cases, respectively. + # `#create` for streaming and non-streaming use cases, respectively. stream: true, request_options: {} ); end diff --git a/rbi/lib/openai/resources/audio/translations.rbi b/rbi/lib/openai/resources/audio/translations.rbi index 4ee7bcc0..d39ad32c 100644 --- a/rbi/lib/openai/resources/audio/translations.rbi +++ b/rbi/lib/openai/resources/audio/translations.rbi @@ -18,24 +18,24 @@ module OpenAI end def create( # The audio file object (not file name) to translate, in one of these formats: flac, - # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. file:, # ID of the model to use. Only `whisper-1` (which is powered by our open source - # Whisper V2 model) is currently available. + # Whisper V2 model) is currently available. model:, # An optional text to guide the model's style or continue a previous audio - # segment. The - # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - # should be in English. + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should be in English. prompt: nil, # The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. + # `verbose_json`, or `vtt`. response_format: nil, # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - # output more random, while lower values like 0.2 will make it more focused and - # deterministic. If set to 0, the model will use - # [log probability](https://en.wikipedia.org/wiki/Log_probability) to - # automatically increase the temperature until certain thresholds are hit. + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit.
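A usage sketch for the translation endpoint documented above, again reusing the `openai` client from the first example; the input filename is a placeholder, and the `.text` accessor assumes the default JSON response shape.

```ruby
# Translate a non-English recording into English text.
translation = File.open("standup_in_german.m4a", "rb") do |f|
  openai.audio.translations.create(
    file: f,            # an IO object, not a file name
    model: "whisper-1",
    prompt: "Proper nouns: OpenAI, Solargraph.", # the prompt should be in English
    temperature: 0.2
  )
end
puts translation.text
```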
temperature: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/batches.rbi b/rbi/lib/openai/resources/batches.rbi index ec20cb07..390a8fa9 100644 --- a/rbi/lib/openai/resources/batches.rbi +++ b/rbi/lib/openai/resources/batches.rbi @@ -16,29 +16,29 @@ module OpenAI end def create( # The time frame within which the batch should be processed. Currently only `24h` - # is supported. + # is supported. completion_window:, # The endpoint to be used for all requests in the batch. Currently - # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - # are supported. Note that `/v1/embeddings` batches are also restricted to a - # maximum of 50,000 embedding inputs across all requests in the batch. + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + # are supported. Note that `/v1/embeddings` batches are also restricted to a + # maximum of 50,000 embedding inputs across all requests in the batch. endpoint:, # The ID of an uploaded file that contains requests for the new batch. # - # See [upload file](https://platform.openai.com/docs/api-reference/files/create) - # for how to upload a file. + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. # - # Your input file must be formatted as a - # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), - # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - # requests, and can be up to 200 MB in size. + # Your input file must be formatted as a + # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), + # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 + # requests, and can be up to 200 MB in size. input_file_id:, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, request_options: {} ); end @@ -66,18 +66,18 @@ module OpenAI end def list( # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. after: nil, # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. limit: nil, request_options: {} ); end # Cancels an in-progress batch. The batch will be in status `cancelling` for up to - # 10 minutes, before changing to `cancelled`, where it will have partial results - # (if any) available in the output file. + # 10 minutes, before changing to `cancelled`, where it will have partial results + # (if any) available in the output file. 
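Putting the batch endpoints above together, roughly: the input file ID is a placeholder that must point at a JSONL file uploaded with purpose `batch`, and `auto_paging_each` is assumed from the SDK's cursor-pagination helpers rather than stated in this diff.

```ruby
# Submit a batch of chat completion requests, processed within 24 hours.
batch = openai.batches.create(
  completion_window: :"24h",
  endpoint: :"/v1/chat/completions",
  input_file_id: "file-abc123", # placeholder
  metadata: {job: "nightly-eval"}
)

# Walk every batch, 100 per page, letting the cursor params do the work.
openai.batches.list(limit: 100).auto_paging_each do |b|
  puts "#{b.id}: #{b.status}"
end
```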
sig do params( batch_id: String, diff --git a/rbi/lib/openai/resources/beta/assistants.rbi b/rbi/lib/openai/resources/beta/assistants.rbi index 277c81d2..580fbcba 100644 --- a/rbi/lib/openai/resources/beta/assistants.rbi +++ b/rbi/lib/openai/resources/beta/assistants.rbi @@ -39,71 +39,71 @@ module OpenAI end def create( # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model:, # The description of the assistant. The maximum length is 512 characters. description: nil, # The system instructions that the assistant uses. The maximum length is 256,000 - # characters. + # characters. instructions: nil, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # The name of the assistant. The maximum length is 256 characters. name: nil, # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. 
+ # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. tool_resources: nil, # A list of tool enabled on the assistant. There can be a maximum of 128 tools per - # assistant. Tools can be of types `code_interpreter`, `file_search`, or - # `function`. + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. tools: nil, # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. top_p: nil, request_options: {} ); end @@ -160,69 +160,69 @@ module OpenAI # The description of the assistant. The maximum length is 512 characters. description: nil, # The system instructions that the assistant uses. The maximum length is 256,000 - # characters. + # characters. instructions: nil, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. 
Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model: nil, # The name of the assistant. The maximum length is 256 characters. name: nil, # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. 
Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. tool_resources: nil, # A list of tool enabled on the assistant. There can be a maximum of 128 tools per - # assistant. Tools can be of types `code_interpreter`, `file_search`, or - # `function`. + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. tools: nil, # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. top_p: nil, request_options: {} ); end @@ -239,20 +239,20 @@ module OpenAI end def list( # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. after: nil, # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. before: nil, # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. limit: nil, # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. 
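As a quick orientation for the assistant endpoints above, a hedged sketch: the model name, assistant name, and instructions are placeholders, and `auto_paging_each` is again an assumption about the SDK's pagination helpers.

```ruby
# Create an assistant, adjusting temperature but leaving top_p alone, per the
# recommendation above to alter one or the other, not both.
assistant = openai.beta.assistants.create(
  model: "gpt-4o",
  name: "Docs helper",
  instructions: "Answer questions about the internal handbook.",
  temperature: 0.2
)

# List assistants newest-first, 20 per page (the documented default).
openai.beta.assistants.list(order: :desc, limit: 20).auto_paging_each do |a|
  puts "#{a.id} #{a.name}"
end
```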
order: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/beta/threads.rbi b/rbi/lib/openai/resources/beta/threads.rbi index 07a8d939..316bbca4 100644 --- a/rbi/lib/openai/resources/beta/threads.rbi +++ b/rbi/lib/openai/resources/beta/threads.rbi @@ -22,19 +22,19 @@ module OpenAI end def create( # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to - # start the thread with. + # start the thread with. messages: nil, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. tool_resources: nil, request_options: {} ); end @@ -65,16 +65,16 @@ module OpenAI # The ID of the thread to modify. Only the `metadata` can be modified. thread_id, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. tool_resources: nil, request_options: {} ); end @@ -93,7 +93,7 @@ module OpenAI ); end # See {OpenAI::Resources::Beta::Threads#stream_raw} for streaming counterpart. # - # Create a thread and run it in one request. + # Create a thread and run it in one request. sig do params( assistant_id: String, @@ -143,102 +143,102 @@ module OpenAI end def create_and_run( # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. assistant_id:, # Override the default system message of the assistant. 
This is useful for - # modifying the behavior on a per-run basis. + # modifying the behavior on a per-run basis. instructions: nil, # The maximum number of completion tokens that may be used over the course of the - # run. The run will make a best effort to use only the number of completion tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # completion tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. max_completion_tokens: nil, # The maximum number of prompt tokens that may be used over the course of the run. - # The run will make a best effort to use only the number of prompt tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # prompt tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. max_prompt_tokens: nil, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. model: nil, # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. 
Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, # Options to create a new thread. If no thread is provided when running a request, - # an empty thread will be created. + # an empty thread will be created. thread: nil, # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. tool_choice: nil, # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. 
For example, the `code_interpreter` tool requires
+ # a list of file IDs, while the `file_search` tool requires a list of vector store
+ # IDs.
tool_resources: nil,
# Override the tools the assistant can use for this run. This is useful for
- # modifying the behavior on a per-run basis.
+ # modifying the behavior on a per-run basis.
tools: nil,
# An alternative to sampling with temperature, called nucleus sampling, where the
- # model considers the results of the tokens with top_p probability mass. So 0.1
- # means only the tokens comprising the top 10% probability mass are considered.
+ # model considers the results of the tokens with top_p probability mass. So 0.1
+ # means only the tokens comprising the top 10% probability mass are considered.
#
- # We generally recommend altering this or temperature but not both.
+ # We generally recommend altering this or temperature but not both.
top_p: nil,
# Controls for how a thread will be truncated prior to the run. Use this to
- # control the intial context window of the run.
+ # control the initial context window of the run.
truncation_strategy: nil,
# There is no need to provide `stream:`. Instead, use `#stream_raw` or
- # `#create_and_run` for streaming and non-streaming use cases, respectively.
+ # `#create_and_run` for streaming and non-streaming use cases, respectively.
stream: false,
request_options: {}
); end
# See {OpenAI::Resources::Beta::Threads#create_and_run} for non-streaming
- # counterpart.
+ # counterpart.
#
- # Create a thread and run it in one request.
+ # Create a thread and run it in one request.
sig do
params(
assistant_id: String,
@@ -317,95 +317,95 @@ module OpenAI
end
def stream_raw(
# The ID of the
- # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
- # execute this run.
+ # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ # execute this run.
assistant_id:,
# Override the default system message of the assistant. This is useful for
- # modifying the behavior on a per-run basis.
+ # modifying the behavior on a per-run basis.
instructions: nil,
# The maximum number of completion tokens that may be used over the course of the
- # run. The run will make a best effort to use only the number of completion tokens
- # specified, across multiple turns of the run. If the run exceeds the number of
- # completion tokens specified, the run will end with status `incomplete`. See
- # `incomplete_details` for more info.
+ # run. The run will make a best effort to use only the number of completion tokens
+ # specified, across multiple turns of the run. If the run exceeds the number of
+ # completion tokens specified, the run will end with status `incomplete`. See
+ # `incomplete_details` for more info.
max_completion_tokens: nil,
# The maximum number of prompt tokens that may be used over the course of the run.
- # The run will make a best effort to use only the number of prompt tokens
- # specified, across multiple turns of the run. If the run exceeds the number of
- # prompt tokens specified, the run will end with status `incomplete`. See
- # `incomplete_details` for more info.
+ # The run will make a best effort to use only the number of prompt tokens
+ # specified, across multiple turns of the run. If the run exceeds the number of
+ # prompt tokens specified, the run will end with status `incomplete`. See
+ # `incomplete_details` for more info.
max_prompt_tokens: nil,
# Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
#
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
metadata: nil,
# The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
- # be used to execute this run. If a value is provided here, it will override the
- # model associated with the assistant. If not, the model associated with the
- # assistant will be used.
+ # be used to execute this run. If a value is provided here, it will override the
+ # model associated with the assistant. If not, the model associated with the
+ # assistant will be used.
model: nil,
# Whether to enable
- # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
- # during tool use.
+ # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+ # during tool use.
parallel_tool_calls: nil,
# Specifies the format that the model must output. Compatible with
- # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
- # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
- # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+ # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
#
- # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- # Outputs which ensures the model will match your supplied JSON schema. Learn more
- # in the
- # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ # Outputs which ensures the model will match your supplied JSON schema. Learn more
+ # in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
#
- # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- # message the model generates is valid JSON.
+ # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+ # message the model generates is valid JSON.
#
- # **Important:** when using JSON mode, you **must** also instruct the model to
- # produce JSON yourself via a system or user message. Without this, the model may
- # generate an unending stream of whitespace until the generation reaches the token
- # limit, resulting in a long-running and seemingly "stuck" request. Also note that
- # the message content may be partially cut off if `finish_reason="length"`, which
- # indicates the generation exceeded `max_tokens` or the conversation exceeded the
- # max context length.
+ # **Important:** when using JSON mode, you **must** also instruct the model to
+ # produce JSON yourself via a system or user message. Without this, the model may
+ # generate an unending stream of whitespace until the generation reaches the token
+ # limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ # the message content may be partially cut off if `finish_reason="length"`, which
+ # indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ # max context length.
response_format: nil,
# What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- # make the output more random, while lower values like 0.2 will make it more
- # focused and deterministic.
+ # make the output more random, while lower values like 0.2 will make it more
+ # focused and deterministic.
temperature: nil,
# Options to create a new thread. If no thread is provided when running a request,
- # an empty thread will be created.
+ # an empty thread will be created.
thread: nil,
# Controls which (if any) tool is called by the model. `none` means the model will
- # not call any tools and instead generates a message. `auto` is the default value
- # and means the model can pick between generating a message or calling one or more
- # tools. `required` means the model must call one or more tools before responding
- # to the user. Specifying a particular tool like `{"type": "file_search"}` or
- # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- # call that tool.
+ # not call any tools and instead generates a message. `auto` is the default value
+ # and means the model can pick between generating a message or calling one or more
+ # tools. `required` means the model must call one or more tools before responding
+ # to the user. Specifying a particular tool like `{"type": "file_search"}` or
+ # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ # call that tool.
tool_choice: nil,
# A set of resources that are used by the assistant's tools. The resources are
- # specific to the type of tool. For example, the `code_interpreter` tool requires
- # a list of file IDs, while the `file_search` tool requires a list of vector store
- # IDs.
+ # specific to the type of tool. For example, the `code_interpreter` tool requires
+ # a list of file IDs, while the `file_search` tool requires a list of vector store
+ # IDs.
tool_resources: nil,
# Override the tools the assistant can use for this run. This is useful for
- # modifying the behavior on a per-run basis.
+ # modifying the behavior on a per-run basis.
tools: nil,
# An alternative to sampling with temperature, called nucleus sampling, where the
- # model considers the results of the tokens with top_p probability mass. So 0.1
- # means only the tokens comprising the top 10% probability mass are considered.
+ # model considers the results of the tokens with top_p probability mass. So 0.1
+ # means only the tokens comprising the top 10% probability mass are considered.
#
- # We generally recommend altering this or temperature but not both.
+ # We generally recommend altering this or temperature but not both.
top_p: nil,
# Controls for how a thread will be truncated prior to the run. Use this to
- # control the intial context window of the run.
+ # control the initial context window of the run.
truncation_strategy: nil,
# There is no need to provide `stream:`. Instead, use `#stream_raw` or
- # `#create_and_run` for streaming and non-streaming use cases, respectively.
+ # `#create_and_run` for streaming and non-streaming use cases, respectively.
stream: true,
request_options: {}
); end
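The two entry points above differ only in `stream:`; every other parameter is shared. A minimal sketch of the non-streaming variant (not part of this patch, assuming a `client` built with `OpenAI::Client.new(api_key: ...)`; the assistant ID and message content are hypothetical):

    run = client.beta.threads.create_and_run(
      assistant_id: "asst_123",                                   # hypothetical ID
      thread: {messages: [{role: :user, content: "Summarize our plan."}]},
      top_p: 0.9,                            # nucleus sampling; leave temperature unset
      truncation_strategy: {type: :auto}     # shapes the initial context window
    )

For streaming, `#stream_raw` takes the same keyword arguments and sets `stream: true` internally, as the comments above note.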
diff --git a/rbi/lib/openai/resources/beta/threads/messages.rbi b/rbi/lib/openai/resources/beta/threads/messages.rbi
index ac987e13..2d12cb7e 100644
--- a/rbi/lib/openai/resources/beta/threads/messages.rbi
+++ b/rbi/lib/openai/resources/beta/threads/messages.rbi
@@ -31,25 +31,25 @@ module OpenAI
end
def create(
# The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
- # to create a message for.
+ # to create a message for.
thread_id,
# The text contents of the message.
content:,
# The role of the entity that is creating the message. Allowed values include:
#
- # - `user`: Indicates the message is sent by an actual user and should be used in
- # most cases to represent user-generated messages.
- # - `assistant`: Indicates the message is generated by the assistant. Use this
- # value to insert messages from the assistant into the conversation.
+ # - `user`: Indicates the message is sent by an actual user and should be used in
+ # most cases to represent user-generated messages.
+ # - `assistant`: Indicates the message is generated by the assistant. Use this
+ # value to insert messages from the assistant into the conversation.
role:,
# A list of files attached to the message, and the tools they should be added to.
attachments: nil,
# Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
#
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
metadata: nil,
request_options: {}
); end
@@ -66,7 +66,7 @@ module OpenAI
# The ID of the message to retrieve.
message_id,
# The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
- # to which this message belongs.
+ # to which this message belongs.
thread_id:,
request_options: {}
); end
@@ -86,11 +86,11 @@ module OpenAI
# Path param: The ID of the thread to which this message belongs.
thread_id:,
# Body param: Set of 16 key-value pairs that can be attached to an object. This
- # can be useful for storing additional information about the object in a
- # structured format, and querying for objects via API or the dashboard.
+ # can be useful for storing additional information about the object in a
+ # structured format, and querying for objects via API or the dashboard.
#
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
metadata: nil,
request_options: {}
); end
@@ -109,23 +109,23 @@ module OpenAI
end
def list(
# The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
- # the messages belong to.
+ # the messages belong to.
thread_id,
# A cursor for use in pagination. `after` is an object ID that defines your place
- # in the list. For instance, if you make a list request and receive 100 objects,
- # ending with obj_foo, your subsequent call can include after=obj_foo in order to
- # fetch the next page of the list.
+ # in the list. For instance, if you make a list request and receive 100 objects,
+ # ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ # fetch the next page of the list.
after: nil,
# A cursor for use in pagination. `before` is an object ID that defines your place
- # in the list. For instance, if you make a list request and receive 100 objects,
- # starting with obj_foo, your subsequent call can include before=obj_foo in order
- # to fetch the previous page of the list.
+ # in the list. For instance, if you make a list request and receive 100 objects,
+ # starting with obj_foo, your subsequent call can include before=obj_foo in order
+ # to fetch the previous page of the list.
before: nil,
# A limit on the number of objects to be returned. Limit can range between 1 and
- # 100, and the default is 20.
+ # 100, and the default is 20.
limit: nil,
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- # order and `desc` for descending order.
+ # order and `desc` for descending order.
order: nil,
# Filter messages by the run ID that generated them.
run_id: nil,
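The `after`/`before` cursors documented in `#list` compose into page-by-page traversal. A sketch under the same assumptions as above (hypothetical thread ID; assuming the returned page object exposes its items via `data`):

    page = client.beta.threads.messages.list("thread_abc", limit: 20, order: :desc)
    # Cursor pagination: pass the last seen ID as `after` to fetch the next page.
    next_page = client.beta.threads.messages.list(
      "thread_abc",
      limit: 20,
      after: page.data.last.id
    )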
diff --git a/rbi/lib/openai/resources/beta/threads/runs.rbi b/rbi/lib/openai/resources/beta/threads/runs.rbi
index 09f55c13..e3a1216a 100644
--- a/rbi/lib/openai/resources/beta/threads/runs.rbi
+++ b/rbi/lib/openai/resources/beta/threads/runs.rbi
@@ -9,9 +9,9 @@ module OpenAI
attr_reader :steps
# See {OpenAI::Resources::Beta::Threads::Runs#create_stream_raw} for streaming
- # counterpart.
+ # counterpart.
#
- # Create a run.
+ # Create a run.
sig do
params(
thread_id: String,
@@ -68,120 +68,120 @@ module OpenAI
# Path param: The ID of the thread to run.
thread_id,
# Body param: The ID of the
- # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
- # execute this run.
+ # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ # execute this run.
assistant_id:,
# Query param: A list of additional fields to include in the response. Currently
- # the only supported value is
- # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
- # search result content.
+ # the only supported value is
+ # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+ # search result content.
#
- # See the
- # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
- # for more information.
+ # See the
+ # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+ # for more information.
include: nil,
# Body param: Appends additional instructions at the end of the instructions for
- # the run. This is useful for modifying the behavior on a per-run basis without
- # overriding other instructions.
+ # the run. This is useful for modifying the behavior on a per-run basis without
+ # overriding other instructions.
additional_instructions: nil,
# Body param: Adds additional messages to the thread before creating the run.
additional_messages: nil,
# Body param: Overrides the
- # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
- # of the assistant. This is useful for modifying the behavior on a per-run basis.
+ # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
- # of the assistant. This is useful for modifying the behavior on a per-run basis.
instructions: nil,
# Body param: The maximum number of completion tokens that may be used over the
- # course of the run. The run will make a best effort to use only the number of
- # completion tokens specified, across multiple turns of the run. If the run
- # exceeds the number of completion tokens specified, the run will end with status
- # `incomplete`. See `incomplete_details` for more info.
+ # course of the run. The run will make a best effort to use only the number of
+ # completion tokens specified, across multiple turns of the run. If the run
+ # exceeds the number of completion tokens specified, the run will end with status
+ # `incomplete`. See `incomplete_details` for more info.
max_completion_tokens: nil,
# Body param: The maximum number of prompt tokens that may be used over the course
- # of the run. The run will make a best effort to use only the number of prompt
- # tokens specified, across multiple turns of the run. If the run exceeds the
- # number of prompt tokens specified, the run will end with status `incomplete`.
- # See `incomplete_details` for more info.
+ # of the run. The run will make a best effort to use only the number of prompt
+ # tokens specified, across multiple turns of the run. If the run exceeds the
+ # number of prompt tokens specified, the run will end with status `incomplete`.
+ # See `incomplete_details` for more info.
max_prompt_tokens: nil,
# Body param: Set of 16 key-value pairs that can be attached to an object. This
- # can be useful for storing additional information about the object in a
- # structured format, and querying for objects via API or the dashboard.
+ # can be useful for storing additional information about the object in a
+ # structured format, and querying for objects via API or the dashboard.
#
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
metadata: nil,
# Body param: The ID of the
- # [Model](https://platform.openai.com/docs/api-reference/models) to be used to
- # execute this run. If a value is provided here, it will override the model
- # associated with the assistant. If not, the model associated with the assistant
- # will be used.
+ # [Model](https://platform.openai.com/docs/api-reference/models) to be used to
+ # execute this run. If a value is provided here, it will override the model
+ # associated with the assistant. If not, the model associated with the assistant
+ # will be used.
model: nil,
# Body param: Whether to enable
- # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
- # during tool use.
+ # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+ # during tool use.
parallel_tool_calls: nil,
# Body param: **o-series models only**
#
- # Constrains effort on reasoning for
- # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- # result in faster responses and fewer tokens used on reasoning in a response.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ # result in faster responses and fewer tokens used on reasoning in a response.
reasoning_effort: nil,
# Body param: Specifies the format that the model must output. Compatible with
- # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
- # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
- # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+ # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
#
- # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- # Outputs which ensures the model will match your supplied JSON schema. Learn more
- # in the
- # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ # Outputs which ensures the model will match your supplied JSON schema. Learn more
+ # in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
#
- # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- # message the model generates is valid JSON.
+ # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+ # message the model generates is valid JSON.
#
- # **Important:** when using JSON mode, you **must** also instruct the model to
- # produce JSON yourself via a system or user message. Without this, the model may
- # generate an unending stream of whitespace until the generation reaches the token
- # limit, resulting in a long-running and seemingly "stuck" request. Also note that
- # the message content may be partially cut off if `finish_reason="length"`, which
- # indicates the generation exceeded `max_tokens` or the conversation exceeded the
- # max context length.
+ # **Important:** when using JSON mode, you **must** also instruct the model to
+ # produce JSON yourself via a system or user message. Without this, the model may
+ # generate an unending stream of whitespace until the generation reaches the token
+ # limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ # the message content may be partially cut off if `finish_reason="length"`, which
+ # indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ # max context length.
response_format: nil,
# Body param: What sampling temperature to use, between 0 and 2. Higher values
- # like 0.8 will make the output more random, while lower values like 0.2 will make
- # it more focused and deterministic.
+ # like 0.8 will make the output more random, while lower values like 0.2 will make
+ # it more focused and deterministic.
temperature: nil,
# Body param: Controls which (if any) tool is called by the model. `none` means
- # the model will not call any tools and instead generates a message. `auto` is the
- # default value and means the model can pick between generating a message or
- # calling one or more tools. `required` means the model must call one or more
- # tools before responding to the user. Specifying a particular tool like
- # `{"type": "file_search"}` or
- # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- # call that tool.
+ # the model will not call any tools and instead generates a message. `auto` is the
+ # default value and means the model can pick between generating a message or
+ # calling one or more tools. `required` means the model must call one or more
+ # tools before responding to the user. Specifying a particular tool like
+ # `{"type": "file_search"}` or
+ # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ # call that tool.
tool_choice: nil,
# Body param: Override the tools the assistant can use for this run. This is
- # useful for modifying the behavior on a per-run basis.
+ # useful for modifying the behavior on a per-run basis.
tools: nil,
# Body param: An alternative to sampling with temperature, called nucleus
- # sampling, where the model considers the results of the tokens with top_p
- # probability mass. So 0.1 means only the tokens comprising the top 10%
- # probability mass are considered.
+ # sampling, where the model considers the results of the tokens with top_p
+ # probability mass. So 0.1 means only the tokens comprising the top 10%
+ # probability mass are considered.
#
- # We generally recommend altering this or temperature but not both.
+ # We generally recommend altering this or temperature but not both.
top_p: nil,
# Body param: Controls for how a thread will be truncated prior to the run. Use
- # this to control the intial context window of the run.
+ # this to control the initial context window of the run.
truncation_strategy: nil,
# There is no need to provide `stream:`. Instead, use `#create_stream_raw` or
- # `#create` for streaming and non-streaming use cases, respectively.
+ # `#create` for streaming and non-streaming use cases, respectively.
stream: false,
request_options: {}
); end
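As the `tool_choice` comment spells out, a particular tool can be forced per run. A hedged sketch of the non-streaming `#create` (hypothetical IDs; `client` as before):

    run = client.beta.threads.runs.create(
      "thread_abc",
      assistant_id: "asst_123",
      # Force one specific function rather than letting the model pick (`auto`).
      tool_choice: {type: :function, function: {name: "my_function"}},
      max_completion_tokens: 1_000   # beyond this the run ends with status `incomplete`
    )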
# See {OpenAI::Resources::Beta::Threads::Runs#create} for non-streaming
- # counterpart.
+ # counterpart.
#
- # Create a run.
+ # Create a run.
sig do
params(
thread_id: String,
@@ -267,113 +267,113 @@ module OpenAI
# Path param: The ID of the thread to run.
thread_id,
# Body param: The ID of the
- # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
- # execute this run.
+ # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ # execute this run.
assistant_id:,
# Query param: A list of additional fields to include in the response. Currently
- # the only supported value is
- # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
- # search result content.
+ # the only supported value is
+ # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+ # search result content.
#
- # See the
- # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
- # for more information.
+ # See the
+ # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
- # for more information.
include: nil,
# Body param: Appends additional instructions at the end of the instructions for
- # the run. This is useful for modifying the behavior on a per-run basis without
- # overriding other instructions.
+ # the run. This is useful for modifying the behavior on a per-run basis without
+ # overriding other instructions.
additional_instructions: nil,
# Body param: Adds additional messages to the thread before creating the run.
additional_messages: nil,
# Body param: Overrides the
- # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
- # of the assistant. This is useful for modifying the behavior on a per-run basis.
+ # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+ # of the assistant. This is useful for modifying the behavior on a per-run basis.
instructions: nil,
# Body param: The maximum number of completion tokens that may be used over the
- # course of the run. The run will make a best effort to use only the number of
- # completion tokens specified, across multiple turns of the run. If the run
- # exceeds the number of completion tokens specified, the run will end with status
- # `incomplete`. See `incomplete_details` for more info.
+ # course of the run. The run will make a best effort to use only the number of
+ # completion tokens specified, across multiple turns of the run. If the run
+ # exceeds the number of completion tokens specified, the run will end with status
+ # `incomplete`. See `incomplete_details` for more info.
max_completion_tokens: nil,
# Body param: The maximum number of prompt tokens that may be used over the course
- # of the run. The run will make a best effort to use only the number of prompt
- # tokens specified, across multiple turns of the run. If the run exceeds the
- # number of prompt tokens specified, the run will end with status `incomplete`.
- # See `incomplete_details` for more info.
+ # of the run. The run will make a best effort to use only the number of prompt
+ # tokens specified, across multiple turns of the run. If the run exceeds the
+ # number of prompt tokens specified, the run will end with status `incomplete`.
+ # See `incomplete_details` for more info.
max_prompt_tokens: nil,
# Body param: Set of 16 key-value pairs that can be attached to an object. This
- # can be useful for storing additional information about the object in a
- # structured format, and querying for objects via API or the dashboard.
+ # can be useful for storing additional information about the object in a
+ # structured format, and querying for objects via API or the dashboard.
#
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
metadata: nil,
# Body param: The ID of the
- # [Model](https://platform.openai.com/docs/api-reference/models) to be used to
- # execute this run. If a value is provided here, it will override the model
- # associated with the assistant. If not, the model associated with the assistant
- # will be used.
+ # [Model](https://platform.openai.com/docs/api-reference/models) to be used to
+ # execute this run. If a value is provided here, it will override the model
+ # associated with the assistant. If not, the model associated with the assistant
+ # will be used.
model: nil,
# Body param: Whether to enable
- # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
- # during tool use.
+ # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+ # during tool use.
parallel_tool_calls: nil,
# Body param: **o-series models only**
#
- # Constrains effort on reasoning for
- # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- # result in faster responses and fewer tokens used on reasoning in a response.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ # result in faster responses and fewer tokens used on reasoning in a response.
reasoning_effort: nil,
# Body param: Specifies the format that the model must output. Compatible with
- # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
- # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
- # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+ # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
#
- # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- # Outputs which ensures the model will match your supplied JSON schema. Learn more
- # in the
- # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ # Outputs which ensures the model will match your supplied JSON schema. Learn more
+ # in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
#
- # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- # message the model generates is valid JSON.
+ # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+ # message the model generates is valid JSON.
#
- # **Important:** when using JSON mode, you **must** also instruct the model to
- # produce JSON yourself via a system or user message. Without this, the model may
- # generate an unending stream of whitespace until the generation reaches the token
- # limit, resulting in a long-running and seemingly "stuck" request. Also note that
- # the message content may be partially cut off if `finish_reason="length"`, which
- # indicates the generation exceeded `max_tokens` or the conversation exceeded the
- # max context length.
+ # **Important:** when using JSON mode, you **must** also instruct the model to
+ # produce JSON yourself via a system or user message. Without this, the model may
+ # generate an unending stream of whitespace until the generation reaches the token
+ # limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ # the message content may be partially cut off if `finish_reason="length"`, which
+ # indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ # max context length.
response_format: nil,
# Body param: What sampling temperature to use, between 0 and 2. Higher values
- # like 0.8 will make the output more random, while lower values like 0.2 will make
- # it more focused and deterministic.
+ # like 0.8 will make the output more random, while lower values like 0.2 will make
+ # it more focused and deterministic.
temperature: nil,
# Body param: Controls which (if any) tool is called by the model. `none` means
- # the model will not call any tools and instead generates a message. `auto` is the
- # default value and means the model can pick between generating a message or
- # calling one or more tools. `required` means the model must call one or more
- # tools before responding to the user. Specifying a particular tool like
- # `{"type": "file_search"}` or
- # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- # call that tool.
+ # the model will not call any tools and instead generates a message. `auto` is the
+ # default value and means the model can pick between generating a message or
+ # calling one or more tools. `required` means the model must call one or more
+ # tools before responding to the user. Specifying a particular tool like
+ # `{"type": "file_search"}` or
+ # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ # call that tool.
tool_choice: nil,
# Body param: Override the tools the assistant can use for this run. This is
- # useful for modifying the behavior on a per-run basis.
+ # useful for modifying the behavior on a per-run basis.
tools: nil,
# Body param: An alternative to sampling with temperature, called nucleus
- # sampling, where the model considers the results of the tokens with top_p
- # probability mass. So 0.1 means only the tokens comprising the top 10%
- # probability mass are considered.
+ # sampling, where the model considers the results of the tokens with top_p
+ # probability mass. So 0.1 means only the tokens comprising the top 10%
+ # probability mass are considered.
#
- # We generally recommend altering this or temperature but not both.
+ # We generally recommend altering this or temperature but not both.
top_p: nil,
# Body param: Controls for how a thread will be truncated prior to the run. Use
- # this to control the intial context window of the run.
+ # this to control the initial context window of the run.
truncation_strategy: nil,
# There is no need to provide `stream:`. Instead, use `#create_stream_raw` or
- # `#create` for streaming and non-streaming use cases, respectively.
+ # `#create` for streaming and non-streaming use cases, respectively.
stream: true,
request_options: {}
); end
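The streaming variant above yields server-sent events rather than a finished run. A sketch, assuming the object returned by `#create_stream_raw` is enumerable (IDs hypothetical):

    stream = client.beta.threads.runs.create_stream_raw(
      "thread_abc",
      assistant_id: "asst_123"
    )
    # Each yielded item is an assistant stream event; inspect its type as it arrives.
    stream.each { |event| puts event.class }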
@@ -390,7 +390,7 @@ module OpenAI
# The ID of the run to retrieve.
run_id,
# The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
- # that was run.
+ # that was run.
thread_id:,
request_options: {}
); end
@@ -408,14 +408,14 @@ module OpenAI
# Path param: The ID of the run to modify.
run_id,
# Path param: The ID of the
- # [thread](https://platform.openai.com/docs/api-reference/threads) that was run.
+ # [thread](https://platform.openai.com/docs/api-reference/threads) that was run.
thread_id:,
# Body param: Set of 16 key-value pairs that can be attached to an object. This
- # can be useful for storing additional information about the object in a
- # structured format, and querying for objects via API or the dashboard.
+ # can be useful for storing additional information about the object in a
+ # structured format, and querying for objects via API or the dashboard.
#
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
metadata: nil,
request_options: {}
); end
@@ -435,20 +435,20 @@ module OpenAI
# The ID of the thread the run belongs to.
thread_id,
# A cursor for use in pagination. `after` is an object ID that defines your place
- # in the list. For instance, if you make a list request and receive 100 objects,
- # ending with obj_foo, your subsequent call can include after=obj_foo in order to
- # fetch the next page of the list.
+ # in the list. For instance, if you make a list request and receive 100 objects,
+ # ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ # fetch the next page of the list.
after: nil,
# A cursor for use in pagination. `before` is an object ID that defines your place
- # in the list. For instance, if you make a list request and receive 100 objects,
- # starting with obj_foo, your subsequent call can include before=obj_foo in order
- # to fetch the previous page of the list.
+ # in the list. For instance, if you make a list request and receive 100 objects,
+ # starting with obj_foo, your subsequent call can include before=obj_foo in order
+ # to fetch the previous page of the list.
before: nil,
# A limit on the number of objects to be returned. Limit can range between 1 and
- # 100, and the default is 20.
+ # 100, and the default is 20.
limit: nil,
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- # order and `desc` for descending order.
+ # order and `desc` for descending order.
order: nil,
request_options: {}
); end
@@ -469,12 +469,12 @@ module OpenAI
request_options: {}
); end
# See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs_stream_raw} for
- # streaming counterpart.
+ # streaming counterpart.
#
- # When a run has the `status: "requires_action"` and `required_action.type` is
- # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
- # tool calls once they're all completed. All outputs must be submitted in a single
- # request.
+ # When a run has the `status: "requires_action"` and `required_action.type` is
+ # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+ # tool calls once they're all completed. All outputs must be submitted in a single
+ # request.
sig do
params(
run_id: String,
@@ -489,24 +489,24 @@ module OpenAI
# Path param: The ID of the run that requires the tool output submission.
run_id,
# Path param: The ID of the
- # [thread](https://platform.openai.com/docs/api-reference/threads) to which this
- # run belongs.
+ # [thread](https://platform.openai.com/docs/api-reference/threads) to which this
+ # run belongs.
thread_id:,
# Body param: A list of tools for which the outputs are being submitted.
tool_outputs:,
# There is no need to provide `stream:`. Instead, use
- # `#submit_tool_outputs_stream_raw` or `#submit_tool_outputs` for streaming and
- # non-streaming use cases, respectively.
+ # `#submit_tool_outputs_stream_raw` or `#submit_tool_outputs` for streaming and
+ # non-streaming use cases, respectively.
stream: false,
request_options: {}
); end
# See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs} for
- # non-streaming counterpart.
+ # non-streaming counterpart.
#
- # When a run has the `status: "requires_action"` and `required_action.type` is
- # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
- # tool calls once they're all completed. All outputs must be submitted in a single
- # request.
+ # When a run has the `status: "requires_action"` and `required_action.type` is
+ # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+ # tool calls once they're all completed. All outputs must be submitted in a single
+ # request.
sig do
params(
run_id: String,
@@ -550,14 +550,14 @@ module OpenAI
# Path param: The ID of the run that requires the tool output submission.
run_id,
# Path param: The ID of the
- # [thread](https://platform.openai.com/docs/api-reference/threads) to which this
- # run belongs.
+ # [thread](https://platform.openai.com/docs/api-reference/threads) to which this
+ # run belongs.
thread_id:,
# Body param: A list of tools for which the outputs are being submitted.
tool_outputs:,
# There is no need to provide `stream:`. Instead, use
- # `#submit_tool_outputs_stream_raw` or `#submit_tool_outputs` for streaming and
- # non-streaming use cases, respectively.
+ # `#submit_tool_outputs_stream_raw` or `#submit_tool_outputs` for streaming and
+ # non-streaming use cases, respectively.
stream: true,
request_options: {}
); end
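Submitting tool outputs closes the `requires_action` loop described above. A sketch (hypothetical IDs; one entry per pending tool call):

    run = client.beta.threads.runs.submit_tool_outputs(
      "run_123",                          # a run whose status is "requires_action"
      thread_id: "thread_abc",
      tool_outputs: [
        {tool_call_id: "call_1", output: "72 degrees and sunny"}
      ]
    )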
diff --git a/rbi/lib/openai/resources/beta/threads/runs/steps.rbi b/rbi/lib/openai/resources/beta/threads/runs/steps.rbi
index 86935b35..949e7d14 100644
--- a/rbi/lib/openai/resources/beta/threads/runs/steps.rbi
+++ b/rbi/lib/openai/resources/beta/threads/runs/steps.rbi
@@ -25,13 +25,13 @@ module OpenAI
# Path param: The ID of the run to which the run step belongs.
run_id:,
# Query param: A list of additional fields to include in the response. Currently
- # the only supported value is
- # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
- # search result content.
+ # the only supported value is
+ # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+ # search result content.
#
- # See the
- # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
- # for more information.
+ # See the
+ # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+ # for more information.
include: nil,
request_options: {}
); end
@@ -55,29 +55,29 @@ module OpenAI
# Path param: The ID of the thread the run and run steps belong to.
thread_id:,
# Query param: A cursor for use in pagination. `after` is an object ID that
- # defines your place in the list. For instance, if you make a list request and
- # receive 100 objects, ending with obj_foo, your subsequent call can include
- # after=obj_foo in order to fetch the next page of the list.
+ # defines your place in the list. For instance, if you make a list request and
+ # receive 100 objects, ending with obj_foo, your subsequent call can include
+ # after=obj_foo in order to fetch the next page of the list.
after: nil,
# Query param: A cursor for use in pagination. `before` is an object ID that
- # defines your place in the list. For instance, if you make a list request and
- # receive 100 objects, starting with obj_foo, your subsequent call can include
- # before=obj_foo in order to fetch the previous page of the list.
+ # defines your place in the list. For instance, if you make a list request and
+ # receive 100 objects, starting with obj_foo, your subsequent call can include
+ # before=obj_foo in order to fetch the previous page of the list.
before: nil,
# Query param: A list of additional fields to include in the response. Currently
- # the only supported value is
- # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
- # search result content.
+ # the only supported value is
+ # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+ # search result content.
#
- # See the
- # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
- # for more information.
+ # See the
+ # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+ # for more information.
include: nil,
# Query param: A limit on the number of objects to be returned. Limit can range
- # between 1 and 100, and the default is 20.
+ # between 1 and 100, and the default is 20.
limit: nil,
# Query param: Sort order by the `created_at` timestamp of the objects. `asc` for
- # ascending order and `desc` for descending order.
+ # ascending order and `desc` for descending order.
order: nil,
request_options: {}
); end
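The `include` query parameter above is the only documented way to pull file-search result content back with run steps. A sketch (hypothetical IDs; the include string is the single value the comment documents as supported):

    steps = client.beta.threads.runs.steps.list(
      "run_123",
      thread_id: "thread_abc",
      include: ["step_details.tool_calls[*].file_search.results[*].content"],
      order: :asc
    )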
diff --git a/rbi/lib/openai/resources/chat/completions.rbi b/rbi/lib/openai/resources/chat/completions.rbi
index 34c8a36d..672459b2 100644
--- a/rbi/lib/openai/resources/chat/completions.rbi
+++ b/rbi/lib/openai/resources/chat/completions.rbi
@@ -9,23 +9,23 @@ module OpenAI
# See {OpenAI::Resources::Chat::Completions#stream_raw} for streaming counterpart.
#
- # **Starting a new project?** We recommend trying
- # [Responses](https://platform.openai.com/docs/api-reference/responses) to take
- # advantage of the latest OpenAI platform features. Compare
- # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+ # **Starting a new project?** We recommend trying
+ # [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+ # advantage of the latest OpenAI platform features. Compare
+ # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
#
- # ---
+ # ---
#
- # Creates a model response for the given chat conversation. Learn more in the
- # [text generation](https://platform.openai.com/docs/guides/text-generation),
- # [vision](https://platform.openai.com/docs/guides/vision), and
- # [audio](https://platform.openai.com/docs/guides/audio) guides.
+ # Creates a model response for the given chat conversation. Learn more in the
+ # [text generation](https://platform.openai.com/docs/guides/text-generation),
+ # [vision](https://platform.openai.com/docs/guides/vision), and
+ # [audio](https://platform.openai.com/docs/guides/audio) guides.
#
- # Parameter support can differ depending on the model used to generate the
- # response, particularly for newer reasoning models. Parameters that are only
- # supported for reasoning models are noted below. For the current state of
- # unsupported parameters in reasoning models,
- # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+ # Parameter support can differ depending on the model used to generate the
+ # response, particularly for newer reasoning models. Parameters that are only
+ # supported for reasoning models are noted below. For the current state of
+ # unsupported parameters in reasoning models,
+ # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
sig do
params(
messages: T::Array[
@@ -88,212 +88,212 @@ module OpenAI
end
def create(
# A list of messages comprising the conversation so far. Depending on the
- # [model](https://platform.openai.com/docs/models) you use, different message
- # types (modalities) are supported, like
- # [text](https://platform.openai.com/docs/guides/text-generation),
- # [images](https://platform.openai.com/docs/guides/vision), and
- # [audio](https://platform.openai.com/docs/guides/audio).
+ # [model](https://platform.openai.com/docs/models) you use, different message
+ # types (modalities) are supported, like
+ # [text](https://platform.openai.com/docs/guides/text-generation),
+ # [images](https://platform.openai.com/docs/guides/vision), and
+ # [audio](https://platform.openai.com/docs/guides/audio).
messages:,
# Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- # wide range of models with different capabilities, performance characteristics,
- # and price points. Refer to the
- # [model guide](https://platform.openai.com/docs/models) to browse and compare
- # available models.
+ # wide range of models with different capabilities, performance characteristics,
+ # and price points. Refer to the
+ # [model guide](https://platform.openai.com/docs/models) to browse and compare
+ # available models.
model:,
# Parameters for audio output. Required when audio output is requested with
- # `modalities: ["audio"]`.
- # [Learn more](https://platform.openai.com/docs/guides/audio).
+ # `modalities: ["audio"]`.
+ # [Learn more](https://platform.openai.com/docs/guides/audio).
audio: nil,
# Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- # existing frequency in the text so far, decreasing the model's likelihood to
- # repeat the same line verbatim.
+ # existing frequency in the text so far, decreasing the model's likelihood to
+ # repeat the same line verbatim.
frequency_penalty: nil,
# Deprecated in favor of `tool_choice`.
#
- # Controls which (if any) function is called by the model.
+ # Controls which (if any) function is called by the model.
#
- # `none` means the model will not call a function and instead generates a message.
+ # `none` means the model will not call a function and instead generates a message.
#
- # `auto` means the model can pick between generating a message or calling a
- # function.
+ # `auto` means the model can pick between generating a message or calling a
+ # function.
#
- # Specifying a particular function via `{"name": "my_function"}` forces the model
- # to call that function.
+ # Specifying a particular function via `{"name": "my_function"}` forces the model
+ # to call that function.
#
- # `none` is the default when no functions are present. `auto` is the default if
- # functions are present.
+ # `none` is the default when no functions are present. `auto` is the default if
+ # functions are present.
function_call: nil,
# Deprecated in favor of `tools`.
#
- # A list of functions the model may generate JSON inputs for.
+ # A list of functions the model may generate JSON inputs for.
functions: nil,
# Modify the likelihood of specified tokens appearing in the completion.
#
- # Accepts a JSON object that maps tokens (specified by their token ID in the
- # tokenizer) to an associated bias value from -100 to 100. Mathematically, the
- # bias is added to the logits generated by the model prior to sampling. The exact
- # effect will vary per model, but values between -1 and 1 should decrease or
- # increase likelihood of selection; values like -100 or 100 should result in a ban
- # or exclusive selection of the relevant token.
+ # Accepts a JSON object that maps tokens (specified by their token ID in the
+ # tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ # bias is added to the logits generated by the model prior to sampling. The exact
+ # effect will vary per model, but values between -1 and 1 should decrease or
+ # increase likelihood of selection; values like -100 or 100 should result in a ban
+ # or exclusive selection of the relevant token.
logit_bias: nil,
# Whether to return log probabilities of the output tokens or not. If true,
- # returns the log probabilities of each output token returned in the `content` of
- # `message`.
+ # returns the log probabilities of each output token returned in the `content` of
+ # `message`.
logprobs: nil,
# An upper bound for the number of tokens that can be generated for a completion,
- # including visible output tokens and
- # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+ # including visible output tokens and
+ # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
max_completion_tokens: nil,
# The maximum number of [tokens](/tokenizer) that can be generated in the chat
- # completion. This value can be used to control
- # [costs](https://openai.com/api/pricing/) for text generated via API.
+ # completion. This value can be used to control
+ # [costs](https://openai.com/api/pricing/) for text generated via API.
#
- # This value is now deprecated in favor of `max_completion_tokens`, and is not
- # compatible with
- # [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+ # This value is now deprecated in favor of `max_completion_tokens`, and is not
+ # compatible with
+ # [o1 series models](https://platform.openai.com/docs/guides/reasoning).
max_tokens: nil,
# Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
#
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
metadata: nil,
# Output types that you would like the model to generate. Most models are capable
- # of generating text, which is the default:
+ # of generating text, which is the default:
#
- # `["text"]`
+ # `["text"]`
#
- # The `gpt-4o-audio-preview` model can also be used to
- # [generate audio](https://platform.openai.com/docs/guides/audio). To request that
- # this model generate both text and audio responses, you can use:
+ # The `gpt-4o-audio-preview` model can also be used to
+ # [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+ # this model generate both text and audio responses, you can use:
#
- # `["text", "audio"]`
+ # `["text", "audio"]`
modalities: nil,
# How many chat completion choices to generate for each input message. Note that
- # you will be charged based on the number of generated tokens across all of the
- # choices. Keep `n` as `1` to minimize costs.
+ # you will be charged based on the number of generated tokens across all of the
+ # choices. Keep `n` as `1` to minimize costs.
n: nil,
# Whether to enable
- # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
- # during tool use.
+ # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+ # during tool use.
parallel_tool_calls: nil,
# Static predicted output content, such as the content of a text file that is
- # being regenerated.
+ # being regenerated.
prediction: nil,
# Number between -2.0 and 2.0. Positive values penalize new tokens based on
- # whether they appear in the text so far, increasing the model's likelihood to
- # talk about new topics.
+ # whether they appear in the text so far, increasing the model's likelihood to
+ # talk about new topics.
presence_penalty: nil,
# **o-series models only**
#
- # Constrains effort on reasoning for
- # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- # result in faster responses and fewer tokens used on reasoning in a response.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ # result in faster responses and fewer tokens used on reasoning in a response.
reasoning_effort: nil,
# An object specifying the format that the model must output.
#
- # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- # Outputs which ensures the model will match your supplied JSON schema. Learn more
- # in the
- # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ # Outputs which ensures the model will match your supplied JSON schema. Learn more
+ # in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
#
- # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- # ensures the message the model generates is valid JSON. Using `json_schema` is
- # preferred for models that support it.
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
response_format: nil,
# This feature is in Beta. If specified, our system will make a best effort to
- # sample deterministically, such that repeated requests with the same `seed` and
- # parameters should return the same result. Determinism is not guaranteed, and you
- # should refer to the `system_fingerprint` response parameter to monitor changes
- # in the backend.
+ # sample deterministically, such that repeated requests with the same `seed` and
+ # parameters should return the same result. Determinism is not guaranteed, and you
+ # should refer to the `system_fingerprint` response parameter to monitor changes
+ # in the backend.
seed: nil,
# Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarentee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarentee.
- # - When not set, the default behavior is 'auto'.
- #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # relevant for customers subscribed to the scale tier service:
+ #
+ # - If set to 'auto', and the Project is Scale tier enabled, the system will
+ # utilize scale tier credits until they are exhausted.
+ # - If set to 'auto', and the Project is not Scale tier enabled, the request will
+ # be processed using the default service tier with a lower uptime SLA and no
+ # latency guarantee.
+ # - If set to 'default', the request will be processed using the default service
+ # tier with a lower uptime SLA and no latency guarantee.
+ # - When not set, the default behavior is 'auto'.
+ #
+ # When this parameter is set, the response body will include the `service_tier`
+ # utilized.
service_tier: nil,
# Up to 4 sequences where the API will stop generating further tokens. The
- # returned text will not contain the stop sequence.
+ # returned text will not contain the stop sequence.
stop: nil,
# Whether or not to store the output of this chat completion request for use in
- # our [model distillation](https://platform.openai.com/docs/guides/distillation)
- # or [evals](https://platform.openai.com/docs/guides/evals) products.
+ # our [model distillation](https://platform.openai.com/docs/guides/distillation)
+ # or [evals](https://platform.openai.com/docs/guides/evals) products.
store: nil,
# Options for streaming response. Only set this when you set `stream: true`.
stream_options: nil,
# What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- # make the output more random, while lower values like 0.2 will make it more
- # focused and deterministic. We generally recommend altering this or `top_p` but
- # not both.
+ # make the output more random, while lower values like 0.2 will make it more
+ # focused and deterministic. We generally recommend altering this or `top_p` but
+ # not both.
temperature: nil,
# Controls which (if any) tool is called by the model. `none` means the model will
- # not call any tool and instead generates a message. `auto` means the model can
- # pick between generating a message or calling one or more tools. `required` means
- # the model must call one or more tools. Specifying a particular tool via
- # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- # call that tool.
- #
- # `none` is the default when no tools are present. `auto` is the default if tools
- # are present.
+ # not call any tool and instead generates a message. `auto` means the model can
+ # pick between generating a message or calling one or more tools. `required` means
+ # the model must call one or more tools. Specifying a particular tool via
+ # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ # call that tool.
+ #
+ # `none` is the default when no tools are present. `auto` is the default if tools
+ # are present.
tool_choice: nil,
# A list of tools the model may call. Currently, only functions are supported as a
- # tool. Use this to provide a list of functions the model may generate JSON inputs
- # for. A max of 128 functions are supported.
+ # tool. Use this to provide a list of functions the model may generate JSON inputs
+ # for. A max of 128 functions are supported.
tools: nil,
# An integer between 0 and 20 specifying the number of most likely tokens to
- # return at each token position, each with an associated log probability.
- # `logprobs` must be set to `true` if this parameter is used.
+ # return at each token position, each with an associated log probability.
+ # `logprobs` must be set to `true` if this parameter is used.
top_logprobs: nil,
# An alternative to sampling with temperature, called nucleus sampling, where the
- # model considers the results of the tokens with top_p probability mass. So 0.1
- # means only the tokens comprising the top 10% probability mass are considered.
+ # model considers the results of the tokens with top_p probability mass. So 0.1
+ # means only the tokens comprising the top 10% probability mass are considered.
#
- # We generally recommend altering this or `temperature` but not both.
+ # We generally recommend altering this or `temperature` but not both.
top_p: nil,
# A unique identifier representing your end-user, which can help OpenAI to monitor
- # and detect abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
user: nil,
# This tool searches the web for relevant results to use in a response. Learn more
- # about the
- # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ # about the
+ # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
web_search_options: nil,
# There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create`
- # for streaming and non-streaming use cases, respectively.
+ # for streaming and non-streaming use cases, respectively.
stream: false,
request_options: {}
); end
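Pulling the documented parameters together, a minimal non-streaming chat completion (same assumed `client`; note that JSON mode requires the prompt itself to ask for JSON, per the **Important** note above):

    completion = client.chat.completions.create(
      model: "gpt-4o",
      messages: [{role: :user, content: "Reply with a JSON greeting."}],
      response_format: {type: :json_object},
      seed: 42    # best-effort determinism; monitor `system_fingerprint` for changes
    )
    puts completion.choices.first.message.content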
# See {OpenAI::Resources::Chat::Completions#create} for non-streaming counterpart.
#
- # **Starting a new project?** We recommend trying
- # [Responses](https://platform.openai.com/docs/api-reference/responses) to take
- # advantage of the latest OpenAI platform features. Compare
- # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+ # **Starting a new project?** We recommend trying
+ # [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+ # advantage of the latest OpenAI platform features. Compare
+ # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
#
- # ---
+ # ---
#
- # Creates a model response for the given chat conversation. Learn more in the
- # [text generation](https://platform.openai.com/docs/guides/text-generation),
- # [vision](https://platform.openai.com/docs/guides/vision), and
- # [audio](https://platform.openai.com/docs/guides/audio) guides.
+ # Creates a model response for the given chat conversation. Learn more in the
+ # [text generation](https://platform.openai.com/docs/guides/text-generation),
+ # [vision](https://platform.openai.com/docs/guides/vision), and
+ # [audio](https://platform.openai.com/docs/guides/audio) guides.
#
- # Parameter support can differ depending on the model used to generate the
- # response, particularly for newer reasoning models. Parameters that are only
- # supported for reasoning models are noted below. For the current state of
- # unsupported parameters in reasoning models,
- # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+ # Parameter support can differ depending on the model used to generate the
+ # response, particularly for newer reasoning models. Parameters that are only
+ # supported for reasoning models are noted below. For the current state of
+ # unsupported parameters in reasoning models,
+ # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
sig do
params(
messages: T::Array[
@@ -356,195 +356,195 @@ module OpenAI
end
def stream_raw(
# A list of messages comprising the conversation so far. Depending on the
- # [model](https://platform.openai.com/docs/models) you use, different message
- # types (modalities) are supported, like
- # [text](https://platform.openai.com/docs/guides/text-generation),
- # [images](https://platform.openai.com/docs/guides/vision), and
- # [audio](https://platform.openai.com/docs/guides/audio).
+ # [model](https://platform.openai.com/docs/models) you use, different message
+ # types (modalities) are supported, like
+ # [text](https://platform.openai.com/docs/guides/text-generation),
+ # [images](https://platform.openai.com/docs/guides/vision), and
+ # [audio](https://platform.openai.com/docs/guides/audio).
messages:,
# Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- # wide range of models with different capabilities, performance characteristics,
- # and price points. Refer to the
- # [model guide](https://platform.openai.com/docs/models) to browse and compare
- # available models.
+ # wide range of models with different capabilities, performance characteristics,
+ # and price points. Refer to the
+ # [model guide](https://platform.openai.com/docs/models) to browse and compare
+ # available models.
model:,
# Parameters for audio output. Required when audio output is requested with
- # `modalities: ["audio"]`.
- # [Learn more](https://platform.openai.com/docs/guides/audio).
+ # `modalities: ["audio"]`.
+ # [Learn more](https://platform.openai.com/docs/guides/audio).
audio: nil,
# Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- # existing frequency in the text so far, decreasing the model's likelihood to
- # repeat the same line verbatim.
+ # existing frequency in the text so far, decreasing the model's likelihood to
+ # repeat the same line verbatim.
frequency_penalty: nil,
# Deprecated in favor of `tool_choice`.
#
- # Controls which (if any) function is called by the model.
+ # Controls which (if any) function is called by the model.
#
- # `none` means the model will not call a function and instead generates a message.
+ # `none` means the model will not call a function and instead generates a message.
#
- # `auto` means the model can pick between generating a message or calling a
- # function.
+ # `auto` means the model can pick between generating a message or calling a
+ # function.
#
- # Specifying a particular function via `{"name": "my_function"}` forces the model
- # to call that function.
+ # Specifying a particular function via `{"name": "my_function"}` forces the model
+ # to call that function.
#
- # `none` is the default when no functions are present. `auto` is the default if
- # functions are present.
+ # `none` is the default when no functions are present. `auto` is the default if
+ # functions are present.
function_call: nil,
# Deprecated in favor of `tools`.
#
- # A list of functions the model may generate JSON inputs for.
+ # A list of functions the model may generate JSON inputs for.
functions: nil,
# Modify the likelihood of specified tokens appearing in the completion.
#
- # Accepts a JSON object that maps tokens (specified by their token ID in the
- # tokenizer) to an associated bias value from -100 to 100. Mathematically, the
- # bias is added to the logits generated by the model prior to sampling. The exact
- # effect will vary per model, but values between -1 and 1 should decrease or
- # increase likelihood of selection; values like -100 or 100 should result in a ban
- # or exclusive selection of the relevant token.
+ # Accepts a JSON object that maps tokens (specified by their token ID in the
+ # tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ # bias is added to the logits generated by the model prior to sampling. The exact
+ # effect will vary per model, but values between -1 and 1 should decrease or
+ # increase likelihood of selection; values like -100 or 100 should result in a ban
+ # or exclusive selection of the relevant token.
logit_bias: nil,
# Whether to return log probabilities of the output tokens or not. If true,
- # returns the log probabilities of each output token returned in the `content` of
- # `message`.
+ # returns the log probabilities of each output token returned in the `content` of
+ # `message`.
logprobs: nil,
# An upper bound for the number of tokens that can be generated for a completion,
- # including visible output tokens and
- # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+ # including visible output tokens and
+ # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
max_completion_tokens: nil,
# The maximum number of [tokens](/tokenizer) that can be generated in the chat
- # completion. This value can be used to control
- # [costs](https://openai.com/api/pricing/) for text generated via API.
+ # completion. This value can be used to control
+ # [costs](https://openai.com/api/pricing/) for text generated via API.
#
- # This value is now deprecated in favor of `max_completion_tokens`, and is not
- # compatible with
- # [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+ # This value is now deprecated in favor of `max_completion_tokens`, and is not
+ # compatible with
+ # [o1 series models](https://platform.openai.com/docs/guides/reasoning).
max_tokens: nil,
# Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
#
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
metadata: nil,
# Output types that you would like the model to generate. Most models are capable
- # of generating text, which is the default:
+ # of generating text, which is the default:
#
- # `["text"]`
+ # `["text"]`
#
- # The `gpt-4o-audio-preview` model can also be used to
- # [generate audio](https://platform.openai.com/docs/guides/audio). To request that
- # this model generate both text and audio responses, you can use:
+ # The `gpt-4o-audio-preview` model can also be used to
+ # [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+ # this model generate both text and audio responses, you can use:
#
- # `["text", "audio"]`
+ # `["text", "audio"]`
modalities: nil,
# How many chat completion choices to generate for each input message.
Note that - # you will be charged based on the number of generated tokens across all of the - # choices. Keep `n` as `1` to minimize costs. + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. n: nil, # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, # Static predicted output content, such as the content of a text file that is - # being regenerated. + # being regenerated. prediction: nil, # Number between -2.0 and 2.0. Positive values penalize new tokens based on - # whether they appear in the text so far, increasing the model's likelihood to - # talk about new topics. + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. presence_penalty: nil, # **o-series models only** # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, # An object specifying the format that the model must output. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. response_format: nil, # This feature is in Beta. If specified, our system will make a best effort to - # sample deterministically, such that repeated requests with the same `seed` and - # parameters should return the same result. Determinism is not guaranteed, and you - # should refer to the `system_fingerprint` response parameter to monitor changes - # in the backend. + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. seed: nil, # Specifies the latency tier to use for processing the request. 
This parameter is - # relevant for customers subscribed to the scale tier service: - # - # - If set to 'auto', and the Project is Scale tier enabled, the system will - # utilize scale tier credits until they are exhausted. - # - If set to 'auto', and the Project is not Scale tier enabled, the request will - # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. - # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. - # - When not set, the default behavior is 'auto'. - # - # When this parameter is set, the response body will include the `service_tier` - # utilized. + # relevant for customers subscribed to the scale tier service: + # + # - If set to 'auto', and the Project is Scale tier enabled, the system will + # utilize scale tier credits until they are exhausted. + # - If set to 'auto', and the Project is not Scale tier enabled, the request will + # be processed using the default service tier with a lower uptime SLA and no + # latency guarantee. + # - If set to 'default', the request will be processed using the default service + # tier with a lower uptime SLA and no latency guarantee. + # - When not set, the default behavior is 'auto'. + # + # When this parameter is set, the response body will include the `service_tier` + # utilized. service_tier: nil, # Up to 4 sequences where the API will stop generating further tokens. The - # returned text will not contain the stop sequence. + # returned text will not contain the stop sequence. stop: nil, # Whether or not to store the output of this chat completion request for use in - # our [model distillation](https://platform.openai.com/docs/guides/distillation) - # or [evals](https://platform.openai.com/docs/guides/evals) products. + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. store: nil, # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. temperature: nil, # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tool and instead generates a message. `auto` means the model can - # pick between generating a message or calling one or more tools. `required` means - # the model must call one or more tools. Specifying a particular tool via - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. - # - # `none` is the default when no tools are present. `auto` is the default if tools - # are present. + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present.
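As a rough orientation for the `stream_raw` signature being re-wrapped in this hunk, a minimal usage sketch follows. Only `stream_raw`, `messages:`, and `model:` come from the documented signature; the configured `client` and the chunk accessors (`choices`, `delta`, `content`) are assumptions based on the chunk schema.

    require "openai"

    client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

    # stream_raw fixes `stream: true` internally and yields chunks as they arrive
    stream = client.chat.completions.stream_raw(
      messages: [{role: :user, content: "Say hello."}],
      model: "gpt-4o"
    )
    stream.each do |chunk|
      print chunk.choices.first&.delta&.content
    end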
tool_choice: nil, # A list of tools the model may call. Currently, only functions are supported as a - # tool. Use this to provide a list of functions the model may generate JSON inputs - # for. A max of 128 functions are supported. + # tool. Use this to provide a list of functions the model may generate JSON inputs + # for. A max of 128 functions are supported. tools: nil, # An integer between 0 and 20 specifying the number of most likely tokens to - # return at each token position, each with an associated log probability. - # `logprobs` must be set to `true` if this parameter is used. + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. top_logprobs: nil, # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. top_p: nil, # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, # This tool searches the web for relevant results to use in a response. Learn more - # about the - # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). web_search_options: nil, # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create` - # for streaming and non-streaming use cases, respectively. + # for streaming and non-streaming use cases, respectively. stream: true, request_options: {} ); end # Get a stored chat completion. Only Chat Completions that have been created with - # the `store` parameter set to `true` will be returned. + # the `store` parameter set to `true` will be returned. sig do params( completion_id: String, @@ -558,8 +558,8 @@ module OpenAI request_options: {} ); end # Modify a stored chat completion. Only Chat Completions that have been created - # with the `store` parameter set to `true` can be modified. Currently, the only - # supported modification is to update the `metadata` field. + # with the `store` parameter set to `true` can be modified. Currently, the only + # supported modification is to update the `metadata` field. sig do params( completion_id: String, @@ -572,16 +572,16 @@ module OpenAI # The ID of the chat completion to update. completion_id, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. 
Values are strings with + # a maximum length of 512 characters. metadata:, request_options: {} ); end # List stored Chat Completions. Only Chat Completions that have been stored with - # the `store` parameter set to `true` will be returned. + # the `store` parameter set to `true` will be returned. sig do params( after: String, @@ -600,17 +600,17 @@ module OpenAI limit: nil, # A list of metadata keys to filter the Chat Completions by. Example: # - # `metadata[key1]=value1&metadata[key2]=value2` + # `metadata[key1]=value1&metadata[key2]=value2` metadata: nil, # The model used to generate the Chat Completions. model: nil, # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or - # `desc` for descending order. Defaults to `asc`. + # `desc` for descending order. Defaults to `asc`. order: nil, request_options: {} ); end # Delete a stored chat completion. Only Chat Completions that have been created - # with the `store` parameter set to `true` can be deleted. + # with the `store` parameter set to `true` can be deleted. sig do params( completion_id: String, diff --git a/rbi/lib/openai/resources/chat/completions/messages.rbi b/rbi/lib/openai/resources/chat/completions/messages.rbi index bf8cde9f..53d6538e 100644 --- a/rbi/lib/openai/resources/chat/completions/messages.rbi +++ b/rbi/lib/openai/resources/chat/completions/messages.rbi @@ -6,7 +6,7 @@ module OpenAI class Completions class Messages # Get the messages in a stored chat completion. Only Chat Completions that have - # been created with the `store` parameter set to `true` will be returned. + # been created with the `store` parameter set to `true` will be returned. sig do params( completion_id: String, @@ -25,7 +25,7 @@ module OpenAI # Number of messages to retrieve. limit: nil, # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` - # for descending order. Defaults to `asc`. + # for descending order. Defaults to `asc`. order: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/completions.rbi b/rbi/lib/openai/resources/completions.rbi index a91aba7f..f57faf40 100644 --- a/rbi/lib/openai/resources/completions.rbi +++ b/rbi/lib/openai/resources/completions.rbi @@ -5,7 +5,7 @@ module OpenAI class Completions # See {OpenAI::Resources::Completions#create_streaming} for streaming counterpart. # - # Creates a completion for the provided prompt and parameters. + # Creates a completion for the provided prompt and parameters. sig do params( model: T.any(String, OpenAI::Models::CompletionCreateParams::Model::OrSymbol), @@ -32,116 +32,116 @@ module OpenAI end def create( # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model:, # The prompt(s) to generate completions for, encoded as a string, array of - # strings, array of tokens, or array of token arrays. + # strings, array of tokens, or array of token arrays. # - # Note that <|endoftext|> is the document separator that the model sees during - # training, so if a prompt is not specified the model will generate as if from the - # beginning of a new document. 
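The stored-completion methods re-wrapped above (`retrieve`, `update`, `list`, `delete`) chain together once a completion is created with `store: true`; a sketch, assuming the same configured `client` (the IDs are positional per the signatures, and `auto_paging_each` is an assumption about the returned page object):

    completion = client.chat.completions.create(
      messages: [{role: :user, content: "Remember this."}],
      model: "gpt-4o",
      store: true
    )

    client.chat.completions.retrieve(completion.id)
    client.chat.completions.update(completion.id, metadata: {topic: "demo"})
    client.chat.completions.list(limit: 10).auto_paging_each { |c| puts c.id }
    client.chat.completions.delete(completion.id)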
+ # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. prompt:, # Generates `best_of` completions server-side and returns the "best" (the one with - # the highest log probability per token). Results cannot be streamed. + # the highest log probability per token). Results cannot be streamed. # - # When used with `n`, `best_of` controls the number of candidate completions and - # `n` specifies how many to return – `best_of` must be greater than `n`. + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. # - # **Note:** Because this parameter generates many completions, it can quickly - # consume your token quota. Use carefully and ensure that you have reasonable - # settings for `max_tokens` and `stop`. + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. best_of: nil, # Echo back the prompt in addition to the completion echo: nil, # Number between -2.0 and 2.0. Positive values penalize new tokens based on their - # existing frequency in the text so far, decreasing the model's likelihood to - # repeat the same line verbatim. + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. # - # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) frequency_penalty: nil, # Modify the likelihood of specified tokens appearing in the completion. # - # Accepts a JSON object that maps tokens (specified by their token ID in the GPT - # tokenizer) to an associated bias value from -100 to 100. You can use this - # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. - # Mathematically, the bias is added to the logits generated by the model prior to - # sampling. The exact effect will vary per model, but values between -1 and 1 - # should decrease or increase likelihood of selection; values like -100 or 100 - # should result in a ban or exclusive selection of the relevant token. + # Accepts a JSON object that maps tokens (specified by their token ID in the GPT + # tokenizer) to an associated bias value from -100 to 100. You can use this + # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + # Mathematically, the bias is added to the logits generated by the model prior to + # sampling. The exact effect will vary per model, but values between -1 and 1 + # should decrease or increase likelihood of selection; values like -100 or 100 + # should result in a ban or exclusive selection of the relevant token. # - # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token - # from being generated. + # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + # from being generated. logit_bias: nil, # Include the log probabilities on the `logprobs` most likely output tokens, as - # well the chosen tokens. For example, if `logprobs` is 5, the API will return a - # list of the 5 most likely tokens. 
The API will always return the `logprob` of - # the sampled token, so there may be up to `logprobs+1` elements in the response. + # well as the chosen tokens. For example, if `logprobs` is 5, the API will return a + # list of the 5 most likely tokens. The API will always return the `logprob` of + # the sampled token, so there may be up to `logprobs+1` elements in the response. # - # The maximum value for `logprobs` is 5. + # The maximum value for `logprobs` is 5. logprobs: nil, # The maximum number of [tokens](/tokenizer) that can be generated in the - # completion. + # completion. # - # The token count of your prompt plus `max_tokens` cannot exceed the model's - # context length. - # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - # for counting tokens. + # The token count of your prompt plus `max_tokens` cannot exceed the model's + # context length. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. max_tokens: nil, # How many completions to generate for each prompt. # - # **Note:** Because this parameter generates many completions, it can quickly - # consume your token quota. Use carefully and ensure that you have reasonable - # settings for `max_tokens` and `stop`. + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. n: nil, # Number between -2.0 and 2.0. Positive values penalize new tokens based on - # whether they appear in the text so far, increasing the model's likelihood to - # talk about new topics. + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. # - # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) presence_penalty: nil, # If specified, our system will make a best effort to sample deterministically, - # such that repeated requests with the same `seed` and parameters should return - # the same result. + # such that repeated requests with the same `seed` and parameters should return + # the same result. # - # Determinism is not guaranteed, and you should refer to the `system_fingerprint` - # response parameter to monitor changes in the backend. + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. seed: nil, # Up to 4 sequences where the API will stop generating further tokens. The - # returned text will not contain the stop sequence. + # returned text will not contain the stop sequence. stop: nil, # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, # The suffix that comes after a completion of inserted text. # - # This parameter is only supported for `gpt-3.5-turbo-instruct`. + # This parameter is only supported for `gpt-3.5-turbo-instruct`. suffix: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. # - # We generally recommend altering this or `top_p` but not both.
+ # We generally recommend altering this or `top_p` but not both. temperature: nil, # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. top_p: nil, # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, # There is no need to provide `stream:`. Instead, use `#create_streaming` or - # `#create` for streaming and non-streaming use cases, respectively. + # `#create` for streaming and non-streaming use cases, respectively. stream: false, request_options: {} ); end # See {OpenAI::Resources::Completions#create} for non-streaming counterpart. # - # Creates a completion for the provided prompt and parameters. + # Creates a completion for the provided prompt and parameters. sig do params( model: T.any(String, OpenAI::Models::CompletionCreateParams::Model::OrSymbol), @@ -168,110 +168,110 @@ module OpenAI end def create_streaming( # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model:, # The prompt(s) to generate completions for, encoded as a string, array of - # strings, array of tokens, or array of token arrays. + # strings, array of tokens, or array of token arrays. # - # Note that <|endoftext|> is the document separator that the model sees during - # training, so if a prompt is not specified the model will generate as if from the - # beginning of a new document. + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. prompt:, # Generates `best_of` completions server-side and returns the "best" (the one with - # the highest log probability per token). Results cannot be streamed. + # the highest log probability per token). Results cannot be streamed. # - # When used with `n`, `best_of` controls the number of candidate completions and - # `n` specifies how many to return – `best_of` must be greater than `n`. + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. # - # **Note:** Because this parameter generates many completions, it can quickly - # consume your token quota. Use carefully and ensure that you have reasonable - # settings for `max_tokens` and `stop`. 
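Since the non-streaming `Completions#create` closed just above, a minimal sketch of it may help; the model and prompt values are illustrative, and `choices.first.text` follows the public `Completion` schema:

    response = client.completions.create(
      model: "gpt-3.5-turbo-instruct",
      prompt: "Write a one-line haiku about diffs.",
      max_tokens: 32
    )
    puts response.choices.first.text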
+ # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. best_of: nil, # Echo back the prompt in addition to the completion echo: nil, # Number between -2.0 and 2.0. Positive values penalize new tokens based on their - # existing frequency in the text so far, decreasing the model's likelihood to - # repeat the same line verbatim. + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. # - # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) frequency_penalty: nil, # Modify the likelihood of specified tokens appearing in the completion. # - # Accepts a JSON object that maps tokens (specified by their token ID in the GPT - # tokenizer) to an associated bias value from -100 to 100. You can use this - # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. - # Mathematically, the bias is added to the logits generated by the model prior to - # sampling. The exact effect will vary per model, but values between -1 and 1 - # should decrease or increase likelihood of selection; values like -100 or 100 - # should result in a ban or exclusive selection of the relevant token. + # Accepts a JSON object that maps tokens (specified by their token ID in the GPT + # tokenizer) to an associated bias value from -100 to 100. You can use this + # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + # Mathematically, the bias is added to the logits generated by the model prior to + # sampling. The exact effect will vary per model, but values between -1 and 1 + # should decrease or increase likelihood of selection; values like -100 or 100 + # should result in a ban or exclusive selection of the relevant token. # - # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token - # from being generated. + # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + # from being generated. logit_bias: nil, # Include the log probabilities on the `logprobs` most likely output tokens, as - # well the chosen tokens. For example, if `logprobs` is 5, the API will return a - # list of the 5 most likely tokens. The API will always return the `logprob` of - # the sampled token, so there may be up to `logprobs+1` elements in the response. + # well as the chosen tokens. For example, if `logprobs` is 5, the API will return a + # list of the 5 most likely tokens. The API will always return the `logprob` of + # the sampled token, so there may be up to `logprobs+1` elements in the response. # - # The maximum value for `logprobs` is 5. + # The maximum value for `logprobs` is 5. logprobs: nil, # The maximum number of [tokens](/tokenizer) that can be generated in the - # completion. + # completion. # - # The token count of your prompt plus `max_tokens` cannot exceed the model's - # context length. - # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - # for counting tokens. + # The token count of your prompt plus `max_tokens` cannot exceed the model's + # context length. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens.
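The `logit_bias` comment above already names a concrete recipe; spelled out as a sketch, banning the `<|endoftext|>` token (ID 50256 in the GPT tokenizer, per that comment) would look like this, with the hash shape assumed to be string keys mapping to integer biases:

    client.completions.create(
      model: "gpt-3.5-turbo-instruct",
      prompt: "Once upon a time",
      logit_bias: {"50256" => -100}  # -100 effectively bans the token
    )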
max_tokens: nil, # How many completions to generate for each prompt. # - # **Note:** Because this parameter generates many completions, it can quickly - # consume your token quota. Use carefully and ensure that you have reasonable - # settings for `max_tokens` and `stop`. + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. n: nil, # Number between -2.0 and 2.0. Positive values penalize new tokens based on - # whether they appear in the text so far, increasing the model's likelihood to - # talk about new topics. + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. # - # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) presence_penalty: nil, # If specified, our system will make a best effort to sample deterministically, - # such that repeated requests with the same `seed` and parameters should return - # the same result. + # such that repeated requests with the same `seed` and parameters should return + # the same result. # - # Determinism is not guaranteed, and you should refer to the `system_fingerprint` - # response parameter to monitor changes in the backend. + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. seed: nil, # Up to 4 sequences where the API will stop generating further tokens. The - # returned text will not contain the stop sequence. + # returned text will not contain the stop sequence. stop: nil, # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, # The suffix that comes after a completion of inserted text. # - # This parameter is only supported for `gpt-3.5-turbo-instruct`. + # This parameter is only supported for `gpt-3.5-turbo-instruct`. suffix: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. # - # We generally recommend altering this or `top_p` but not both. + # We generally recommend altering this or `top_p` but not both. temperature: nil, # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. top_p: nil, # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, # There is no need to provide `stream:`. 
Instead, use `#create_streaming` or - # `#create` for streaming and non-streaming use cases, respectively. + # `#create` for streaming and non-streaming use cases, respectively. stream: true, request_options: {} ); end diff --git a/rbi/lib/openai/resources/embeddings.rbi b/rbi/lib/openai/resources/embeddings.rbi index f6e4ac51..ae6d042f 100644 --- a/rbi/lib/openai/resources/embeddings.rbi +++ b/rbi/lib/openai/resources/embeddings.rbi @@ -17,29 +17,29 @@ module OpenAI end def create( # Input text to embed, encoded as a string or array of tokens. To embed multiple - # inputs in a single request, pass an array of strings or array of token arrays. - # The input must not exceed the max input tokens for the model (8192 tokens for - # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 - # dimensions or less. - # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - # for counting tokens. Some models may also impose a limit on total number of - # tokens summed across inputs. + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. Some models may also impose a limit on total number of + # tokens summed across inputs. input:, # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model:, # The number of dimensions the resulting output embeddings should have. Only - # supported in `text-embedding-3` and later models. + # supported in `text-embedding-3` and later models. dimensions: nil, # The format to return the embeddings in. Can be either `float` or - # [`base64`](https://pypi.org/project/pybase64/). + # [`base64`](https://pypi.org/project/pybase64/). encoding_format: nil, # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/files.rbi b/rbi/lib/openai/resources/files.rbi index 56bf2af9..482ed085 100644 --- a/rbi/lib/openai/resources/files.rbi +++ b/rbi/lib/openai/resources/files.rbi @@ -4,26 +4,26 @@ module OpenAI module Resources class Files # Upload a file that can be used across various endpoints. Individual files can be - # up to 512 MB, and the size of all files uploaded by one organization can be up - # to 100 GB. + # up to 512 MB, and the size of all files uploaded by one organization can be up + # to 100 GB. # - # The Assistants API supports files up to 2 million tokens and of specific file - # types. See the - # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for - # details. 
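The `Embeddings#create` hunk above amounts to a single call; a sketch, where the model name is illustrative and `data.first.embedding` follows the documented response shape:

    embedding = client.embeddings.create(
      input: "The food was delicious and the waiter was friendly.",
      model: "text-embedding-3-small"
    )
    puts embedding.data.first.embedding.length  # vector dimensionality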
+ # The Assistants API supports files up to 2 million tokens and of specific file + # types. See the + # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for + # details. # - # The Fine-tuning API only supports `.jsonl` files. The input also has certain - # required formats for fine-tuning - # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - # models. + # The Fine-tuning API only supports `.jsonl` files. The input also has certain + # required formats for fine-tuning + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # models. # - # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also - # has a specific required - # [format](https://platform.openai.com/docs/api-reference/batch/request-input). + # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also + # has a specific required + # [format](https://platform.openai.com/docs/api-reference/batch/request-input). # - # Please [contact us](https://help.openai.com/) if you need to increase these - # storage limits. + # Please [contact us](https://help.openai.com/) if you need to increase these + # storage limits. sig do params( file: T.any(IO, StringIO), @@ -36,9 +36,9 @@ module OpenAI # The File object (not file name) to be uploaded. file:, # The intended purpose of the uploaded file. One of: - `assistants`: Used in the - # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for - # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: - # Flexible file type for any purpose - `evals`: Used for eval data sets + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets purpose:, request_options: {} ); end @@ -68,15 +68,15 @@ module OpenAI end def list( # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. after: nil, # A limit on the number of objects to be returned. Limit can range between 1 and - # 10,000, and the default is 10,000. + # 10,000, and the default is 10,000. limit: nil, # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. order: nil, # Only return files with the given purpose. purpose: nil, diff --git a/rbi/lib/openai/resources/fine_tuning/jobs.rbi b/rbi/lib/openai/resources/fine_tuning/jobs.rbi index 90b307a0..b39d1d85 100644 --- a/rbi/lib/openai/resources/fine_tuning/jobs.rbi +++ b/rbi/lib/openai/resources/fine_tuning/jobs.rbi @@ -8,12 +8,12 @@ module OpenAI attr_reader :checkpoints # Creates a fine-tuning job which begins the process of creating a new model from - # a given dataset. + # a given dataset. 
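The `Files#create` hunk above and the `FineTuning::Jobs#create` signature below compose into the usual fine-tuning pipeline; a sketch, where the file name and model are placeholders and `purpose`/`training_file:` come from the documented parameters:

    training = client.files.create(
      file: File.open("train.jsonl"),
      purpose: :"fine-tune"
    )

    job = client.fine_tuning.jobs.create(
      model: "gpt-4o-mini",
      training_file: training.id
    )
    puts job.status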
# - # Response includes details of the enqueued job including job status and the name - # of the fine-tuned models once complete. + # Response includes details of the enqueued job including job status and the name + # of the fine-tuned models once complete. # - # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) sig do params( model: T.any(String, OpenAI::Models::FineTuning::JobCreateParams::Model::OrSymbol), @@ -33,68 +33,68 @@ module OpenAI end def create( # The name of the model to fine-tune. You can select one of the - # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). model:, # The ID of an uploaded file that contains training data. # - # See [upload file](https://platform.openai.com/docs/api-reference/files/create) - # for how to upload a file. + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. # - # Your dataset must be formatted as a JSONL file. Additionally, you must upload - # your file with the purpose `fine-tune`. + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. # - # The contents of the file should differ depending on if the model uses the - # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), - # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - # format, or if the fine-tuning method uses the - # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) - # format. + # The contents of the file should differ depending on if the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or if the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. # - # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - # for more details. + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. training_file:, # The hyperparameters used for the fine-tuning job. This value is now deprecated - # in favor of `method`, and should be passed in under the `method` parameter. + # in favor of `method`, and should be passed in under the `method` parameter. hyperparameters: nil, # A list of integrations to enable for your fine-tuning job. integrations: nil, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # The method used for fine-tuning. method_: nil, # The seed controls the reproducibility of the job. 
Passing in the same seed and - # job parameters should produce the same results, but may differ in rare cases. If - # a seed is not specified, one will be generated for you. + # job parameters should produce the same results, but may differ in rare cases. If + # a seed is not specified, one will be generated for you. seed: nil, # A string of up to 64 characters that will be added to your fine-tuned model - # name. + # name. # - # For example, a `suffix` of "custom-model-name" would produce a model name like - # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. suffix: nil, # The ID of an uploaded file that contains validation data. # - # If you provide this file, the data is used to generate validation metrics - # periodically during fine-tuning. These metrics can be viewed in the fine-tuning - # results file. The same data should not be present in both train and validation - # files. + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. # - # Your dataset must be formatted as a JSONL file. You must upload your file with - # the purpose `fine-tune`. + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. # - # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - # for more details. + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. validation_file: nil, request_options: {} ); end # Get info about a fine-tuning job. # - # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) sig do params( fine_tuning_job_id: String, @@ -123,7 +123,7 @@ module OpenAI # Number of fine-tuning jobs to retrieve. limit: nil, # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. - # Alternatively, set `metadata=null` to indicate no metadata. + # Alternatively, set `metadata=null` to indicate no metadata. metadata: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/images.rbi b/rbi/lib/openai/resources/images.rbi index 7b0c9a45..79a97ac3 100644 --- a/rbi/lib/openai/resources/images.rbi +++ b/rbi/lib/openai/resources/images.rbi @@ -18,24 +18,24 @@ module OpenAI end def create_variation( # The image to use as the basis for the variation(s). Must be a valid PNG file, - # less than 4MB, and square. + # less than 4MB, and square. image:, # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. model: nil, # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - # `n=1` is supported. + # `n=1` is supported. n: nil, # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. response_format: nil, # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. size: nil, # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. 
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ); end @@ -56,30 +56,30 @@ module OpenAI end def edit( # The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - # is not provided, image must have transparency, which will be used as the mask. + # is not provided, image must have transparency, which will be used as the mask. image:, # A text description of the desired image(s). The maximum length is 1000 - # characters. + # characters. prompt:, # An additional image whose fully transparent areas (e.g. where alpha is zero) - # indicate where `image` should be edited. Must be a valid PNG file, less than - # 4MB, and have the same dimensions as `image`. + # indicate where `image` should be edited. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. mask: nil, # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. model: nil, # The number of images to generate. Must be between 1 and 10. n: nil, # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. response_format: nil, # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. size: nil, # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ); end @@ -100,33 +100,33 @@ module OpenAI end def generate( # A text description of the desired image(s). The maximum length is 1000 - # characters for `dall-e-2` and 4000 characters for `dall-e-3`. + # characters for `dall-e-2` and 4000 characters for `dall-e-3`. prompt:, # The model to use for image generation. model: nil, # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - # `n=1` is supported. + # `n=1` is supported. n: nil, # The quality of the image that will be generated. `hd` creates images with finer - # details and greater consistency across the image. This param is only supported - # for `dall-e-3`. + # details and greater consistency across the image. This param is only supported + # for `dall-e-3`. quality: nil, # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. response_format: nil, # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - # `1024x1792` for `dall-e-3` models. + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. size: nil, # The style of the generated images. Must be one of `vivid` or `natural`. Vivid - # causes the model to lean towards generating hyper-real and dramatic images. 
- # Natural causes the model to produce more natural, less hyper-real looking - # images. This param is only supported for `dall-e-3`. + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. style: nil, # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/models.rbi b/rbi/lib/openai/resources/models.rbi index 58a63411..252a1dca 100644 --- a/rbi/lib/openai/resources/models.rbi +++ b/rbi/lib/openai/resources/models.rbi @@ -4,7 +4,7 @@ module OpenAI module Resources class Models # Retrieves a model instance, providing basic information about the model such as - # the owner and permissioning. + # the owner and permissioning. sig do params( model: String, @@ -18,7 +18,7 @@ module OpenAI request_options: {} ); end # Lists the currently available models, and provides basic information about each - # one such as the owner and availability. + # one such as the owner and availability. sig do params(request_options: T.nilable(T.any(OpenAI::RequestOptions, OpenAI::Internal::AnyHash))) .returns(OpenAI::Internal::Page[OpenAI::Models::Model]) @@ -26,7 +26,7 @@ module OpenAI def list(request_options: {}); end # Delete a fine-tuned model. You must have the Owner role in your organization to - # delete a model. + # delete a model. sig do params( model: String, diff --git a/rbi/lib/openai/resources/moderations.rbi b/rbi/lib/openai/resources/moderations.rbi index 9e47411a..b068d8e7 100644 --- a/rbi/lib/openai/resources/moderations.rbi +++ b/rbi/lib/openai/resources/moderations.rbi @@ -4,7 +4,7 @@ module OpenAI module Resources class Moderations # Classifies if text and/or image inputs are potentially harmful. Learn more in - # the [moderation guide](https://platform.openai.com/docs/guides/moderation). + # the [moderation guide](https://platform.openai.com/docs/guides/moderation). sig do params( input: T.any( @@ -25,12 +25,12 @@ module OpenAI end def create( # Input (or inputs) to classify. Can be a single string, an array of strings, or - # an array of multi-modal input objects similar to other models. + # an array of multi-modal input objects similar to other models. input:, # The content moderation model you would like to use. Learn more in - # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and - # learn about available models - # [here](https://platform.openai.com/docs/models#moderation). + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). model: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/responses.rbi b/rbi/lib/openai/resources/responses.rbi index 8fb89354..a8108266 100644 --- a/rbi/lib/openai/resources/responses.rbi +++ b/rbi/lib/openai/resources/responses.rbi @@ -8,17 +8,17 @@ module OpenAI # See {OpenAI::Resources::Responses#stream_raw} for streaming counterpart. # - # Creates a model response. 
Provide - # [text](https://platform.openai.com/docs/guides/text) or - # [image](https://platform.openai.com/docs/guides/images) inputs to generate - # [text](https://platform.openai.com/docs/guides/text) or - # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have - # the model call your own - # [custom code](https://platform.openai.com/docs/guides/function-calling) or use - # built-in [tools](https://platform.openai.com/docs/guides/tools) like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use - # your own data as input for the model's response. + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. sig do params( input: T.any( @@ -77,126 +77,126 @@ module OpenAI def create( # Text, image, or file inputs to the model, used to generate a response. # - # Learn more: + # Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Image inputs](https://platform.openai.com/docs/guides/images) - # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) input:, # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. model:, # Specify additional output data to include in the model response. Currently - # supported values are: + # supported values are: # - # - `file_search_call.results`: Include the search results of the file search tool - # call. - # - `message.input_image.image_url`: Include image urls from the input message. - # - `computer_call_output.output.image_url`: Include image urls from the computer - # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. 
- # - `computer_call_output.output.image_url`: Include image urls from the computer - # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message.
+ # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. include: nil, # Inserts a system (or developer) message as the first item in the model's - # context. + # context. # - # When using along with `previous_response_id`, the instructions from a previous - # response will not be carried over to the next response. This makes it simple to - # swap out system (or developer) messages in new responses. + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. instructions: nil, # An upper bound for the number of tokens that can be generated for a response, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). max_output_tokens: nil, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # Whether to allow the model to run tool calls in parallel. parallel_tool_calls: nil, # The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. Learn more about - # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). previous_response_id: nil, # **o-series models only** # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). reasoning: nil, # Whether to store the generated model response for later retrieval via API. store: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. temperature: nil, # Configuration options for a text response from the model. Can be plain text or - # structured JSON data. Learn more: + # structured JSON data. Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) text: nil, # How the model should select which tool (or tools) to use when generating a - # response. 
See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. tool_choice: nil, # An array of tools the model may call while generating a response. You can - # specify which tool to use by setting the `tool_choice` parameter. + # specify which tool to use by setting the `tool_choice` parameter. # - # The two categories of tools you can provide the model are: + # The two categories of tools you can provide the model are: # - # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - # capabilities, like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search). - # Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). - # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). tools: nil, # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. top_p: nil, # The truncation strategy to use for the model response. # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. truncation: nil, # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, # There is no need to provide `stream:`. 
Instead, use `#stream_raw` or `#create` - # for streaming and non-streaming use cases, respectively. + # for streaming and non-streaming use cases, respectively. stream: false, request_options: {} ); end # See {OpenAI::Resources::Responses#create} for non-streaming counterpart. # - # Creates a model response. Provide - # [text](https://platform.openai.com/docs/guides/text) or - # [image](https://platform.openai.com/docs/guides/images) inputs to generate - # [text](https://platform.openai.com/docs/guides/text) or - # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have - # the model call your own - # [custom code](https://platform.openai.com/docs/guides/function-calling) or use - # built-in [tools](https://platform.openai.com/docs/guides/tools) like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use - # your own data as input for the model's response. + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. sig do params( input: T.any( @@ -292,110 +292,110 @@ module OpenAI def stream_raw( # Text, image, or file inputs to the model, used to generate a response. # - # Learn more: + # Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Image inputs](https://platform.openai.com/docs/guides/images) - # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) input:, # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. model:, # Specify additional output data to include in the model response. Currently - # supported values are: + # supported values are: # - # - `file_search_call.results`: Include the search results of the file search tool - # call. - # - `message.input_image.image_url`: Include image urls from the input message. 
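The `stream_raw` variant documented here fixes `stream: true` and returns the raw server-sent event stream. A sketch, assuming the stream is enumerable and each event carries a `type` discriminator, as elsewhere in this SDK:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

stream = client.responses.stream_raw(
  model: "gpt-4o",
  input: "Write a limerick about RBI files."
)

# Each event is one variant of the response stream event union;
# dispatch on its type to pick out, e.g., text deltas.
stream.each do |event|
  puts event.type
end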
- # - `computer_call_output.output.image_url`: Include image urls from the computer - # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. include: nil, # Inserts a system (or developer) message as the first item in the model's - # context. + # context. # - # When using along with `previous_response_id`, the instructions from a previous - # response will not be carried over to the next response. This makes it simple to - # swap out system (or developer) messages in new responses. + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. instructions: nil, # An upper bound for the number of tokens that can be generated for a response, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). max_output_tokens: nil, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # Whether to allow the model to run tool calls in parallel. parallel_tool_calls: nil, # The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. Learn more about - # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). previous_response_id: nil, # **o-series models only** # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). reasoning: nil, # Whether to store the generated model response for later retrieval via API. store: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. temperature: nil, # Configuration options for a text response from the model. Can be plain text or - # structured JSON data. Learn more: + # structured JSON data. 
Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) text: nil, # How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. tool_choice: nil, # An array of tools the model may call while generating a response. You can - # specify which tool to use by setting the `tool_choice` parameter. + # specify which tool to use by setting the `tool_choice` parameter. # - # The two categories of tools you can provide the model are: + # The two categories of tools you can provide the model are: # - # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - # capabilities, like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search). - # Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). - # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). tools: nil, # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. top_p: nil, # The truncation strategy to use for the model response. # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. 
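`previous_response_id`, documented just above, is the whole conversation-state story. A two-turn sketch, assuming a configured client:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

first = client.responses.create(model: "gpt-4o", input: "Pick a random city.")

# `instructions` from `first` are not carried over; only the conversation is.
followup = client.responses.create(
  model: "gpt-4o",
  input: "What country is that city in?",
  previous_response_id: first.id
)
puts followup.output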
truncation: nil, # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create` - # for streaming and non-streaming use cases, respectively. + # for streaming and non-streaming use cases, respectively. stream: true, request_options: {} ); end @@ -412,7 +412,7 @@ module OpenAI # The ID of the response to retrieve. response_id, # Additional fields to include in the response. See the `include` parameter for - # Response creation above for more information. + # Response creation above for more information. include: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/responses/input_items.rbi b/rbi/lib/openai/resources/responses/input_items.rbi index 26668496..0c70b7b0 100644 --- a/rbi/lib/openai/resources/responses/input_items.rbi +++ b/rbi/lib/openai/resources/responses/input_items.rbi @@ -38,15 +38,15 @@ module OpenAI # An item ID to list items before, used in pagination. before: nil, # Additional fields to include in the response. See the `include` parameter for - # Response creation above for more information. + # Response creation above for more information. include: nil, # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. limit: nil, # The order to return the input items in. Default is `asc`. # - # - `asc`: Return the input items in ascending order. - # - `desc`: Return the input items in descending order. + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. order: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/uploads.rbi b/rbi/lib/openai/resources/uploads.rbi index bdc55f98..c397fdbf 100644 --- a/rbi/lib/openai/resources/uploads.rbi +++ b/rbi/lib/openai/resources/uploads.rbi @@ -7,24 +7,24 @@ module OpenAI attr_reader :parts # Creates an intermediate - # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object - # that you can add - # [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. - # Currently, an Upload can accept at most 8 GB in total and expires after an hour - # after you create it. + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object + # that you can add + # [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. + # Currently, an Upload can accept at most 8 GB in total and expires after an hour + # after you create it. # - # Once you complete the Upload, we will create a - # [File](https://platform.openai.com/docs/api-reference/files/object) object that - # contains all the parts you uploaded. This File is usable in the rest of our - # platform as a regular File object. + # Once you complete the Upload, we will create a + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # contains all the parts you uploaded. This File is usable in the rest of our + # platform as a regular File object. # - # For certain `purpose` values, the correct `mime_type` must be specified. 
Please - # refer to documentation for the - # [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). + # For certain `purpose` values, the correct `mime_type` must be specified. Please + # refer to documentation for the + # [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). # - # For guidance on the proper filename extensions for each purpose, please follow - # the documentation on - # [creating a File](https://platform.openai.com/docs/api-reference/files/create). + # For guidance on the proper filename extensions for each purpose, please follow + # the documentation on + # [creating a File](https://platform.openai.com/docs/api-reference/files/create). sig do params( bytes: Integer, @@ -42,13 +42,13 @@ module OpenAI filename:, # The MIME type of the file. # - # This must fall within the supported MIME types for your file purpose. See the - # supported MIME types for assistants and vision. + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. mime_type:, # The intended purpose of the uploaded file. # - # See the - # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). purpose:, request_options: {} ); end @@ -66,18 +66,18 @@ module OpenAI request_options: {} ); end # Completes the - # [Upload](https://platform.openai.com/docs/api-reference/uploads/object). + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object). # - # Within the returned Upload object, there is a nested - # [File](https://platform.openai.com/docs/api-reference/files/object) object that - # is ready to use in the rest of the platform. + # Within the returned Upload object, there is a nested + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # is ready to use in the rest of the platform. # - # You can specify the order of the Parts by passing in an ordered list of the Part - # IDs. + # You can specify the order of the Parts by passing in an ordered list of the Part + # IDs. # - # The number of bytes uploaded upon completion must match the number of bytes - # initially specified when creating the Upload object. No Parts may be added after - # an Upload is completed. + # The number of bytes uploaded upon completion must match the number of bytes + # initially specified when creating the Upload object. No Parts may be added after + # an Upload is completed. sig do params( upload_id: String, @@ -93,7 +93,7 @@ module OpenAI # The ordered list of Part IDs. part_ids:, # The optional md5 checksum for the file contents to verify if the bytes uploaded - # matches what you expect. + # matches what you expect. md5: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/uploads/parts.rbi b/rbi/lib/openai/resources/uploads/parts.rbi index 47bb936f..9b073eea 100644 --- a/rbi/lib/openai/resources/uploads/parts.rbi +++ b/rbi/lib/openai/resources/uploads/parts.rbi @@ -5,16 +5,16 @@ module OpenAI class Uploads class Parts # Adds a - # [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an - # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. - # A Part represents a chunk of bytes from the file you are trying to upload. 
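Read together, the `uploads` and `uploads.parts` hunks describe a three-step flow: create the Upload, add Parts of at most 64 MB each, then complete it with the ordered Part IDs. A sketch under two assumptions: `data:` as the part-payload keyword (mirroring the REST field, since only `upload_id` is visible in this hunk), and a hypothetical `training.jsonl`:

require "openai"
require "pathname"
require "stringio"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
file = Pathname("training.jsonl")

upload = client.uploads.create(
  bytes: file.size,
  filename: file.basename.to_s,
  mime_type: "text/jsonl", # must suit the `purpose`; see the MIME-type docs above
  purpose: "fine-tune"
)

# Parts may be at most 64 MB each; they could also be uploaded in parallel,
# since ordering is fixed later by `part_ids`.
part_ids = []
file.open("rb") do |io|
  while (chunk = io.read(64 * 1024 * 1024))
    part_ids << client.uploads.parts.create(upload.id, data: StringIO.new(chunk)).id
  end
end

# The completed byte count must match `bytes:` above; no Parts can be added after this.
client.uploads.complete(upload.id, part_ids: part_ids)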
+ # [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. + # A Part represents a chunk of bytes from the file you are trying to upload. # - # Each Part can be at most 64 MB, and you can add Parts until you hit the Upload - # maximum of 8 GB. + # Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + # maximum of 8 GB. # - # It is possible to add multiple Parts in parallel. You can decide the intended - # order of the Parts when you - # [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). + # It is possible to add multiple Parts in parallel. You can decide the intended + # order of the Parts when you + # [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). sig do params( upload_id: String, diff --git a/rbi/lib/openai/resources/vector_stores.rbi b/rbi/lib/openai/resources/vector_stores.rbi index b0868a1f..7658101e 100644 --- a/rbi/lib/openai/resources/vector_stores.rbi +++ b/rbi/lib/openai/resources/vector_stores.rbi @@ -27,20 +27,20 @@ module OpenAI end def create( # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. chunking_strategy: nil, # The expiration policy for a vector store. expires_after: nil, # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. + # the vector store should use. Useful for tools like `file_search` that can access + # files. file_ids: nil, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # The name of the vector store. name: nil, @@ -76,11 +76,11 @@ module OpenAI # The expiration policy for a vector store. expires_after: nil, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # The name of the vector store. name: nil, @@ -99,20 +99,20 @@ module OpenAI end def list( # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. 
For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. after: nil, # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. before: nil, # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. limit: nil, # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. order: nil, request_options: {} ); end @@ -130,7 +130,7 @@ module OpenAI request_options: {} ); end # Search a vector store for relevant chunks based on a query and file attributes - # filter. + # filter. sig do params( vector_store_id: String, @@ -151,7 +151,7 @@ module OpenAI # A filter to apply based on file attributes. filters: nil, # The maximum number of results to return. This number should be between 1 and 50 - # inclusive. + # inclusive. max_num_results: nil, # Ranking options for search. ranking_options: nil, diff --git a/rbi/lib/openai/resources/vector_stores/file_batches.rbi b/rbi/lib/openai/resources/vector_stores/file_batches.rbi index ae4348ba..8a6a5231 100644 --- a/rbi/lib/openai/resources/vector_stores/file_batches.rbi +++ b/rbi/lib/openai/resources/vector_stores/file_batches.rbi @@ -23,17 +23,17 @@ module OpenAI # The ID of the vector store for which to create a File Batch. vector_store_id, # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. + # the vector store should use. Useful for tools like `file_search` that can access + # files. file_ids:, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. attributes: nil, # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. chunking_strategy: nil, request_options: {} ); end @@ -54,7 +54,7 @@ module OpenAI request_options: {} ); end # Cancel a vector store file batch. This attempts to cancel the processing of - # files in this batch as soon as possible. + # files in this batch as soon as possible. sig do params( batch_id: String, @@ -90,23 +90,23 @@ module OpenAI # Path param: The ID of the vector store that the files belong to. 
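The vector-store round trip documented in these hunks, as a sketch; `query:` on `#search` is assumed from the REST API (the hunk shows only the optional parameters), the File ID is hypothetical, and the returned page is assumed to enumerate results:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

store = client.vector_stores.create(
  name: "support-docs",
  file_ids: ["file-abc123"], # hypothetical File ID
  metadata: { "team" => "support" } # string keys/values, 64/512 chars max
)

results = client.vector_stores.search(
  store.id,
  query: "How do I rotate an API key?",
  max_num_results: 5 # between 1 and 50 inclusive
)
results.each { |hit| puts [hit.filename, hit.score].inspect }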
vector_store_id:, # Query param: A cursor for use in pagination. `after` is an object ID that - # defines your place in the list. For instance, if you make a list request and - # receive 100 objects, ending with obj_foo, your subsequent call can include - # after=obj_foo in order to fetch the next page of the list. + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, ending with obj_foo, your subsequent call can include + # after=obj_foo in order to fetch the next page of the list. after: nil, # Query param: A cursor for use in pagination. `before` is an object ID that - # defines your place in the list. For instance, if you make a list request and - # receive 100 objects, starting with obj_foo, your subsequent call can include - # before=obj_foo in order to fetch the previous page of the list. + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, starting with obj_foo, your subsequent call can include + # before=obj_foo in order to fetch the previous page of the list. before: nil, # Query param: Filter by file status. One of `in_progress`, `completed`, `failed`, - # `cancelled`. + # `cancelled`. filter: nil, # Query param: A limit on the number of objects to be returned. Limit can range - # between 1 and 100, and the default is 20. + # between 1 and 100, and the default is 20. limit: nil, # Query param: Sort order by the `created_at` timestamp of the objects. `asc` for - # ascending order and `desc` for descending order. + # ascending order and `desc` for descending order. order: nil, request_options: {} ); end diff --git a/rbi/lib/openai/resources/vector_stores/files.rbi b/rbi/lib/openai/resources/vector_stores/files.rbi index 22550fd4..eb8c2391 100644 --- a/rbi/lib/openai/resources/vector_stores/files.rbi +++ b/rbi/lib/openai/resources/vector_stores/files.rbi @@ -5,8 +5,8 @@ module OpenAI class VectorStores class Files # Create a vector store file by attaching a - # [File](https://platform.openai.com/docs/api-reference/files) to a - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). + # [File](https://platform.openai.com/docs/api-reference/files) to a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). sig do params( vector_store_id: String, @@ -25,17 +25,17 @@ module OpenAI # The ID of the vector store for which to create a File. vector_store_id, # A [File](https://platform.openai.com/docs/api-reference/files) ID that the - # vector store should use. Useful for tools like `file_search` that can access - # files. + # vector store should use. Useful for tools like `file_search` that can access + # files. file_id:, # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. attributes: nil, # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. 
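And the two attachment paths from the `files` and `file_batches` hunks, sketched; the `list_files` method name and its positional `batch_id` are assumptions extrapolated from the surrounding signatures, and all IDs are hypothetical:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
store_id = "vs_abc123" # hypothetical vector store ID

# Attach one File at a time...
client.vector_stores.files.create(store_id, file_id: "file-abc123")

# ...or several in one batch.
batch = client.vector_stores.file_batches.create(
  store_id,
  file_ids: %w[file-def456 file-ghi789]
)

files = client.vector_stores.file_batches.list_files(
  batch.id,
  vector_store_id: store_id,
  filter: "completed", # one of in_progress / completed / failed / cancelled
  limit: 20
)
files.each { |f| puts f.id }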
+ # strategy. Only applicable if `file_ids` is non-empty. chunking_strategy: nil, request_options: {} ); end @@ -71,10 +71,10 @@ module OpenAI # Path param: The ID of the vector store the file belongs to. vector_store_id:, # Body param: Set of 16 key-value pairs that can be attached to an object. This - # can be useful for storing additional information about the object in a - # structured format, and querying for objects via API or the dashboard. Keys are - # strings with a maximum length of 64 characters. Values are strings with a - # maximum length of 512 characters, booleans, or numbers. + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. Keys are + # strings with a maximum length of 64 characters. Values are strings with a + # maximum length of 512 characters, booleans, or numbers. attributes:, request_options: {} ); end @@ -95,29 +95,29 @@ module OpenAI # The ID of the vector store that the files belong to. vector_store_id, # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. after: nil, # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. before: nil, # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. filter: nil, # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. limit: nil, # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. order: nil, request_options: {} ); end # Delete a vector store file. This will remove the file from the vector store but - # the file itself will not be deleted. To delete the file, use the - # [delete file](https://platform.openai.com/docs/api-reference/files/delete) - # endpoint. + # the file itself will not be deleted. To delete the file, use the + # [delete file](https://platform.openai.com/docs/api-reference/files/delete) + # endpoint. sig do params( file_id: String,