From aa22f34916a6b037e8097d1a86547d0d8dbfc0f6 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 14 Mar 2025 04:13:35 +0000 Subject: [PATCH] chore: ensure doc strings for rbi method arguments --- lib/openai/base_model.rb | 4 +- lib/openai/base_page.rb | 2 - lib/openai/base_stream.rb | 2 - lib/openai/stream.rb | 2 - rbi/lib/openai/base_model.rbi | 1 + rbi/lib/openai/base_page.rbi | 1 - rbi/lib/openai/base_stream.rbi | 1 - rbi/lib/openai/client.rbi | 5 + rbi/lib/openai/resources/audio/speech.rbi | 20 +- .../openai/resources/audio/transcriptions.rbi | 23 ++ .../openai/resources/audio/translations.rbi | 24 +- rbi/lib/openai/resources/batches.rbi | 53 ++- rbi/lib/openai/resources/beta/assistants.rbi | 145 +++++++- rbi/lib/openai/resources/beta/threads.rbi | 195 +++++++++- .../resources/beta/threads/messages.rbi | 72 +++- .../openai/resources/beta/threads/runs.rbi | 259 ++++++++++++- .../resources/beta/threads/runs/steps.rbi | 41 ++- rbi/lib/openai/resources/chat/completions.rbi | 348 +++++++++++++++++- .../resources/chat/completions/messages.rbi | 13 +- rbi/lib/openai/resources/completions.rbi | 172 +++++++++ rbi/lib/openai/resources/embeddings.rbi | 29 +- rbi/lib/openai/resources/files.rbi | 46 ++- rbi/lib/openai/resources/fine_tuning/jobs.rbi | 82 ++++- .../fine_tuning/jobs/checkpoints.rbi | 10 +- rbi/lib/openai/resources/images.rbi | 53 +++ rbi/lib/openai/resources/models.rbi | 12 +- rbi/lib/openai/resources/moderations.rbi | 12 +- rbi/lib/openai/resources/responses.rbi | 189 +++++++++- .../resources/responses/input_items.rbi | 18 +- rbi/lib/openai/resources/uploads.rbi | 35 +- rbi/lib/openai/resources/uploads/parts.rbi | 8 +- rbi/lib/openai/resources/vector_stores.rbi | 69 +++- .../resources/vector_stores/file_batches.rbi | 51 ++- .../openai/resources/vector_stores/files.rbi | 71 +++- rbi/lib/openai/util.rbi | 8 +- 35 files changed, 2003 insertions(+), 73 deletions(-) diff --git a/lib/openai/base_model.rb b/lib/openai/base_model.rb index 23f83864..3abd2ca3 100644 --- a/lib/openai/base_model.rb +++ b/lib/openai/base_model.rb @@ -876,8 +876,6 @@ def initialize(type_info, spec = {}) end end - # @api private - # # @abstract # # @example @@ -903,6 +901,8 @@ def known_fields @known_fields ||= (self < OpenAI::BaseModel ? 
superclass.known_fields.dup : {}) end + # @api private + # # @return [Hash{Symbol=>Hash{Symbol=>Object}}] def fields known_fields.transform_values do |field| diff --git a/lib/openai/base_page.rb b/lib/openai/base_page.rb index b8185c65..481df2ea 100644 --- a/lib/openai/base_page.rb +++ b/lib/openai/base_page.rb @@ -1,8 +1,6 @@ # frozen_string_literal: true module OpenAI - # @api private - # # @abstract # # @example diff --git a/lib/openai/base_stream.rb b/lib/openai/base_stream.rb index 7151b3f7..2aef50fe 100644 --- a/lib/openai/base_stream.rb +++ b/lib/openai/base_stream.rb @@ -1,8 +1,6 @@ # frozen_string_literal: true module OpenAI - # @api private - # # @example # ```ruby # stream.for_each do |message| diff --git a/lib/openai/stream.rb b/lib/openai/stream.rb index 02dea5de..f6f0b113 100644 --- a/lib/openai/stream.rb +++ b/lib/openai/stream.rb @@ -1,8 +1,6 @@ # frozen_string_literal: true module OpenAI - # @api private - # # @example # ```ruby # stream.for_each do |message| diff --git a/rbi/lib/openai/base_model.rbi b/rbi/lib/openai/base_model.rbi index b0ed4c49..b6422096 100644 --- a/rbi/lib/openai/base_model.rbi +++ b/rbi/lib/openai/base_model.rbi @@ -438,6 +438,7 @@ module OpenAI def known_fields end + # @api private sig do returns(T::Hash[Symbol, T.all(OpenAI::BaseModel::KnownFieldShape, {type: OpenAI::Converter::Input})]) end diff --git a/rbi/lib/openai/base_page.rbi b/rbi/lib/openai/base_page.rbi index bf6ab11f..ad3a2e19 100644 --- a/rbi/lib/openai/base_page.rbi +++ b/rbi/lib/openai/base_page.rbi @@ -1,7 +1,6 @@ # typed: strong module OpenAI - # @api private module BasePage abstract! diff --git a/rbi/lib/openai/base_stream.rbi b/rbi/lib/openai/base_stream.rbi index 79324aa7..c5f6c58e 100644 --- a/rbi/lib/openai/base_stream.rbi +++ b/rbi/lib/openai/base_stream.rbi @@ -1,7 +1,6 @@ # typed: strong module OpenAI - # @api private module BaseStream Message = type_member(:in) Elem = type_member(:out) diff --git a/rbi/lib/openai/client.rbi b/rbi/lib/openai/client.rbi index 4b8256ce..2d8c31bf 100644 --- a/rbi/lib/openai/client.rbi +++ b/rbi/lib/openai/client.rbi @@ -98,10 +98,15 @@ module OpenAI .returns(T.attached_class) end def self.new( + # Override the default base URL for the API, e.g., `"https://api.example.com/v2/"` base_url: nil, + # Defaults to `ENV["OPENAI_API_KEY"]` api_key: ENV["OPENAI_API_KEY"], + # Defaults to `ENV["OPENAI_ORG_ID"]` organization: ENV["OPENAI_ORG_ID"], + # Defaults to `ENV["OPENAI_PROJECT_ID"]` project: ENV["OPENAI_PROJECT_ID"], + # Max number of retries to attempt after a failed retryable request. max_retries: DEFAULT_MAX_RETRIES, timeout: DEFAULT_TIMEOUT_IN_SECONDS, initial_retry_delay: DEFAULT_INITIAL_RETRY_DELAY, diff --git a/rbi/lib/openai/resources/audio/speech.rbi b/rbi/lib/openai/resources/audio/speech.rbi index ac1162aa..ae6f4be5 100644 --- a/rbi/lib/openai/resources/audio/speech.rbi +++ b/rbi/lib/openai/resources/audio/speech.rbi @@ -16,7 +16,25 @@ module OpenAI ) .returns(T.anything) end - def create(input:, model:, voice:, response_format: nil, speed: nil, request_options: {}) + def create( + # The text to generate audio for. The maximum length is 4096 characters. + input:, + # One of the available [TTS models](https://platform.openai.com/docs/models#tts): + # `tts-1` or `tts-1-hd` + model:, + # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + # `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. 
Previews of the + # voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + voice:, + # The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + # `wav`, and `pcm`. + response_format: nil, + # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is + # the default. + speed: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/audio/transcriptions.rbi b/rbi/lib/openai/resources/audio/transcriptions.rbi index b0455588..50e5c416 100644 --- a/rbi/lib/openai/resources/audio/transcriptions.rbi +++ b/rbi/lib/openai/resources/audio/transcriptions.rbi @@ -19,12 +19,35 @@ module OpenAI .returns(T.any(OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose)) end def create( + # The audio file object (not file name) to transcribe, in one of these formats: + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. file:, + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. model:, + # The language of the input audio. Supplying the input language in + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. language: nil, + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. prompt: nil, + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. response_format: nil, + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. temperature: nil, + # The timestamp granularities to populate for this transcription. + # `response_format` must be set to `verbose_json` to use timestamp granularities. + # Either or both of these options are supported: `word` or `segment`. Note: There + # is no additional latency for segment timestamps, but generating word timestamps + # incurs additional latency. timestamp_granularities: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/audio/translations.rbi b/rbi/lib/openai/resources/audio/translations.rbi index 02565a2d..278e3855 100644 --- a/rbi/lib/openai/resources/audio/translations.rbi +++ b/rbi/lib/openai/resources/audio/translations.rbi @@ -16,7 +16,29 @@ module OpenAI ) .returns(T.any(OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose)) end - def create(file:, model:, prompt: nil, response_format: nil, temperature: nil, request_options: {}) + def create( + # The audio file object (not file name) to translate, in one of these formats: flac, + # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + file:, + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. + model:, + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should be in English. 
+ prompt: nil, + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. + response_format: nil, + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. + temperature: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/batches.rbi b/rbi/lib/openai/resources/batches.rbi index fa633d8c..a3f56e28 100644 --- a/rbi/lib/openai/resources/batches.rbi +++ b/rbi/lib/openai/resources/batches.rbi @@ -14,7 +14,34 @@ module OpenAI ) .returns(OpenAI::Models::Batch) end - def create(completion_window:, endpoint:, input_file_id:, metadata: nil, request_options: {}) + def create( + # The time frame within which the batch should be processed. Currently only `24h` + # is supported. + completion_window:, + # The endpoint to be used for all requests in the batch. Currently + # `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + # Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 + # embedding inputs across all requests in the batch. + endpoint:, + # The ID of an uploaded file that contains requests for the new batch. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your input file must be formatted as a + # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), + # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 + # requests, and can be up to 200 MB in size. + input_file_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) end # Retrieves a batch. @@ -25,7 +52,11 @@ module OpenAI ) .returns(OpenAI::Models::Batch) end - def retrieve(batch_id, request_options: {}) + def retrieve( + # The ID of the batch to retrieve. + batch_id, + request_options: {} + ) end # List your organization's batches. @@ -37,7 +68,17 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::Batch]) end - def list(after: nil, limit: nil, request_options: {}) + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + request_options: {} + ) end # Cancels an in-progress batch. The batch will be in status `cancelling` for up to @@ -50,7 +91,11 @@ module OpenAI ) .returns(OpenAI::Models::Batch) end - def cancel(batch_id, request_options: {}) + def cancel( + # The ID of the batch to cancel. 
batch_id, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/beta/assistants.rbi b/rbi/lib/openai/resources/beta/assistants.rbi index 5a235b78..c9d62665 100644 --- a/rbi/lib/openai/resources/beta/assistants.rbi +++ b/rbi/lib/openai/resources/beta/assistants.rbi @@ -36,16 +36,72 @@ module OpenAI .returns(OpenAI::Models::Beta::Assistant) end def create( + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model:, + # The description of the assistant. The maximum length is 512 characters. description: nil, + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. instructions: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # The name of the assistant. The maximum length is 256 characters. name: nil, + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. tool_resources: nil, + # A list of tools enabled on the assistant. There can be a maximum of 128 tools per + # assistant. 
Tools can be of types `code_interpreter`, `file_search`, or + # `function`. tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, request_options: {} ) @@ -59,7 +115,11 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Assistant) end - def retrieve(assistant_id, request_options: {}) + def retrieve( + # The ID of the assistant to retrieve. + assistant_id, + request_options: {} + ) end # Modifies an assistant. @@ -95,17 +155,74 @@ module OpenAI .returns(OpenAI::Models::Beta::Assistant) end def update( + # The ID of the assistant to modify. assistant_id, + # The description of the assistant. The maximum length is 512 characters. description: nil, + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. instructions: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model: nil, + # The name of the assistant. The maximum length is 256 characters. name: nil, + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. tool_resources: nil, + # A list of tools enabled on the assistant. There can be a maximum of 128 tools per + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, request_options: {} ) @@ -122,7 +239,25 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Assistant]) end - def list(after: nil, before: nil, limit: nil, order: nil, request_options: {}) + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) end # Delete an assistant. @@ -133,7 +268,11 @@ module OpenAI ) .returns(OpenAI::Models::Beta::AssistantDeleted) end - def delete(assistant_id, request_options: {}) + def delete( + # The ID of the assistant to delete. + assistant_id, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/beta/threads.rbi b/rbi/lib/openai/resources/beta/threads.rbi index b2f631a3..2170549d 100644 --- a/rbi/lib/openai/resources/beta/threads.rbi +++ b/rbi/lib/openai/resources/beta/threads.rbi @@ -22,7 +22,24 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Thread) end - def create(messages: nil, metadata: nil, tool_resources: nil, request_options: {}) + def create( + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. + messages: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. 
For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + tool_resources: nil, + request_options: {} + ) end # Retrieves a thread. @@ -33,7 +50,11 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Thread) end - def retrieve(thread_id, request_options: {}) + def retrieve( + # The ID of the thread to retrieve. + thread_id, + request_options: {} + ) end # Modifies a thread. @@ -46,7 +67,23 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Thread) end - def update(thread_id, metadata: nil, tool_resources: nil, request_options: {}) + def update( + # The ID of the thread to modify. Only the `metadata` can be modified. + thread_id, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + tool_resources: nil, + request_options: {} + ) end # Delete a thread. @@ -57,7 +94,11 @@ module OpenAI ) .returns(OpenAI::Models::Beta::ThreadDeleted) end - def delete(thread_id, request_options: {}) + def delete( + # The ID of the thread to delete. + thread_id, + request_options: {} + ) end # Create a thread and run it in one request. @@ -98,20 +139,93 @@ module OpenAI .returns(OpenAI::Models::Beta::Threads::Run) end def create_and_run( + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. assistant_id:, + # Override the default system message of the assistant. This is useful for + # modifying the behavior on a per-run basis. instructions: nil, + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. max_completion_tokens: nil, + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. max_prompt_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. 
model: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. thread: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. tool_choice: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. tool_resources: nil, + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. truncation_strategy: nil, request_options: {} ) @@ -184,20 +298,93 @@ module OpenAI ) end def create_and_run_streaming( + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. assistant_id:, + # Override the default system message of the assistant. 
This is useful for + # modifying the behavior on a per-run basis. instructions: nil, + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. max_completion_tokens: nil, + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. max_prompt_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. model: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. thread: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. 
`auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. tool_choice: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. tool_resources: nil, + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. truncation_strategy: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/beta/threads/messages.rbi b/rbi/lib/openai/resources/beta/threads/messages.rbi index 2e0009e8..d417b9d0 100644 --- a/rbi/lib/openai/resources/beta/threads/messages.rbi +++ b/rbi/lib/openai/resources/beta/threads/messages.rbi @@ -26,7 +26,30 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Message) end - def create(thread_id, content:, role:, attachments: nil, metadata: nil, request_options: {}) + def create( + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # to create a message for. + thread_id, + # The text contents of the message. + content:, + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + role:, + # A list of files attached to the message, and the tools they should be added to. + attachments: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) end # Retrieve a message. @@ -38,7 +61,14 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Message) end - def retrieve(message_id, thread_id:, request_options: {}) + def retrieve( + # The ID of the message to retrieve. + message_id, + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # to which this message belongs. + thread_id:, + request_options: {} + ) end # Modifies a message. @@ -51,7 +81,20 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Message) end - def update(message_id, thread_id:, metadata: nil, request_options: {}) + def update( + # Path param: The ID of the message to modify. 
+ message_id, + # Path param: The ID of the thread to which this message belongs. + thread_id:, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) end # Returns a list of messages for a given thread. @@ -68,11 +111,26 @@ module OpenAI .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Message]) end def list( + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # the messages belong to. thread_id, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. order: nil, + # Filter messages by the run ID that generated them. run_id: nil, request_options: {} ) @@ -87,7 +145,13 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::MessageDeleted) end - def delete(message_id, thread_id:, request_options: {}) + def delete( + # The ID of the message to delete. + message_id, + # The ID of the thread to which this message belongs. + thread_id:, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/beta/threads/runs.rbi b/rbi/lib/openai/resources/beta/threads/runs.rbi index ecf30e5f..c2a318dd 100644 --- a/rbi/lib/openai/resources/beta/threads/runs.rbi +++ b/rbi/lib/openai/resources/beta/threads/runs.rbi @@ -50,23 +50,113 @@ module OpenAI .returns(OpenAI::Models::Beta::Threads::Run) end def create( + # Path param: The ID of the thread to run. thread_id, + # Body param: The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. assistant_id:, + # Query param: A list of additional fields to include in the response. Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. include: nil, + # Body param: Appends additional instructions at the end of the instructions for + # the run. This is useful for modifying the behavior on a per-run basis without + # overriding other instructions. additional_instructions: nil, + # Body param: Adds additional messages to the thread before creating the run. 
additional_messages: nil, + # Body param: Overrides the + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. instructions: nil, + # Body param: The maximum number of completion tokens that may be used over the + # course of the run. The run will make a best effort to use only the number of + # completion tokens specified, across multiple turns of the run. If the run + # exceeds the number of completion tokens specified, the run will end with status + # `incomplete`. See `incomplete_details` for more info. max_completion_tokens: nil, + # Body param: The maximum number of prompt tokens that may be used over the course + # of the run. The run will make a best effort to use only the number of prompt + # tokens specified, across multiple turns of the run. If the run exceeds the + # number of prompt tokens specified, the run will end with status `incomplete`. + # See `incomplete_details` for more info. max_prompt_tokens: nil, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Body param: The ID of the + # [Model](https://platform.openai.com/docs/api-reference/models) to be used to + # execute this run. If a value is provided here, it will override the model + # associated with the assistant. If not, the model associated with the assistant + # will be used. model: nil, + # Body param: Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, + # Body param: **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # Body param: Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. 
response_format: nil, + # Body param: What sampling temperature to use, between 0 and 2. Higher values + # like 0.8 will make the output more random, while lower values like 0.2 will make + # it more focused and deterministic. temperature: nil, + # Body param: Controls which (if any) tool is called by the model. `none` means + # the model will not call any tools and instead generates a message. `auto` is the + # default value and means the model can pick between generating a message or + # calling one or more tools. `required` means the model must call one or more + # tools before responding to the user. Specifying a particular tool like + # `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. tool_choice: nil, + # Body param: Override the tools the assistant can use for this run. This is + # useful for modifying the behavior on a per-run basis. tools: nil, + # Body param: An alternative to sampling with temperature, called nucleus + # sampling, where the model considers the results of the tokens with top_p + # probability mass. So 0.1 means only the tokens comprising the top 10% + # probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, + # Body param: Controls for how a thread will be truncated prior to the run. Use + # this to control the initial context window of the run. truncation_strategy: nil, request_options: {} ) @@ -142,23 +232,113 @@ module OpenAI ) end def create_streaming( + # Path param: The ID of the thread to run. thread_id, + # Body param: The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. assistant_id:, + # Query param: A list of additional fields to include in the response. Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. include: nil, + # Body param: Appends additional instructions at the end of the instructions for + # the run. This is useful for modifying the behavior on a per-run basis without + # overriding other instructions. additional_instructions: nil, + # Body param: Adds additional messages to the thread before creating the run. additional_messages: nil, + # Body param: Overrides the + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. instructions: nil, + # Body param: The maximum number of completion tokens that may be used over the + # course of the run. The run will make a best effort to use only the number of + # completion tokens specified, across multiple turns of the run. If the run + # exceeds the number of completion tokens specified, the run will end with status + # `incomplete`. See `incomplete_details` for more info. max_completion_tokens: nil, + # Body param: The maximum number of prompt tokens that may be used over the course + # of the run. The run will make a best effort to use only the number of prompt + # tokens specified, across multiple turns of the run. If the run exceeds the + # number of prompt tokens specified, the run will end with status `incomplete`. + # See `incomplete_details` for more info. 
max_prompt_tokens: nil, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Body param: The ID of the + # [Model](https://platform.openai.com/docs/api-reference/models) to be used to + # execute this run. If a value is provided here, it will override the model + # associated with the assistant. If not, the model associated with the assistant + # will be used. model: nil, + # Body param: Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, + # Body param: **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # Body param: Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # Body param: What sampling temperature to use, between 0 and 2. Higher values + # like 0.8 will make the output more random, while lower values like 0.2 will make + # it more focused and deterministic. temperature: nil, + # Body param: Controls which (if any) tool is called by the model. `none` means + # the model will not call any tools and instead generates a message. `auto` is the + # default value and means the model can pick between generating a message or + # calling one or more tools. `required` means the model must call one or more + # tools before responding to the user. Specifying a particular tool like + # `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. tool_choice: nil, + # Body param: Override the tools the assistant can use for this run. This is + # useful for modifying the behavior on a per-run basis. 
tools: nil, + # Body param: An alternative to sampling with temperature, called nucleus + # sampling, where the model considers the results of the tokens with top_p + # probability mass. So 0.1 means only the tokens comprising the top 10% + # probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, + # Body param: Controls for how a thread will be truncated prior to the run. Use + # this to control the initial context window of the run. truncation_strategy: nil, request_options: {} ) @@ -173,7 +353,14 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Run) end - def retrieve(run_id, thread_id:, request_options: {}) + def retrieve( + # The ID of the run to retrieve. + run_id, + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was run. + thread_id:, + request_options: {} + ) end # Modifies a run. @@ -186,7 +373,21 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Run) end - def update(run_id, thread_id:, metadata: nil, request_options: {}) + def update( + # Path param: The ID of the run to modify. + run_id, + # Path param: The ID of the + # [thread](https://platform.openai.com/docs/api-reference/threads) that was run. + thread_id:, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) end # Returns a list of runs belonging to a thread. @@ -201,7 +402,27 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Run]) end - def list(thread_id, after: nil, before: nil, limit: nil, order: nil, request_options: {}) + def list( + # The ID of the thread the run belongs to. + thread_id, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) end # Cancels a run that is `in_progress`. @@ -213,7 +434,13 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Run) end - def cancel(run_id, thread_id:, request_options: {}) + def cancel( + # The ID of the run to cancel. + run_id, + # The ID of the thread to which this run belongs. 
thread_id:, + request_options: {} + ) end # When a run has the `status: "requires_action"` and `required_action.type` is @@ -229,7 +456,17 @ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Run) end - def submit_tool_outputs(run_id, thread_id:, tool_outputs:, request_options: {}) + def submit_tool_outputs( + # Path param: The ID of the run that requires the tool output submission. + run_id, + # Path param: The ID of the + # [thread](https://platform.openai.com/docs/api-reference/threads) to which this + # run belongs. + thread_id:, + # Body param: A list of tools for which the outputs are being submitted. + tool_outputs:, + request_options: {} + ) end # When a run has the `status: "requires_action"` and `required_action.type` is @@ -274,7 +511,17 @@ module OpenAI ] ) end - def submit_tool_outputs_streaming(run_id, thread_id:, tool_outputs:, request_options: {}) + def submit_tool_outputs_streaming( + # Path param: The ID of the run that requires the tool output submission. + run_id, + # Path param: The ID of the + # [thread](https://platform.openai.com/docs/api-reference/threads) to which this + # run belongs. + thread_id:, + # Body param: A list of tools for which the outputs are being submitted. + tool_outputs:, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/beta/threads/runs/steps.rbi b/rbi/lib/openai/resources/beta/threads/runs/steps.rbi index 0987c273..4833b5fb 100644 --- a/rbi/lib/openai/resources/beta/threads/runs/steps.rbi +++ b/rbi/lib/openai/resources/beta/threads/runs/steps.rbi @@ -17,7 +17,24 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) end - def retrieve(step_id, thread_id:, run_id:, include: nil, request_options: {}) + def retrieve( + # Path param: The ID of the run step to retrieve. + step_id, + # Path param: The ID of the thread to which the run and run step belong. + thread_id:, + # Path param: The ID of the run to which the run step belongs. + run_id:, + # Query param: A list of additional fields to include in the response. Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + include: nil, + request_options: {} + ) end # Returns a list of run steps belonging to a run. @@ -35,12 +52,34 @@ module OpenAI .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Runs::RunStep]) end def list( + # Path param: The ID of the run the run steps belong to. run_id, + # Path param: The ID of the thread the run and run steps belong to. thread_id:, + # Query param: A cursor for use in pagination. `after` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, ending with obj_foo, your subsequent call can include + # after=obj_foo in order to fetch the next page of the list. after: nil, + # Query param: A cursor for use in pagination. `before` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, starting with obj_foo, your subsequent call can include + # before=obj_foo in order to fetch the previous page of the list. before: nil, + # Query param: A list of additional fields to include in the response. 
Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. include: nil, + # Query param: A limit on the number of objects to be returned. Limit can range + # between 1 and 100, and the default is 20. limit: nil, + # Query param: Sort order by the `created_at` timestamp of the objects. `asc` for + # ascending order and `desc` for descending order. order: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/chat/completions.rbi b/rbi/lib/openai/resources/chat/completions.rbi index b4a45a99..7f6a851c 100644 --- a/rbi/lib/openai/resources/chat/completions.rbi +++ b/rbi/lib/openai/resources/chat/completions.rbi @@ -75,35 +75,188 @@ module OpenAI .returns(OpenAI::Models::Chat::ChatCompletion) end def create( + # A list of messages comprising the conversation so far. Depending on the + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). messages:, + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. model:, + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). audio: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. frequency_penalty: nil, + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. function_call: nil, + # Deprecated in favor of `tools`. + # + # A list of functions the model may generate JSON inputs for. functions: nil, + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. logit_bias: nil, + # Whether to return log probabilities of the output tokens or not. If true, + # returns the log probabilities of each output token returned in the `content` of + # `message`. 
logprobs: nil, + # An upper bound for the number of tokens that can be generated for a completion, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). max_completion_tokens: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the chat + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. + # + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o1 series models](https://platform.openai.com/docs/guides/reasoning). max_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Output types that you would like the model to generate. Most models are capable + # of generating text, which is the default: + # + # `["text"]` + # + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). To request that + # this model generate both text and audio responses, you can use: + # + # `["text", "audio"]` modalities: nil, + # How many chat completion choices to generate for each input message. Note that + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. n: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, + # Static predicted output content, such as the content of a text file that is + # being regenerated. prediction: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. presence_penalty: nil, + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. response_format: nil, + # This feature is in Beta. If specified, our system will make a best effort to + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. seed: nil, + # Specifies the latency tier to use for processing the request. 
This parameter is
+ # relevant for customers subscribed to the scale tier service:
+ #
+ # - If set to 'auto', and the Project is Scale tier enabled, the system will
+ # utilize scale tier credits until they are exhausted.
+ # - If set to 'auto', and the Project is not Scale tier enabled, the request will
+ # be processed using the default service tier with a lower uptime SLA and no
+ # latency guarantee.
+ # - If set to 'default', the request will be processed using the default service
+ # tier with a lower uptime SLA and no latency guarantee.
+ # - When not set, the default behavior is 'auto'.
+ #
+ # When this parameter is set, the response body will include the `service_tier`
+ # utilized.
service_tier: nil,
+ # Up to 4 sequences where the API will stop generating further tokens. The
+ # returned text will not contain the stop sequence.
stop: nil,
+ # Whether or not to store the output of this chat completion request for use in
+ # our [model distillation](https://platform.openai.com/docs/guides/distillation)
+ # or [evals](https://platform.openai.com/docs/guides/evals) products.
store: nil,
+ # Options for streaming response. Only set this when you set `stream: true`.
stream_options: nil,
+ # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ # make the output more random, while lower values like 0.2 will make it more
+ # focused and deterministic. We generally recommend altering this or `top_p` but
+ # not both.
temperature: nil,
+ # Controls which (if any) tool is called by the model. `none` means the model will
+ # not call any tool and instead generates a message. `auto` means the model can
+ # pick between generating a message or calling one or more tools. `required` means
+ # the model must call one or more tools. Specifying a particular tool via
+ # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ # call that tool.
+ #
+ # `none` is the default when no tools are present. `auto` is the default if tools
+ # are present.
tool_choice: nil,
+ # A list of tools the model may call. Currently, only functions are supported as a
+ # tool. Use this to provide a list of functions the model may generate JSON inputs
+ # for. A max of 128 functions are supported.
tools: nil,
+ # An integer between 0 and 20 specifying the number of most likely tokens to
+ # return at each token position, each with an associated log probability.
+ # `logprobs` must be set to `true` if this parameter is used.
top_logprobs: nil,
+ # An alternative to sampling with temperature, called nucleus sampling, where the
+ # model considers the results of the tokens with top_p probability mass. So 0.1
+ # means only the tokens comprising the top 10% probability mass are considered.
+ #
+ # We generally recommend altering this or `temperature` but not both.
top_p: nil,
+ # A unique identifier representing your end-user, which can help OpenAI to monitor
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
user: nil,
+ # This tool searches the web for relevant results to use in a response. Learn more
+ # about the
+ # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
web_search_options: nil,
request_options: {}
)
@@ -176,35 +329,188 @@ module OpenAI
.returns(OpenAI::Stream[OpenAI::Models::Chat::ChatCompletionChunk])
end
def create_streaming(
+ # A list of messages comprising the conversation so far.
Depending on the + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). messages:, + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. model:, + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). audio: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. frequency_penalty: nil, + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. function_call: nil, + # Deprecated in favor of `tools`. + # + # A list of functions the model may generate JSON inputs for. functions: nil, + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. logit_bias: nil, + # Whether to return log probabilities of the output tokens or not. If true, + # returns the log probabilities of each output token returned in the `content` of + # `message`. logprobs: nil, + # An upper bound for the number of tokens that can be generated for a completion, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). max_completion_tokens: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the chat + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. + # + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o1 series models](https://platform.openai.com/docs/guides/reasoning). max_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Output types that you would like the model to generate. 
Most models are capable
+ # of generating text, which is the default:
+ #
+ # `["text"]`
+ #
+ # The `gpt-4o-audio-preview` model can also be used to
+ # [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+ # this model generate both text and audio responses, you can use:
+ #
+ # `["text", "audio"]`
modalities: nil,
+ # How many chat completion choices to generate for each input message. Note that
+ # you will be charged based on the number of generated tokens across all of the
+ # choices. Keep `n` as `1` to minimize costs.
n: nil,
+ # Whether to enable
+ # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+ # during tool use.
parallel_tool_calls: nil,
+ # Static predicted output content, such as the content of a text file that is
+ # being regenerated.
prediction: nil,
+ # Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ # whether they appear in the text so far, increasing the model's likelihood to
+ # talk about new topics.
presence_penalty: nil,
+ # **o-series models only**
+ #
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ # result in faster responses and fewer tokens used on reasoning in a response.
reasoning_effort: nil,
+ # An object specifying the format that the model must output.
+ #
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ # Outputs which ensures the model will match your supplied JSON schema. Learn more
+ # in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
response_format: nil,
+ # This feature is in Beta. If specified, our system will make a best effort to
+ # sample deterministically, such that repeated requests with the same `seed` and
+ # parameters should return the same result. Determinism is not guaranteed, and you
+ # should refer to the `system_fingerprint` response parameter to monitor changes
+ # in the backend.
seed: nil,
+ # Specifies the latency tier to use for processing the request. This parameter is
+ # relevant for customers subscribed to the scale tier service:
+ #
+ # - If set to 'auto', and the Project is Scale tier enabled, the system will
+ # utilize scale tier credits until they are exhausted.
+ # - If set to 'auto', and the Project is not Scale tier enabled, the request will
+ # be processed using the default service tier with a lower uptime SLA and no
+ # latency guarantee.
+ # - If set to 'default', the request will be processed using the default service
+ # tier with a lower uptime SLA and no latency guarantee.
+ # - When not set, the default behavior is 'auto'.
+ #
+ # When this parameter is set, the response body will include the `service_tier`
+ # utilized.
service_tier: nil,
+ # Up to 4 sequences where the API will stop generating further tokens. The
+ # returned text will not contain the stop sequence.
stop: nil,
+ # Whether or not to store the output of this chat completion request for use in
+ # our [model distillation](https://platform.openai.com/docs/guides/distillation)
+ # or [evals](https://platform.openai.com/docs/guides/evals) products.
store: nil, + # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. temperature: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. tool_choice: nil, + # A list of tools the model may call. Currently, only functions are supported as a + # tool. Use this to provide a list of functions the model may generate JSON inputs + # for. A max of 128 functions are supported. tools: nil, + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. top_logprobs: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. top_p: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). web_search_options: nil, request_options: {} ) @@ -219,7 +525,11 @@ module OpenAI ) .returns(OpenAI::Models::Chat::ChatCompletion) end - def retrieve(completion_id, request_options: {}) + def retrieve( + # The ID of the chat completion to retrieve. + completion_id, + request_options: {} + ) end # Modify a stored chat completion. Only Chat Completions that have been created @@ -233,7 +543,18 @@ module OpenAI ) .returns(OpenAI::Models::Chat::ChatCompletion) end - def update(completion_id, metadata:, request_options: {}) + def update( + # The ID of the chat completion to update. + completion_id, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + request_options: {} + ) end # List stored Chat Completions. Only Chat Completions that have been stored with @@ -249,7 +570,22 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::Chat::ChatCompletion]) end - def list(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {}) + def list( + # Identifier for the last chat completion from the previous pagination request. 
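Read together, the `create` parameters documented above map onto a call like this sketch (the model name and message content are illustrative, plain hashes are assumed to coerce into the message types, and `store: true` is what later makes `retrieve`/`update`/`list` possible):

```ruby
client = OpenAI::Client.new

chat_completion = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: "user", content: "Say hello in one sentence."}],
  temperature: 0.2, # tune this or `top_p`, not both
  store: true       # persist so the completion can be retrieved or listed later
)
puts chat_completion.choices.first.message.content
```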
+ after: nil, + # Number of Chat Completions to retrieve. + limit: nil, + # A list of metadata keys to filter the Chat Completions by. Example: + # + # `metadata[key1]=value1&metadata[key2]=value2` + metadata: nil, + # The model used to generate the Chat Completions. + model: nil, + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. + order: nil, + request_options: {} + ) end # Delete a stored chat completion. Only Chat Completions that have been created @@ -261,7 +597,11 @@ module OpenAI ) .returns(OpenAI::Models::Chat::ChatCompletionDeleted) end - def delete(completion_id, request_options: {}) + def delete( + # The ID of the chat completion to delete. + completion_id, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/chat/completions/messages.rbi b/rbi/lib/openai/resources/chat/completions/messages.rbi index 26b43645..3e7c16e2 100644 --- a/rbi/lib/openai/resources/chat/completions/messages.rbi +++ b/rbi/lib/openai/resources/chat/completions/messages.rbi @@ -17,7 +17,18 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::Chat::ChatCompletionStoreMessage]) end - def list(completion_id, after: nil, limit: nil, order: nil, request_options: {}) + def list( + # The ID of the chat completion to retrieve messages from. + completion_id, + # Identifier for the last message from the previous pagination request. + after: nil, + # Number of messages to retrieve. + limit: nil, + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + # for descending order. Defaults to `asc`. + order: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/completions.rbi b/rbi/lib/openai/resources/completions.rbi index 2291fad9..949f94ed 100644 --- a/rbi/lib/openai/resources/completions.rbi +++ b/rbi/lib/openai/resources/completions.rbi @@ -35,22 +35,108 @@ module OpenAI .returns(OpenAI::Models::Completion) end def create( + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model:, + # The prompt(s) to generate completions for, encoded as a string, array of + # strings, array of tokens, or array of token arrays. + # + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. prompt:, + # Generates `best_of` completions server-side and returns the "best" (the one with + # the highest log probability per token). Results cannot be streamed. + # + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. best_of: nil, + # Echo back the prompt in addition to the completion echo: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. 
+ #
+ # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
frequency_penalty: nil,
+ # Modify the likelihood of specified tokens appearing in the completion.
+ #
+ # Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+ # tokenizer) to an associated bias value from -100 to 100. You can use this
+ # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ # Mathematically, the bias is added to the logits generated by the model prior to
+ # sampling. The exact effect will vary per model, but values between -1 and 1
+ # should decrease or increase likelihood of selection; values like -100 or 100
+ # should result in a ban or exclusive selection of the relevant token.
+ #
+ # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+ # from being generated.
logit_bias: nil,
+ # Include the log probabilities on the `logprobs` most likely output tokens, as
+ # well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+ # list of the 5 most likely tokens. The API will always return the `logprob` of
+ # the sampled token, so there may be up to `logprobs+1` elements in the response.
+ #
+ # The maximum value for `logprobs` is 5.
logprobs: nil,
+ # The maximum number of [tokens](/tokenizer) that can be generated in the
+ # completion.
+ #
+ # The token count of your prompt plus `max_tokens` cannot exceed the model's
+ # context length.
+ # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+ # for counting tokens.
max_tokens: nil,
+ # How many completions to generate for each prompt.
+ #
+ # **Note:** Because this parameter generates many completions, it can quickly
+ # consume your token quota. Use carefully and ensure that you have reasonable
+ # settings for `max_tokens` and `stop`.
n: nil,
+ # Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ # whether they appear in the text so far, increasing the model's likelihood to
+ # talk about new topics.
+ #
+ # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
presence_penalty: nil,
+ # If specified, our system will make a best effort to sample deterministically,
+ # such that repeated requests with the same `seed` and parameters should return
+ # the same result.
+ #
+ # Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+ # response parameter to monitor changes in the backend.
seed: nil,
+ # Up to 4 sequences where the API will stop generating further tokens. The
+ # returned text will not contain the stop sequence.
stop: nil,
+ # Options for streaming response. Only set this when you set `stream: true`.
stream_options: nil,
+ # The suffix that comes after a completion of inserted text.
+ #
+ # This parameter is only supported for `gpt-3.5-turbo-instruct`.
suffix: nil,
+ # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ # make the output more random, while lower values like 0.2 will make it more
+ # focused and deterministic.
+ #
+ # We generally recommend altering this or `top_p` but not both.
temperature: nil,
+ # An alternative to sampling with temperature, called nucleus sampling, where the
+ # model considers the results of the tokens with top_p probability mass. So 0.1
+ # means only the tokens comprising the top 10% probability mass are considered.
+ #
+ # We generally recommend altering this or `temperature` but not both.
top_p: nil,
+ # A unique identifier representing your end-user, which can help OpenAI to monitor
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
user: nil,
request_options: {}
)
@@ -88,22 +174,108 @@ module OpenAI
.returns(OpenAI::Stream[OpenAI::Models::Completion])
end
def create_streaming(
+ # ID of the model to use. You can use the
+ # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ # see all of your available models, or see our
+ # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ # them.
model:,
+ # The prompt(s) to generate completions for, encoded as a string, array of
+ # strings, array of tokens, or array of token arrays.
+ #
+ # Note that <|endoftext|> is the document separator that the model sees during
+ # training, so if a prompt is not specified the model will generate as if from the
+ # beginning of a new document.
prompt:,
+ # Generates `best_of` completions server-side and returns the "best" (the one with
+ # the highest log probability per token). Results cannot be streamed.
+ #
+ # When used with `n`, `best_of` controls the number of candidate completions and
+ # `n` specifies how many to return – `best_of` must be greater than `n`.
+ #
+ # **Note:** Because this parameter generates many completions, it can quickly
+ # consume your token quota. Use carefully and ensure that you have reasonable
+ # settings for `max_tokens` and `stop`.
best_of: nil,
+ # Echo back the prompt in addition to the completion
echo: nil,
+ # Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ # existing frequency in the text so far, decreasing the model's likelihood to
+ # repeat the same line verbatim.
+ #
+ # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
frequency_penalty: nil,
+ # Modify the likelihood of specified tokens appearing in the completion.
+ #
+ # Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+ # tokenizer) to an associated bias value from -100 to 100. You can use this
+ # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ # Mathematically, the bias is added to the logits generated by the model prior to
+ # sampling. The exact effect will vary per model, but values between -1 and 1
+ # should decrease or increase likelihood of selection; values like -100 or 100
+ # should result in a ban or exclusive selection of the relevant token.
+ #
+ # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+ # from being generated.
logit_bias: nil,
+ # Include the log probabilities on the `logprobs` most likely output tokens, as
+ # well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+ # list of the 5 most likely tokens. The API will always return the `logprob` of
+ # the sampled token, so there may be up to `logprobs+1` elements in the response.
+ #
+ # The maximum value for `logprobs` is 5.
logprobs: nil,
+ # The maximum number of [tokens](/tokenizer) that can be generated in the
+ # completion.
+ #
+ # The token count of your prompt plus `max_tokens` cannot exceed the model's
+ # context length.
+ # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+ # for counting tokens.
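Since `create_streaming` returns an `OpenAI::Stream`, here is a sketch of consuming it with the library's `for_each` idiom. The model name and prompt are illustrative, and the assumption that streamed chunks expose the same `choices[].text` shape as a full completion is mine, not the patch's:

```ruby
client = OpenAI::Client.new

stream = client.completions.create_streaming(
  model: "gpt-3.5-turbo-instruct",
  prompt: "Write a one-line haiku about type signatures.",
  max_tokens: 32
)

# Print each streamed chunk of text as it arrives.
stream.for_each do |completion|
  print completion.choices.first.text
end
```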
max_tokens: nil, + # How many completions to generate for each prompt. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. n: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) presence_penalty: nil, + # If specified, our system will make a best effort to sample deterministically, + # such that repeated requests with the same `seed` and parameters should return + # the same result. + # + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. seed: nil, + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. stop: nil, + # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, + # The suffix that comes after a completion of inserted text. + # + # This parameter is only supported for `gpt-3.5-turbo-instruct`. suffix: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + # + # We generally recommend altering this or `top_p` but not both. temperature: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. top_p: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/embeddings.rbi b/rbi/lib/openai/resources/embeddings.rbi index a251c27d..025c37a7 100644 --- a/rbi/lib/openai/resources/embeddings.rbi +++ b/rbi/lib/openai/resources/embeddings.rbi @@ -15,7 +15,34 @@ module OpenAI ) .returns(OpenAI::Models::CreateEmbeddingResponse) end - def create(input:, model:, dimensions: nil, encoding_format: nil, user: nil, request_options: {}) + def create( + # Input text to embed, encoded as a string or array of tokens. To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. Some models may also impose a limit on total number of + # tokens summed across inputs. + input:, + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. 
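For reference, a minimal embeddings sketch under the constraints documented here (the model name is illustrative, and `dimensions` is only honored by `text-embedding-3` and later models, as noted just below):

```ruby
client = OpenAI::Client.new

response = client.embeddings.create(
  model: "text-embedding-3-small",
  input: "The food was delicious and the waiter was friendly.",
  dimensions: 256 # shrink the output vector; text-embedding-3+ only
)
vector = response.data.first.embedding # => Array of Floats, length 256
```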
+ model:,
+ # The number of dimensions the resulting output embeddings should have. Only
+ # supported in `text-embedding-3` and later models.
+ dimensions: nil,
+ # The format to return the embeddings in. Can be either `float` or
+ # [`base64`](https://pypi.org/project/pybase64/).
+ encoding_format: nil,
+ # A unique identifier representing your end-user, which can help OpenAI to monitor
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ user: nil,
+ request_options: {}
+ )
end

sig { params(client: OpenAI::Client).returns(T.attached_class) }
diff --git a/rbi/lib/openai/resources/files.rbi b/rbi/lib/openai/resources/files.rbi
index 39b009e9..441f93e0 100644
--- a/rbi/lib/openai/resources/files.rbi
+++ b/rbi/lib/openai/resources/files.rbi
@@ -32,7 +32,20 @@ module OpenAI
)
.returns(OpenAI::Models::FileObject)
end
- def create(file:, purpose:, request_options: {})
+ def create(
+ # The File object (not file name) to be uploaded.
+ file:,
+ # The intended purpose of the uploaded file. One of:
+ #
+ # - `assistants`: Used in the Assistants API
+ # - `batch`: Used in the Batch API
+ # - `fine-tune`: Used for fine-tuning
+ # - `vision`: Images used for vision fine-tuning
+ # - `user_data`: Flexible file type for any purpose
+ # - `evals`: Used for eval data sets
+ purpose:,
+ request_options: {}
+ )
end

# Returns information about a specific file.
@@ -43,7 +52,11 @@ module OpenAI
)
.returns(OpenAI::Models::FileObject)
end
- def retrieve(file_id, request_options: {})
+ def retrieve(
+ # The ID of the file to use for this request.
+ file_id,
+ request_options: {}
+ )
end

# Returns a list of files.
@@ -57,7 +70,22 @@ module OpenAI
)
.returns(OpenAI::CursorPage[OpenAI::Models::FileObject])
end
- def list(after: nil, limit: nil, order: nil, purpose: nil, request_options: {})
+ def list(
+ # A cursor for use in pagination. `after` is an object ID that defines your place
+ # in the list. For instance, if you make a list request and receive 100 objects,
+ # ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ # fetch the next page of the list.
+ after: nil,
+ # A limit on the number of objects to be returned. Limit can range between 1 and
+ # 10,000, and the default is 10,000.
+ limit: nil,
+ # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ # order and `desc` for descending order.
+ order: nil,
+ # Only return files with the given purpose.
+ purpose: nil,
+ request_options: {}
+ )
end

# Delete a file.
@@ -68,7 +96,11 @@ module OpenAI
)
.returns(OpenAI::Models::FileDeleted)
end
- def delete(file_id, request_options: {})
+ def delete(
+ # The ID of the file to use for this request.
+ file_id,
+ request_options: {}
+ )
end

# Returns the contents of the specified file.
@@ -79,7 +111,11 @@ module OpenAI
)
.returns(T.anything)
end
- def content(file_id, request_options: {})
+ def content(
+ # The ID of the file to use for this request.
+ file_id,
+ request_options: {}
+ )
end

sig { params(client: OpenAI::Client).returns(T.attached_class) }
diff --git a/rbi/lib/openai/resources/fine_tuning/jobs.rbi b/rbi/lib/openai/resources/fine_tuning/jobs.rbi
index 5c720561..e1709e2c 100644
--- a/rbi/lib/openai/resources/fine_tuning/jobs.rbi
+++ b/rbi/lib/openai/resources/fine_tuning/jobs.rbi
@@ -31,14 +31,63 @@ module OpenAI
.returns(OpenAI::Models::FineTuning::FineTuningJob)
end
def create(
+ # The name of the model to fine-tune.
You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). model:, + # The ID of an uploaded file that contains training data. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. + # + # The contents of the file should differ depending on if the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or if the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. + # + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. training_file:, + # The hyperparameters used for the fine-tuning job. This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. hyperparameters: nil, + # A list of integrations to enable for your fine-tuning job. integrations: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # The method used for fine-tuning. method_: nil, + # The seed controls the reproducibility of the job. Passing in the same seed and + # job parameters should produce the same results, but may differ in rare cases. If + # a seed is not specified, one will be generated for you. seed: nil, + # A string of up to 64 characters that will be added to your fine-tuned model + # name. + # + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. suffix: nil, + # The ID of an uploaded file that contains validation data. + # + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. + # + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. + # + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. validation_file: nil, request_options: {} ) @@ -54,7 +103,11 @@ module OpenAI ) .returns(OpenAI::Models::FineTuning::FineTuningJob) end - def retrieve(fine_tuning_job_id, request_options: {}) + def retrieve( + # The ID of the fine-tuning job. + fine_tuning_job_id, + request_options: {} + ) end # List your organization's fine-tuning jobs @@ -67,7 +120,16 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::FineTuning::FineTuningJob]) end - def list(after: nil, limit: nil, metadata: nil, request_options: {}) + def list( + # Identifier for the last job from the previous pagination request. + after: nil, + # Number of fine-tuning jobs to retrieve. + limit: nil, + # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + # Alternatively, set `metadata=null` to indicate no metadata. 
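A short sketch of kicking off the fine-tuning job described above, assuming `file-abc123` is a JSONL file already uploaded with purpose `fine-tune` (all IDs and the base model name are placeholders):

```ruby
client = OpenAI::Client.new

job = client.fine_tuning.jobs.create(
  model: "gpt-4o-mini",
  training_file: "file-abc123",
  suffix: "custom-model-name" # yields e.g. ft:gpt-4o-mini:openai:custom-model-name:...
)

# Poll the job by ID, as documented above.
job = client.fine_tuning.jobs.retrieve(job.id)
```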
+ metadata: nil, + request_options: {} + ) end # Immediately cancel a fine-tune job. @@ -78,7 +140,11 @@ module OpenAI ) .returns(OpenAI::Models::FineTuning::FineTuningJob) end - def cancel(fine_tuning_job_id, request_options: {}) + def cancel( + # The ID of the fine-tuning job to cancel. + fine_tuning_job_id, + request_options: {} + ) end # Get status updates for a fine-tuning job. @@ -91,7 +157,15 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::FineTuning::FineTuningJobEvent]) end - def list_events(fine_tuning_job_id, after: nil, limit: nil, request_options: {}) + def list_events( + # The ID of the fine-tuning job to get events for. + fine_tuning_job_id, + # Identifier for the last event from the previous pagination request. + after: nil, + # Number of events to retrieve. + limit: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi b/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi index d0a7bb83..24509186 100644 --- a/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi +++ b/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi @@ -15,7 +15,15 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint]) end - def list(fine_tuning_job_id, after: nil, limit: nil, request_options: {}) + def list( + # The ID of the fine-tuning job to get checkpoints for. + fine_tuning_job_id, + # Identifier for the last checkpoint ID from the previous pagination request. + after: nil, + # Number of checkpoints to retrieve. + limit: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/images.rbi b/rbi/lib/openai/resources/images.rbi index 3655f172..396f2edd 100644 --- a/rbi/lib/openai/resources/images.rbi +++ b/rbi/lib/openai/resources/images.rbi @@ -17,11 +17,25 @@ module OpenAI .returns(OpenAI::Models::ImagesResponse) end def create_variation( + # The image to use as the basis for the variation(s). Must be a valid PNG file, + # less than 4MB, and square. image:, + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. model: nil, + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. n: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. response_format: nil, + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. size: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) @@ -43,13 +57,31 @@ module OpenAI .returns(OpenAI::Models::ImagesResponse) end def edit( + # The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask + # is not provided, image must have transparency, which will be used as the mask. image:, + # A text description of the desired image(s). The maximum length is 1000 + # characters. prompt:, + # An additional image whose fully transparent areas (e.g. where alpha is zero) + # indicate where `image` should be edited. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. mask: nil, + # The model to use for image generation. 
Only `dall-e-2` is supported at this + # time. model: nil, + # The number of images to generate. Must be between 1 and 10. n: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. response_format: nil, + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. size: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) @@ -71,13 +103,34 @@ module OpenAI .returns(OpenAI::Models::ImagesResponse) end def generate( + # A text description of the desired image(s). The maximum length is 1000 + # characters for `dall-e-2` and 4000 characters for `dall-e-3`. prompt:, + # The model to use for image generation. model: nil, + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. n: nil, + # The quality of the image that will be generated. `hd` creates images with finer + # details and greater consistency across the image. This param is only supported + # for `dall-e-3`. quality: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. response_format: nil, + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. size: nil, + # The style of the generated images. Must be one of `vivid` or `natural`. Vivid + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. style: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/models.rbi b/rbi/lib/openai/resources/models.rbi index 04a4cf0d..52e65f29 100644 --- a/rbi/lib/openai/resources/models.rbi +++ b/rbi/lib/openai/resources/models.rbi @@ -12,7 +12,11 @@ module OpenAI ) .returns(OpenAI::Models::Model) end - def retrieve(model, request_options: {}) + def retrieve( + # The ID of the model to use for this request + model, + request_options: {} + ) end # Lists the currently available models, and provides basic information about each @@ -33,7 +37,11 @@ module OpenAI ) .returns(OpenAI::Models::ModelDeleted) end - def delete(model, request_options: {}) + def delete( + # The model to delete + model, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/moderations.rbi b/rbi/lib/openai/resources/moderations.rbi index 3b856753..3b9b2bd0 100644 --- a/rbi/lib/openai/resources/moderations.rbi +++ b/rbi/lib/openai/resources/moderations.rbi @@ -17,7 +17,17 @@ module OpenAI ) .returns(OpenAI::Models::ModerationCreateResponse) end - def create(input:, model: nil, request_options: {}) + def create( + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. 
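The image-generation parameters above combine into a call like the following sketch. The prompt is illustrative, and passing enum-valued arguments as symbols is an assumption about how these typed parameters are supplied:

```ruby
client = OpenAI::Client.new

images = client.images.generate(
  prompt: "A watercolor lighthouse at dawn",
  model: "dall-e-3",
  size: :"1024x1024",
  quality: :hd,   # `hd` is only supported for dall-e-3
  style: :natural # `vivid`/`natural` are only supported for dall-e-3
)
puts images.data.first.url # URLs expire 60 minutes after generation
```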
+ input:,
+ # The content moderation model you would like to use. Learn more in
+ # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
+ # learn about available models
+ # [here](https://platform.openai.com/docs/models#moderation).
+ model: nil,
+ request_options: {}
+ )
end

sig { params(client: OpenAI::Client).returns(T.attached_class) }
diff --git a/rbi/lib/openai/resources/responses.rbi b/rbi/lib/openai/resources/responses.rbi
index ded8cf36..823cc8a1 100644
--- a/rbi/lib/openai/resources/responses.rbi
+++ b/rbi/lib/openai/resources/responses.rbi
@@ -49,22 +49,109 @@ module OpenAI
.returns(OpenAI::Models::Responses::Response)
end
def create(
+ # Text, image, or file inputs to the model, used to generate a response.
+ #
+ # Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Image inputs](https://platform.openai.com/docs/guides/images)
+ # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+ # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+ # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
input:,
+ # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+ # wide range of models with different capabilities, performance characteristics,
+ # and price points. Refer to the
+ # [model guide](https://platform.openai.com/docs/models) to browse and compare
+ # available models.
model:,
+ # Specify additional output data to include in the model response. Currently
+ # supported values are:
+ #
+ # - `file_search_call.results`: Include the search results of the file search tool
+ # call.
+ # - `message.input_image.image_url`: Include image urls from the input message.
+ # - `computer_call_output.output.image_url`: Include image urls from the computer
+ # call output.
include: nil,
+ # Inserts a system (or developer) message as the first item in the model's
+ # context.
+ #
+ # When using along with `previous_response_id`, the instructions from a previous
+ # response will not be carried over to the next response. This makes it simple
+ # to swap out system (or developer) messages in new responses.
instructions: nil,
+ # An upper bound for the number of tokens that can be generated for a response,
+ # including visible output tokens and
+ # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
max_output_tokens: nil,
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
+ #
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
metadata: nil,
+ # Whether to allow the model to run tool calls in parallel.
parallel_tool_calls: nil,
+ # The unique ID of the previous response to the model. Use this to create
+ # multi-turn conversations. Learn more about
+ # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
previous_response_id: nil,
+ # **o-series models only**
+ #
+ # Configuration options for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
reasoning: nil,
+ # Whether to store the generated model response for later retrieval via API.
store: nil,
+ # What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) text: nil, + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. tool_choice: nil, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. top_p: nil, + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. truncation: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) @@ -149,22 +236,109 @@ module OpenAI ) end def create_streaming( + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) input:, + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. model:, + # Specify additional output data to include in the model response. 
Currently
+ # supported values are:
+ #
+ # - `file_search_call.results`: Include the search results of the file search tool
+ # call.
+ # - `message.input_image.image_url`: Include image urls from the input message.
+ # - `computer_call_output.output.image_url`: Include image urls from the computer
+ # call output.
include: nil,
+ # Inserts a system (or developer) message as the first item in the model's
+ # context.
+ #
+ # When using along with `previous_response_id`, the instructions from a previous
+ # response will not be carried over to the next response. This makes it simple
+ # to swap out system (or developer) messages in new responses.
instructions: nil,
+ # An upper bound for the number of tokens that can be generated for a response,
+ # including visible output tokens and
+ # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
max_output_tokens: nil,
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
+ #
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
metadata: nil,
+ # Whether to allow the model to run tool calls in parallel.
parallel_tool_calls: nil,
+ # The unique ID of the previous response to the model. Use this to create
+ # multi-turn conversations. Learn more about
+ # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
previous_response_id: nil,
+ # **o-series models only**
+ #
+ # Configuration options for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
reasoning: nil,
+ # Whether to store the generated model response for later retrieval via API.
store: nil,
+ # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ # make the output more random, while lower values like 0.2 will make it more
+ # focused and deterministic. We generally recommend altering this or `top_p` but
+ # not both.
temperature: nil,
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
text: nil,
+ # How the model should select which tool (or tools) to use when generating a
+ # response. See the `tools` parameter to see how to specify which tools the model
+ # can call.
tool_choice: nil,
+ # An array of tools the model may call while generating a response. You can
+ # specify which tool to use by setting the `tool_choice` parameter.
+ #
+ # The two categories of tools you can provide the model are:
+ #
+ # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ # capabilities, like
+ # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ # [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ # Learn more about
+ # [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ # the model to call your own code. Learn more about
+ # [function calling](https://platform.openai.com/docs/guides/function-calling).
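A minimal Responses sketch matching the parameters documented above; the model and input are illustrative, and `store: true` is set explicitly so the follow-up fetch works:

```ruby
client = OpenAI::Client.new

response = client.responses.create(
  model: "gpt-4o",
  input: "Summarize this patch in one sentence.",
  store: true # keep the response retrievable via the API
)

# Fetch the same response again by ID, per the `retrieve` signature below.
same_response = client.responses.retrieve(response.id)
```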
tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. top_p: nil, + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. truncation: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) @@ -179,7 +353,14 @@ module OpenAI ) .returns(OpenAI::Models::Responses::Response) end - def retrieve(response_id, include: nil, request_options: {}) + def retrieve( + # The ID of the response to retrieve. + response_id, + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. + include: nil, + request_options: {} + ) end # Deletes a model response with the given ID. @@ -190,7 +371,11 @@ module OpenAI ) .void end - def delete(response_id, request_options: {}) + def delete( + # The ID of the response to delete. + response_id, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/responses/input_items.rbi b/rbi/lib/openai/resources/responses/input_items.rbi index afd82a10..f6d2e651 100644 --- a/rbi/lib/openai/resources/responses/input_items.rbi +++ b/rbi/lib/openai/resources/responses/input_items.rbi @@ -29,7 +29,23 @@ module OpenAI ] ) end - def list(response_id, after: nil, before: nil, limit: nil, order: nil, request_options: {}) + def list( + # The ID of the response to retrieve input items for. + response_id, + # An item ID to list items after, used in pagination. + after: nil, + # An item ID to list items before, used in pagination. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # The order to return the input items in. Default is `asc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. + order: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/uploads.rbi b/rbi/lib/openai/resources/uploads.rbi index 561a624f..98a58dc5 100644 --- a/rbi/lib/openai/resources/uploads.rbi +++ b/rbi/lib/openai/resources/uploads.rbi @@ -36,7 +36,23 @@ module OpenAI ) .returns(OpenAI::Models::Upload) end - def create(bytes:, filename:, mime_type:, purpose:, request_options: {}) + def create( + # The number of bytes in the file you are uploading. + bytes:, + # The name of the file to upload. + filename:, + # The MIME type of the file. + # + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. + mime_type:, + # The intended purpose of the uploaded file.
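+ # For illustration only, `purpose: "fine-tune"` is one assumed value; treat the + # documentation linked below as authoritative.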
+ # + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + purpose:, + request_options: {} + ) end # Cancels the Upload. No Parts may be added after an Upload is cancelled. @@ -47,7 +63,11 @@ ) .returns(OpenAI::Models::Upload) end - def cancel(upload_id, request_options: {}) + def cancel( + # The ID of the Upload. + upload_id, + request_options: {} + ) end # Completes the @@ -72,7 +92,16 @@ ) .returns(OpenAI::Models::Upload) end - def complete(upload_id, part_ids:, md5: nil, request_options: {}) + def complete( + # The ID of the Upload. + upload_id, + # The ordered list of Part IDs. + part_ids:, + # The optional MD5 checksum for the file contents to verify if the bytes uploaded + # match what you expect. + md5: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/uploads/parts.rbi b/rbi/lib/openai/resources/uploads/parts.rbi index 347903f3..6e52432f 100644 --- a/rbi/lib/openai/resources/uploads/parts.rbi +++ b/rbi/lib/openai/resources/uploads/parts.rbi @@ -23,7 +23,13 @@ module OpenAI ) .returns(OpenAI::Models::Uploads::UploadPart) end - def create(upload_id, data:, request_options: {}) + def create( + # The ID of the Upload. + upload_id, + # The chunk of bytes for this Part. + data:, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/vector_stores.rbi b/rbi/lib/openai/resources/vector_stores.rbi index 4420a17f..32aecdbf 100644 --- a/rbi/lib/openai/resources/vector_stores.rbi +++ b/rbi/lib/openai/resources/vector_stores.rbi @@ -27,10 +27,23 @@ module OpenAI .returns(OpenAI::Models::VectorStore) end def create( + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. chunking_strategy: nil, + # The expiration policy for a vector store. expires_after: nil, + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. file_ids: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # The name of the vector store. name: nil, request_options: {} ) @@ -44,7 +57,11 @@ ) .returns(OpenAI::Models::VectorStore) end - def retrieve(vector_store_id, request_options: {}) + def retrieve( + # The ID of the vector store to retrieve. + vector_store_id, + request_options: {} + ) end # Modifies a vector store. @@ -58,7 +75,22 @@ ) .returns(OpenAI::Models::VectorStore) end - def update(vector_store_id, expires_after: nil, metadata: nil, name: nil, request_options: {}) + def update( + # The ID of the vector store to modify. + vector_store_id, + # The expiration policy for a vector store. + expires_after: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters.
Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the vector store. + name: nil, + request_options: {} + ) end # Returns a list of vector stores. @@ -72,7 +104,25 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::VectorStore]) end - def list(after: nil, before: nil, limit: nil, order: nil, request_options: {}) + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) end # Delete a vector store. @@ -83,7 +133,11 @@ module OpenAI ) .returns(OpenAI::Models::VectorStoreDeleted) end - def delete(vector_store_id, request_options: {}) + def delete( + # The ID of the vector store to delete. + vector_store_id, + request_options: {} + ) end # Search a vector store for relevant chunks based on a query and file attributes @@ -101,11 +155,18 @@ .returns(OpenAI::Page[OpenAI::Models::VectorStoreSearchResponse]) end def search( + # The ID of the vector store to search. vector_store_id, + # A query string for the search. query:, + # A filter to apply based on file attributes. filters: nil, + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. max_num_results: nil, + # Ranking options for search. ranking_options: nil, + # Whether to rewrite the natural language query for vector search. rewrite_query: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/vector_stores/file_batches.rbi b/rbi/lib/openai/resources/vector_stores/file_batches.rbi index dbed991d..ec4e8e7a 100644 --- a/rbi/lib/openai/resources/vector_stores/file_batches.rbi +++ b/rbi/lib/openai/resources/vector_stores/file_batches.rbi @@ -18,7 +18,24 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFileBatch) end - def create(vector_store_id, file_ids:, attributes: nil, chunking_strategy: nil, request_options: {}) + def create( + # The ID of the vector store for which to create a File Batch. + vector_store_id, + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. + file_ids:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes: nil, + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty.
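+ # + # As a hedged sketch only (hash shape assumed from the vector store docs, not + # generated by this patch), an explicit static strategy could look like: + # + #   {type: "static", static: {max_chunk_size_tokens: 800, chunk_overlap_tokens: 400}}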
+ chunking_strategy: nil, + request_options: {} + ) end # Retrieves a vector store file batch. @@ -30,7 +47,13 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFileBatch) end - def retrieve(batch_id, vector_store_id:, request_options: {}) + def retrieve( + # The ID of the file batch being retrieved. + batch_id, + # The ID of the vector store that the file batch belongs to. + vector_store_id:, + request_options: {} + ) end # Cancel a vector store file batch. This attempts to cancel the processing of @@ -43,7 +66,13 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFileBatch) end - def cancel(batch_id, vector_store_id:, request_options: {}) + def cancel( + # The ID of the file batch to cancel. + batch_id, + # The ID of the vector store that the file batch belongs to. + vector_store_id:, + request_options: {} + ) end # Returns a list of vector store files in a batch. @@ -61,12 +90,28 @@ module OpenAI .returns(OpenAI::CursorPage[OpenAI::Models::VectorStores::VectorStoreFile]) end def list_files( + # Path param: The ID of the file batch that the files belong to. batch_id, + # Path param: The ID of the vector store that the files belong to. vector_store_id:, + # Query param: A cursor for use in pagination. `after` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, ending with obj_foo, your subsequent call can include + # after=obj_foo in order to fetch the next page of the list. after: nil, + # Query param: A cursor for use in pagination. `before` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, starting with obj_foo, your subsequent call can include + # before=obj_foo in order to fetch the previous page of the list. before: nil, + # Query param: Filter by file status. One of `in_progress`, `completed`, `failed`, + # `cancelled`. filter: nil, + # Query param: A limit on the number of objects to be returned. Limit can range + # between 1 and 100, and the default is 20. limit: nil, + # Query param: Sort order by the `created_at` timestamp of the objects. `asc` for + # ascending order and `desc` for descending order. order: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/vector_stores/files.rbi b/rbi/lib/openai/resources/vector_stores/files.rbi index daf2b28a..2fc3ae57 100644 --- a/rbi/lib/openai/resources/vector_stores/files.rbi +++ b/rbi/lib/openai/resources/vector_stores/files.rbi @@ -20,7 +20,24 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFile) end - def create(vector_store_id, file_id:, attributes: nil, chunking_strategy: nil, request_options: {}) + def create( + # The ID of the vector store for which to create a File. + vector_store_id, + # A [File](https://platform.openai.com/docs/api-reference/files) ID that the + # vector store should use. Useful for tools like `file_search` that can access + # files. + file_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes: nil, + # The chunking strategy used to chunk the file. If not set, will use the `auto` + # strategy.
+ chunking_strategy: nil, + request_options: {} + ) end # Retrieves a vector store file. @@ -32,7 +49,13 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFile) end - def retrieve(file_id, vector_store_id:, request_options: {}) + def retrieve( + # The ID of the file being retrieved. + file_id, + # The ID of the vector store that the file belongs to. + vector_store_id:, + request_options: {} + ) end # Update attributes on a vector store file. @@ -45,7 +68,19 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFile) end - def update(file_id, vector_store_id:, attributes:, request_options: {}) + def update( + # Path param: The ID of the file to update attributes for. + file_id, + # Path param: The ID of the vector store the file belongs to. + vector_store_id:, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. Keys are + # strings with a maximum length of 64 characters. Values are strings with a + # maximum length of 512 characters, booleans, or numbers. + attributes:, + request_options: {} + ) end # Returns a list of vector store files. @@ -62,11 +97,25 @@ module OpenAI .returns(OpenAI::CursorPage[OpenAI::Models::VectorStores::VectorStoreFile]) end def list( + # The ID of the vector store that the files belong to. vector_store_id, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. before: nil, + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. filter: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. order: nil, request_options: {} ) @@ -84,7 +133,13 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFileDeleted) end - def delete(file_id, vector_store_id:, request_options: {}) + def delete( + # The ID of the file to delete. + file_id, + # The ID of the vector store that the file belongs to. + vector_store_id:, + request_options: {} + ) end # Retrieve the parsed contents of a vector store file. @@ -96,7 +151,13 @@ module OpenAI ) .returns(OpenAI::Page[OpenAI::Models::VectorStores::FileContentResponse]) end - def content(file_id, vector_store_id:, request_options: {}) + def content( + # The ID of the file within the vector store. + file_id, + # The ID of the vector store.
+ vector_store_id:, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/util.rbi b/rbi/lib/openai/util.rbi index 62ce155d..8faebf37 100644 --- a/rbi/lib/openai/util.rbi +++ b/rbi/lib/openai/util.rbi @@ -68,7 +68,13 @@ module OpenAI params(values: T::Array[T.anything], sentinel: T.nilable(T.anything), concat: T::Boolean) .returns(T.anything) end - def deep_merge(*values, sentinel: nil, concat: false) + def deep_merge( + *values, + # The value to return if no values are provided. + sentinel: nil, + # Whether to merge sequences by concatenation. + concat: false + ) end # @api private
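A hedged usage sketch of `deep_merge` (receiver name and exact semantics assumed from the signature and comments above, not verified against the implementation):

  # Assumed: right-biased recursive hash merge; `concat: true` concatenates arrays.
  OpenAI::Util.deep_merge({a: {b: [1]}}, {a: {b: [2]}}, concat: true)
  # => {a: {b: [1, 2]}}

  # Assumed: the sentinel is returned when no positional values are supplied.
  OpenAI::Util.deep_merge(sentinel: :none)
  # => :none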